From d5a7d710ee24a545014785d8cb5fcbcf82647d14 Mon Sep 17 00:00:00 2001 From: peter942 Date: Tue, 29 Aug 2023 23:09:22 +0200 Subject: [PATCH 001/164] Fixing navigation issues --- .../components/frame_styling_page.py | 39 ++++++++++++------- ui_components/widgets/frame_selector.py | 2 + 2 files changed, 28 insertions(+), 13 deletions(-) diff --git a/ui_components/components/frame_styling_page.py b/ui_components/components/frame_styling_page.py index fdba7eef..29c7a90f 100644 --- a/ui_components/components/frame_styling_page.py +++ b/ui_components/components/frame_styling_page.py @@ -218,12 +218,17 @@ def frame_styling_page(mainheader2, project_uuid: str): st.markdown("***") - - styling_views = ["Generate Variants", "Crop, Move & Rotate Image", "Inpainting & Background Removal","Draw On Image"] - - st.session_state['styling_view'] = option_menu(None, styling_views, icons=['magic', 'crop', "paint-bucket", 'pencil'], menu_icon="cast", default_index=0, key="styling_view_selector", orientation="horizontal", styles={ + if 'styling_view_index' not in st.session_state: + st.session_state['styling_view_index'] = 0 + st.session_state['change_styling_view_type'] = False + + styling_views = ["Generate Variants", "Crop, Move & Rotate Image", "Inpainting & BG Removal","Draw On Image"] + + st.session_state['styling_view'] = option_menu(None, styling_views, icons=['magic', 'crop', "paint-bucket", 'pencil'], menu_icon="cast", default_index=st.session_state['styling_view_index'], key="styling_view_selector", orientation="horizontal", styles={ "nav-link": {"font-size": "15px", "margin": "0px", "--hover-color": "#eee"}, "nav-link-selected": {"background-color": "#66A9BE"}}) - + + if st.session_state['styling_view_index'] != styling_views.index(st.session_state['styling_view']): + st.session_state['styling_view_index'] = styling_views.index(st.session_state['styling_view']) if st.session_state['styling_view'] == "Generate Variants": @@ -299,7 +304,7 @@ def frame_styling_page(mainheader2, 
project_uuid: str): elif how_to_crop == "Precision Cropping": precision_cropping_element(stage_name, project_uuid) - elif st.session_state['styling_view'] == "Inpainting & Background Removal": + elif st.session_state['styling_view'] == "Inpainting & BG Removal": with st.expander("🌌 Inpainting, Background Removal & More", expanded=True): @@ -354,8 +359,11 @@ def frame_styling_page(mainheader2, project_uuid: str): timing_details = data_repo.get_timing_list_from_project(project_uuid) for i in range(start_index, end_index): - index_of_current_item = i - st.subheader(f"Frame {i+1}") + + + display_number = i + 1 + + st.subheader(f"Frame {display_number}") image1, image2, image3 = st.columns([2, 3, 2]) with image1: @@ -376,26 +384,29 @@ def frame_styling_page(mainheader2, project_uuid: str): with time2: st.write("") - if st.button(f"Jump to single frame view for #{index_of_current_item}"): - st.session_state['current_frame_index'] = index_of_current_item + if st.button(f"Jump to single frame view for #{display_number}"): + st.session_state['prev_frame_index'] = display_number st.session_state['current_frame_uuid'] = timing_details[st.session_state['current_frame_index'] - 1].uuid st.session_state['frame_styling_view_type'] = "Individual View" st.session_state['change_view_type'] = True st.experimental_rerun() + st.markdown("---") btn1, btn2, btn3 = st.columns([2, 1, 1]) with btn1: - if st.button("Delete this keyframe", key=f'{index_of_current_item}'): + if st.button("Delete this keyframe", key=f'{i}'): delete_frame(timing_details[i].uuid) st.experimental_rerun() with btn2: - if st.button("⬆️", key=f"Promote {index_of_current_item}"): + if st.button("⬆️", key=f"Promote {display_number}"): move_frame("Up", timing_details[i].uuid) st.experimental_rerun() with btn3: - if st.button("⬇️", key=f"Demote {index_of_current_item}"): + if st.button("⬇️", key=f"Demote {display_number}"): move_frame("Down", timing_details[i].uuid) st.experimental_rerun() + + st.markdown("***") # 
Display radio buttons for pagination at the bottom st.markdown("***") @@ -475,6 +486,8 @@ def frame_styling_page(mainheader2, project_uuid: str): with timing3: current_preview_video_element(timing_details[idx].uuid) + st.markdown("***") + st.markdown("***") diff --git a/ui_components/widgets/frame_selector.py b/ui_components/widgets/frame_selector.py index 1f55a194..f1137fa4 100644 --- a/ui_components/widgets/frame_selector.py +++ b/ui_components/widgets/frame_selector.py @@ -25,7 +25,9 @@ def frame_selector_widget(): # st.write(st.session_state['prev_frame_index']) # st.write(st.session_state['current_frame_index']) st.session_state['current_frame_index'] = st.number_input(f"Key frame # (out of {len(timing_details)})", 1, len(timing_details), value=st.session_state['prev_frame_index'], step=1, key="which_image_selector") + st.session_state['current_frame_uuid'] = timing_details[st.session_state['current_frame_index'] - 1].uuid + if st.session_state['prev_frame_index'] != st.session_state['current_frame_index']: st.session_state['prev_frame_index'] = st.session_state['current_frame_index'] st.session_state['current_frame_uuid'] = timing_details[st.session_state['current_frame_index'] - 1].uuid From e435da1aeaaa1e700d6428a8ec4690c44311b297 Mon Sep 17 00:00:00 2001 From: peter942 Date: Fri, 8 Sep 2023 12:14:09 +0200 Subject: [PATCH 002/164] Adding a space --- ui_components/setup.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ui_components/setup.py b/ui_components/setup.py index 8bb5ae0e..54285593 100644 --- a/ui_components/setup.py +++ b/ui_components/setup.py @@ -29,7 +29,7 @@ def setup_app_ui(): h1, h2 = st.columns([1, 3]) with h1: - st.markdown("#:red[Ba]:green[no]:orange[do]:blue[co]") + st.markdown("# :red[Ba]:green[no]:orange[do]:blue[co]") sections = ["Open Project", "App Settings", "New Project"] From 491fe1fdb222c32174b84e04af958fe5bfc76c14 Mon Sep 17 00:00:00 2001 From: piyushK52 Date: Tue, 12 Sep 2023 23:43:12 +0530 Subject: [PATCH 
003/164] wip: code refactoring --- banodoco_settings.py | 2 +- .../components/custom_models_page.py | 2 +- .../components/frame_styling_page.py | 5 +- ui_components/components/new_project_page.py | 2 +- .../components/video_rendering_page.py | 2 +- ui_components/{ => methods}/common_methods.py | 753 +----------------- ui_components/methods/video_methods.py | 405 ++++++++++ ui_components/models.py | 32 +- ui_components/widgets/attach_audio_element.py | 2 +- ui_components/widgets/cropping_element.py | 2 +- .../widgets/frame_clip_generation_elements.py | 146 ++++ ui_components/widgets/frame_selector.py | 2 +- ui_components/widgets/frame_time_selector.py | 4 +- ui_components/widgets/prompt_finder.py | 2 +- ui_components/widgets/styling_element.py | 2 +- utils/common_utils.py | 22 +- utils/media_processor/interpolator.py | 116 +++ utils/media_processor/video.py | 132 +-- 18 files changed, 775 insertions(+), 858 deletions(-) rename ui_components/{ => methods}/common_methods.py (78%) create mode 100644 ui_components/methods/video_methods.py create mode 100644 ui_components/widgets/frame_clip_generation_elements.py create mode 100644 utils/media_processor/interpolator.py diff --git a/banodoco_settings.py b/banodoco_settings.py index cbef9f9e..29518cfb 100644 --- a/banodoco_settings.py +++ b/banodoco_settings.py @@ -7,7 +7,7 @@ from shared.logging.constants import LoggingType from shared.logging.logging import AppLogger from shared.constants import AnimationStyleType -from ui_components.common_methods import add_image_variant +from ui_components.methods.common_methods import add_image_variant from ui_components.models import InternalAppSettingObject, InternalFrameTimingObject, InternalProjectObject, InternalUserObject from utils.common_utils import copy_sample_assets, create_working_assets, save_or_host_file from utils.constants import ImageStage diff --git a/ui_components/components/custom_models_page.py b/ui_components/components/custom_models_page.py index 
f00bc705..4f52467e 100644 --- a/ui_components/components/custom_models_page.py +++ b/ui_components/components/custom_models_page.py @@ -2,7 +2,7 @@ from typing import List import streamlit as st from shared.constants import AIModelCategory, AIModelType -from ui_components.common_methods import train_model +from ui_components.methods.common_methods import train_model from ui_components.models import InternalAIModelObject from utils.common_utils import get_current_user_uuid diff --git a/ui_components/components/frame_styling_page.py b/ui_components/components/frame_styling_page.py index 640cc7f6..3fef1207 100644 --- a/ui_components/components/frame_styling_page.py +++ b/ui_components/components/frame_styling_page.py @@ -2,12 +2,11 @@ from streamlit_image_comparison import image_comparison import time from PIL import Image -from ui_components.common_methods import delete_frame, drawing_mode, promote_image_variant, save_uploaded_image, \ +from ui_components.methods.common_methods import delete_frame, drawing_mode, promote_image_variant, save_uploaded_image, \ trigger_restyling_process, create_timings_row_at_frame_number, move_frame, \ calculate_desired_duration_of_individual_clip, create_or_get_single_preview_video, \ calculate_desired_duration_of_individual_clip, apply_image_transformations, \ - ai_frame_editing_element, clone_styling_settings, zoom_inputs,\ - current_individual_clip_element,current_preview_video_element,update_animation_style_element + ai_frame_editing_element, clone_styling_settings, zoom_inputs from ui_components.widgets.cropping_element import manual_cropping_element, precision_cropping_element from ui_components.widgets.frame_time_selector import single_frame_time_selector, update_frame_time from ui_components.widgets.frame_selector import frame_selector_widget diff --git a/ui_components/components/new_project_page.py b/ui_components/components/new_project_page.py index a7ba23f6..5eeb8204 100644 --- a/ui_components/components/new_project_page.py 
+++ b/ui_components/components/new_project_page.py @@ -1,6 +1,6 @@ import streamlit as st from banodoco_settings import create_new_project -from ui_components.common_methods import save_audio_file,create_timings_row_at_frame_number, save_uploaded_image +from ui_components.methods.common_methods import save_audio_file,create_timings_row_at_frame_number, save_uploaded_image from utils.common_utils import get_current_user_uuid, reset_project_state from utils.data_repo.data_repo import DataRepo import time diff --git a/ui_components/components/video_rendering_page.py b/ui_components/components/video_rendering_page.py index 783366e4..621ba78e 100644 --- a/ui_components/components/video_rendering_page.py +++ b/ui_components/components/video_rendering_page.py @@ -2,7 +2,7 @@ import datetime import streamlit as st from shared.constants import InternalFileTag, InternalFileType -from ui_components.common_methods import render_video +from ui_components.methods.common_methods import render_video import random import time import os diff --git a/ui_components/common_methods.py b/ui_components/methods/common_methods.py similarity index 78% rename from ui_components/common_methods.py rename to ui_components/methods/common_methods.py index f23be05d..089e12c9 100644 --- a/ui_components/common_methods.py +++ b/ui_components/methods/common_methods.py @@ -32,15 +32,15 @@ from pydub import AudioSegment import shutil from moviepy.editor import concatenate_videoclips, TextClip, VideoFileClip, vfx -from moviepy.video.compositing.CompositeVideoClip import CompositeVideoClip from backend.models import InternalFileObject from shared.file_upload.s3 import is_s3_image_url, upload_file from ui_components.constants import CROPPED_IMG_LOCAL_PATH, MASK_IMG_LOCAL_PATH, SECOND_MASK_FILE, SECOND_MASK_FILE_PATH, TEMP_MASK_FILE, VideoQuality, WorkflowStageType from ui_components.models import InternalAIModelObject, InternalAppSettingObject, InternalBackupObject, InternalFrameTimingObject, 
InternalProjectObject, InternalSettingObject -from utils.common_utils import add_temp_file_to_project, generate_pil_image, generate_temp_file, get_current_user_uuid, save_or_host_file, save_or_host_file_bytes +from utils.common_utils import add_temp_file_to_project, convert_bytes_to_file, generate_pil_image, generate_temp_file, get_current_user_uuid, save_or_host_file, save_or_host_file_bytes from utils.constants import ImageStage from utils.data_repo.data_repo import DataRepo from shared.constants import AnimationStyleType +from utils.media_processor.video import VideoProcessor from utils.ml_processor.ml_interface import get_ml_client from utils.ml_processor.replicate.constants import REPLICATE_MODEL from ui_components.models import InternalFileObject @@ -128,121 +128,6 @@ def save_uploaded_image(image, project_uuid, frame_uuid, save_type): print(f"Failed to save image file due to: {str(e)}") return None -def create_individual_clip(timing_uuid): - data_repo = DataRepo() - timing: InternalFrameTimingObject = data_repo.get_timing_from_uuid( - timing_uuid) - - if timing.animation_style == "": - project_setting = data_repo.get_project_setting( - timing.project.uuid) - animation_style = project_setting.default_animation_style - else: - animation_style = timing.animation_style - - if animation_style == AnimationStyleType.INTERPOLATION.value: - output_video = prompt_interpolation_model(timing_uuid) - - elif animation_style == AnimationStyleType.DIRECT_MORPHING.value: - output_video = create_video_without_interpolation(timing_uuid) - - return output_video - -#returns a video generated through interpolating frames between the current frame and the next frame -def prompt_interpolation_model(timing_uuid) -> InternalFileType: - data_repo = DataRepo() - timing: InternalFrameTimingObject = data_repo.get_timing_from_uuid( - timing_uuid) - - img1 = timing.primary_image_location - next_timing = data_repo.get_next_timing(timing_uuid) - img2 = next_timing.primary_image_location - - 
if not img1.startswith("http"): - img1 = open(img1, "rb") - - if not img2.startswith("http"): - img2 = open(img2, "rb") - - ml_client = get_ml_client() - output = ml_client.predict_model_output(REPLICATE_MODEL.google_frame_interpolation, frame1=img1, frame2=img2, - times_to_interpolate=timing.interpolation_steps) - file_name = ''.join(random.choices( - string.ascii_lowercase + string.digits, k=16)) + ".mp4" - - video_location = "videos/" + timing.project.uuid + \ - "/assets/videos/0_raw/" + str(file_name) - - temp_output_file = generate_temp_file(output, '.mp4') - - video_bytes = None - with open(temp_output_file.name, 'rb') as f: - video_bytes = f.read() - - hosted_url = save_or_host_file_bytes(video_bytes, video_location) - file_data = { - "name": file_name, - "type": InternalFileType.VIDEO.value, - "project_id": timing.project.uuid, - "tag": InternalFileTag.GENERATED_VIDEO.value - } - - if hosted_url: - file_data["hosted_url"] = hosted_url - else: - file_data["local_path"] = video_location - - video_file = data_repo.create_file(**file_data) - - return video_file - -def create_video_without_interpolation(timing_uuid): - data_repo = DataRepo() - timing: InternalFrameTimingObject = data_repo.get_timing_from_uuid( - timing_uuid) - - image_path_or_url = timing.primary_image_location - - video_location = "videos/" + timing.project.uuid + "/assets/videos/0_raw/" + \ - ''.join(random.choices(string.ascii_lowercase + - string.digits, k=16)) + ".mp4" - - os.makedirs(os.path.dirname(video_location), exist_ok=True) - - if image_path_or_url.startswith("http://") or image_path_or_url.startswith("https://"): - response = r.get(image_path_or_url) - image = np.asarray(bytearray(response.content), dtype="uint8") - image = cv2.imdecode(image, cv2.IMREAD_COLOR) - else: - image = cv2.imread(image_path_or_url) - - if image is None: - raise ValueError( - "Could not read the image. 
Please provide a valid image path or URL.") - - height, width, _ = image.shape - fourcc = cv2.VideoWriter_fourcc(*"avc1") - fps = int(1 / 0.1) - video_writer = cv2.VideoWriter( - video_location, fourcc, fps, (width, height)) - - for _ in range(fps): - video_writer.write(image) - - video_writer.release() - - unique_file_name = str(uuid.uuid4()) - file_data = { - "name": unique_file_name, - "type": InternalFileType.VIDEO.value, - "local_path": video_location, - "tag": InternalFileTag.GENERATED_VIDEO.value - } - - video_file: InternalFileObject = data_repo.create_file(**file_data) - - return video_file - def create_alpha_mask(size, edge_blur_radius): mask = Image.new('L', size, 0) @@ -890,190 +775,6 @@ def rotate_image(location, degree): return rotated_image -# returns the timed_clip, which is the interpolated video with correct length -def create_or_get_single_preview_video(timing_uuid): - data_repo = DataRepo() - - timing: InternalFrameTimingObject = data_repo.get_timing_from_uuid( - timing_uuid) - project_details: InternalSettingObject = data_repo.get_project_setting( - timing.project.uuid) - - if not timing.interpolated_clip: - data_repo.update_specific_timing(timing_uuid, interpolation_steps=3) - interpolated_video: InternalFileObject = prompt_interpolation_model( - timing_uuid) - data_repo.update_specific_timing( - timing_uuid, interpolated_clip_id=interpolated_video.uuid) - - if not timing.timed_clip: - timing = data_repo.get_timing_from_uuid(timing_uuid) - - temp_video_file = None - if timing.interpolated_clip.hosted_url: - temp_video_file = generate_temp_file(timing.interpolated_clip.hosted_url, '.mp4') - - file_path = temp_video_file.name if temp_video_file else timing.interpolated_clip.local_path - clip = VideoFileClip(file_path) - - number_text = TextClip(str(timing.aux_frame_index), - fontsize=24, color='white') - number_background = TextClip(" ", fontsize=24, color='black', bg_color='black', size=( - number_text.w + 10, number_text.h + 10)) - 
number_background = number_background.set_position( - ('left', 'top')).set_duration(clip.duration) - number_text = number_text.set_position( - (number_background.w - number_text.w - 5, number_background.h - number_text.h - 5)).set_duration(clip.duration) - clip_with_number = CompositeVideoClip([clip, number_background, number_text]) - - # clip_with_number.write_videofile(timing.interpolated_clip.local_path) - # temp_output_file = generate_temp_file(timing.interpolated_clip.location, '.mp4') - clip_with_number.write_videofile(filename=file_path, codec='libx264', audio_codec='aac') - - if temp_video_file: - video_bytes = None - with open(file_path, 'rb') as f: - video_bytes = f.read() - - hosted_url = save_or_host_file_bytes(video_bytes, timing.interpolated_clip.local_path) - data_repo.update_file(timing.interpolated_clip.uuid, hosted_url=hosted_url) - - os.remove(temp_video_file.name) - - # timed_clip has the correct length (equal to the time difference between the current and the next frame) - # which the interpolated video may or maynot have - clip_duration = calculate_desired_duration_of_individual_clip( - timing_uuid) - data_repo.update_specific_timing( - timing_uuid, clip_duration=clip_duration) - - # TODO: fix refetching of variables - timing: InternalFrameTimingObject = data_repo.get_timing_from_uuid( - timing_uuid) - output_video = update_speed_of_video_clip( - timing.interpolated_clip, True, timing_uuid) - data_repo.update_specific_timing( - timing_uuid, timed_clip_id=output_video.uuid) - - # adding audio if the audio file is present - if project_details.audio: - audio_bytes = get_audio_bytes_for_slice(timing_uuid) - add_audio_to_video_slice(timing.timed_clip, audio_bytes) - - timing: InternalFrameTimingObject = data_repo.get_timing_from_uuid( - timing_uuid) - return timing.timed_clip - -#preview_clips have frame numbers on them. 
Preview clip is generated from index-2 to index+2 frames -def create_full_preview_video(timing_uuid, speed=1) -> InternalFileObject: - data_repo = DataRepo() - timing: InternalFrameTimingObject = data_repo.get_timing_from_uuid( - timing_uuid) - timing_details: List[InternalFrameTimingObject] = data_repo.get_timing_list_from_project( - timing.project.uuid) - index_of_item = timing.aux_frame_index - - num_timing_details = len(timing_details) - clips = [] - - temp_file_list = [] - - for i in range(index_of_item - 2, index_of_item + 3): - - if i < 0 or i >= num_timing_details-1: - continue - - primary_variant_location = timing_details[i].primary_image_location - - print( - f"primary_variant_location for i={i}: {primary_variant_location}") - - if not primary_variant_location: - break - - preview_video = create_or_get_single_preview_video( - timing_details[i].uuid) - - clip = VideoFileClip(preview_video.location) - - number_text = TextClip(str(i), fontsize=24, color='white') - number_background = TextClip(" ", fontsize=24, color='black', bg_color='black', size=( - number_text.w + 10, number_text.h + 10)) - number_background = number_background.set_position( - ('left', 'top')).set_duration(clip.duration) - number_text = number_text.set_position( - (number_background.w - number_text.w - 5, number_background.h - number_text.h - 5)).set_duration(clip.duration) - - clip_with_number = CompositeVideoClip( - [clip, number_background, number_text]) - - # remove existing preview video - temp_file = tempfile.NamedTemporaryFile(delete=False, suffix=".mp4", mode='wb') - temp_file_list.append(temp_file) - clip_with_number.write_videofile(temp_file.name, codec='libx264', bitrate='3000k') - video_bytes = None - with open(temp_file.name, 'rb') as f: - video_bytes = f.read() - - hosted_url = save_or_host_file_bytes(video_bytes, preview_video.local_path) - if hosted_url: - data_repo.update_file(preview_video.uuid, hosted_url=hosted_url) - - clips.append(preview_video) - - # if 
preview_video.local_path: - # os.remove(preview_video.local_path) - - print(clips) - video_clips = [] - - for v in clips: - path = v.location - if 'http' in path: - temp_file = generate_temp_file(path) - temp_file_list.append(temp_file) - path = temp_file.name - - video_clips.append(VideoFileClip(path)) - - # video_clips = [VideoFileClip(v.location) for v in clips] - combined_clip = concatenate_videoclips(video_clips) - output_filename = str(uuid.uuid4()) + ".mp4" - video_location = f"videos/{timing.project.uuid}/assets/videos/1_final/{output_filename}" - - temp_output_file = tempfile.NamedTemporaryFile(delete=False, suffix=".mp4", mode='wb') - combined_clip = combined_clip.fx(vfx.speedx, speed) - combined_clip.write_videofile(temp_output_file.name) - - video_bytes = None - with open(temp_output_file.name, 'rb') as f: - video_bytes = f.read() - - hosted_url = save_or_host_file_bytes(video_bytes, video_location) - - video_data = { - "name": output_filename, - "type": InternalFileType.VIDEO.value, - "project_id": timing.project.uuid - } - if hosted_url: - video_data.update({"hosted_url": hosted_url}) - else: - video_data.update({"local_path": video_location}) - - # if speed != 1.0: - # clip = VideoFileClip(video_location) - # output_clip = clip.fx(vfx.speedx, speed) - # os.remove(video_location) - # output_clip.write_videofile( - # video_location, codec="libx264", preset="fast") - - for file in temp_file_list: - os.remove(file.name) - - video_file = data_repo.create_file(**video_data) - - return video_file def move_frame(direction, timing_uuid): data_repo = DataRepo() @@ -1094,7 +795,7 @@ def move_frame(direction, timing_uuid): data_repo.update_specific_timing(timing.uuid, aux_frame_index=timing.aux_frame_index + 1) # updating clip_duration - calculate_desired_duration_of_each_clip(timing.project.uuid) + update_clip_duration_of_all_timing_frames(timing.project.uuid) def delete_frame(timing_uuid): data_repo = DataRepo() @@ -2051,140 +1752,6 @@ def 
prompt_model_dreambooth(timing_uuid, source_image_file: InternalFileObject): return None - -def get_duration_from_video(input_video_file: InternalFileObject): - input_video = input_video_file.local_path - video_capture = cv2.VideoCapture(input_video) - frame_rate = video_capture.get(cv2.CAP_PROP_FPS) - total_duration = video_capture.get(cv2.CAP_PROP_FRAME_COUNT) / frame_rate - video_capture.release() - return total_duration - - -# get audio_bytes of correct duration for a given frame -def current_individual_clip_element(timing_uuid): - def generate_individual_clip(timing_uuid, quality): - data_repo = DataRepo() - timing: InternalFrameTimingObject = data_repo.get_timing_from_uuid(timing_uuid) - - if quality == 'full': - interpolation_steps = calculate_dynamic_interpolations_steps(timing.clip_duration) - elif quality == 'preview': - interpolation_steps = 3 - data_repo.update_specific_timing(timing_uuid, interpolation_steps=interpolation_steps, interpolated_clip_id=None) - video = create_individual_clip(timing_uuid) - data_repo.update_specific_timing(timing_uuid, interpolated_clip_id=video.uuid) - output_video = update_speed_of_video_clip(timing.interpolated_clip, True, timing_uuid) - - timing = data_repo.get_timing_from_uuid(timing_uuid) - data_repo.update_specific_timing(timing_uuid, timed_clip_id=output_video.uuid) - return output_video - - data_repo = DataRepo() - timing: InternalFrameTimingObject = data_repo.get_timing_from_uuid(timing_uuid) - idx = timing.aux_frame_index - - st.info(f"Individual Clip for #{idx+1}:") - if timing.timed_clip: - st.video(timing.timed_clip.location) - - if timing.interpolation_steps is not None: - - if calculate_dynamic_interpolations_steps(timing.clip_duration) > timing.interpolation_steps: - st.error("Low Resolution") - if st.button("Generate Full Resolution Clip", key=f"generate_full_resolution_video_{idx}"): - generate_individual_clip(timing.uuid, 'full') - st.experimental_rerun() - else: - st.success("Full Resolution") - else: - 
st.error(''' - **----------------------------------------** - - --------- - - ================== - - **No Individual Clip Created Yet** - - ================== - - --------- - - **----------------------------------------** - - - ''') - gen1, gen2 = st.columns([1, 1]) - - with gen1: - if st.button("Generate Low-Resolution Clip", key=f"generate_preview_video_{idx}"): - generate_individual_clip(timing.uuid, 'preview') - st.experimental_rerun() - with gen2: - if st.button("Generate Full Resolution Clip", key=f"generate_full_resolution_video_{idx}"): - generate_individual_clip(timing.uuid, 'full') - st.experimental_rerun() - - -def update_animation_style_element(timing_uuid, horizontal=True): - data_repo = DataRepo() - timing = data_repo.get_timing_from_uuid(timing_uuid) - idx = timing.aux_frame_index - - animation_styles = AnimationStyleType.value_list() - - if f"animation_style_index_{idx}" not in st.session_state: - st.session_state[f"animation_style_index_{idx}"] = animation_styles.index( - timing.animation_style) - st.session_state[f"animation_style_{idx}"] = timing.animation_style - - st.session_state[f"animation_style_{idx}"] = st.radio( - "Animation style:", animation_styles, index=st.session_state[f"animation_style_index_{idx}"], key=f"animation_style_radio_{idx}", help="This is for the morph from the current frame to the next one.", horizontal=horizontal) - - if st.session_state[f"animation_style_{idx}"] != timing.animation_style: - st.session_state[f"animation_style_index_{idx}"] = animation_styles.index(st.session_state[f"animation_style_{idx}"]) - data_repo.update_specific_timing(timing.uuid, animation_style=st.session_state[f"animation_style_{idx}"]) - st.experimental_rerun() - - -def current_preview_video_element(timing_uuid): - data_repo = DataRepo() - timing: InternalFrameTimingObject = data_repo.get_timing_from_uuid(timing_uuid) - idx = timing.aux_frame_index - st.info("Preview Video in Context:") - - preview_video_1, preview_video_2 = st.columns([2.5, 
1]) - - with preview_video_1: - if timing.preview_video: - st.video(timing.preview_video.location) - else: - st.error(''' - **----------------------------------------** - - --------- - - ================== - - **No Preview Video Created Yet** - - ================== - - --------- - - **----------------------------------------** - ''') - - with preview_video_2: - - if st.button("Generate New Preview Video", key=f"generate_preview_{idx}"): - preview_video = create_full_preview_video( - timing.uuid, 1.0) - data_repo.update_specific_timing( - timing.uuid, preview_video_id=preview_video.uuid) - st.experimental_rerun() - def get_audio_bytes_for_slice(timing_uuid): data_repo = DataRepo() @@ -2205,123 +1772,8 @@ def get_audio_bytes_for_slice(timing_uuid): return audio_bytes -def slice_part_of_video(project_name, index_of_current_item, video_start_percentage, video_end_percentage, slice_name, timing_details): - input_video = timing_details[int( - index_of_current_item)]["interpolated_video"] - total_clip_duration = get_duration_from_video(input_video) - start_time = float(video_start_percentage) * float(total_clip_duration) - end_time = float(video_end_percentage) * float(total_clip_duration) - clip = VideoFileClip(input_video).subclip( - t_start=start_time, t_end=end_time) - output_video = "videos/" + \ - str(project_name) + "/assets/videos/0_raw/" + str(slice_name) + ".mp4" - clip.write_videofile(output_video, audio=False) - clip.close() - - -def update_speed_of_video_clip(video_file: InternalFileObject, save_to_new_location, timing_uuid) -> InternalFileObject: - data_repo = DataRepo() - - timing: InternalFrameTimingObject = data_repo.get_timing_from_uuid( - timing_uuid) - - desired_duration = timing.clip_duration - animation_style = timing.animation_style - location_of_video = video_file.local_path - - temp_video_file, temp_output_file = None, None - if video_file.hosted_url and is_s3_image_url(video_file.hosted_url): - temp_video_file = 
generate_temp_file(video_file.hosted_url, '.mp4') - - temp_output_file = tempfile.NamedTemporaryFile(delete=False, suffix=".mp4", mode='wb') - location_of_video = temp_video_file.name if temp_video_file else video_file.local_path - - new_file_name = ''.join(random.choices( - string.ascii_lowercase + string.digits, k=16)) + ".mp4" - new_file_location = "videos/" + \ - str(timing.project.uuid) + \ - "/assets/videos/1_final/" + str(new_file_name) - - if animation_style == AnimationStyleType.DIRECT_MORPHING.value: - # Load the video clip - clip = VideoFileClip(location_of_video) - - clip = clip.set_fps(120) - - # Calculate the number of frames to keep - input_duration = clip.duration - total_frames = len(list(clip.iter_frames())) - target_frames = int(total_frames * (desired_duration / input_duration)) - - # Determine which frames to keep - keep_every_n_frames = total_frames / target_frames - frames_to_keep = [int(i * keep_every_n_frames) - for i in range(target_frames)] - - # Create a new video clip with the selected frames - output_clip = concatenate_videoclips( - [clip.subclip(i/clip.fps, (i+1)/clip.fps) for i in frames_to_keep]) - - output_clip.write_videofile(filename=temp_output_file.name, codec="libx265") - - elif animation_style == AnimationStyleType.INTERPOLATION.value: - clip = VideoFileClip(location_of_video) - input_video_duration = clip.duration - desired_duration = timing.clip_duration - desired_speed_change = float( - input_video_duration) / float(desired_duration) - - print("Desired Speed Change: " + str(desired_speed_change)) - - # Apply the speed change using moviepy - output_clip = clip.fx(vfx.speedx, desired_speed_change) - - output_clip.write_videofile(filename=temp_output_file.name, codec="libx264", preset="fast") - - with open(temp_output_file.name, 'rb') as f: - video_bytes = f.read() - - hosted_url = save_or_host_file_bytes(video_bytes, new_file_location) - - # TODO: remove save_to_new_location - if hosted_url: - video_file: InternalFileObject 
= data_repo.create_file( - name=new_file_name, type=InternalFileType.VIDEO.value, hosted_url=hosted_url) - else: - video_file: InternalFileObject = data_repo.create_file( - name=new_file_name, type=InternalFileType.VIDEO.value, local_path=new_file_location) - - if temp_video_file: - os.remove(temp_video_file.name) - - if temp_output_file: - os.remove(temp_output_file.name) - - return video_file - - -def calculate_desired_duration_of_individual_clip(timing_uuid): - data_repo = DataRepo() - timing: InternalFrameTimingObject = data_repo.get_timing_from_uuid( - timing_uuid) - timing_details = data_repo.get_timing_list_from_project( - timing.project.uuid) - length_of_list = len(timing_details) - - # last frame - if timing.aux_frame_index == length_of_list - 1: - time_of_frame = timing.frame_time - total_duration_of_frame = 0.0 # can be changed - else: - time_of_frame = timing.frame_time - time_of_next_frame = data_repo.get_next_timing(timing_uuid).frame_time - total_duration_of_frame = float( - time_of_next_frame) - float(time_of_frame) - - return total_duration_of_frame - - -def calculate_desired_duration_of_each_clip(project_uuid): +# calculates and updates clip duration of all the timings +def update_clip_duration_of_all_timing_frames(project_uuid): data_repo = DataRepo() timing_details: List[InternalFrameTimingObject] = data_repo.get_timing_list_from_project( project_uuid) @@ -2350,8 +1802,6 @@ def calculate_desired_duration_of_each_clip(project_uuid): time_of_next_frame) - float(time_of_frame) duration_of_static_time = 0.0 - duration_of_morph = float( - total_duration_of_frame) - float(duration_of_static_time) data_repo.update_specific_timing( timing_item.uuid, clip_duration=total_duration_of_frame) @@ -2506,199 +1956,6 @@ def create_timings_row_at_frame_number(project_uuid, index_of_frame, frame_time= return timing -def add_audio_to_video_slice(video_file, audio_bytes): - video_location = video_file.local_path - # Save the audio bytes to a temporary file - 
audio_file = "temp_audio.wav" - with open(audio_file, 'wb') as f: - f.write(audio_bytes.getvalue()) - - # Create an input video stream - video_stream = ffmpeg.input(video_location) - - # Create an input audio stream - audio_stream = ffmpeg.input(audio_file) - - # Add the audio stream to the video stream - output_stream = ffmpeg.output(video_stream, audio_stream, "output_with_audio.mp4", - vcodec='copy', acodec='aac', strict='experimental') - - # Run the ffmpeg command - output_stream.run() - - # Remove the original video file and the temporary audio file - os.remove(video_location) - os.remove(audio_file) - - # TODO: handle online update in this case - # Rename the output file to have the same name as the original video file - os.rename("output_with_audio.mp4", video_location) - - -def calculate_dynamic_interpolations_steps(clip_duration): - - if clip_duration < 0.17: - interpolation_steps = 2 - elif clip_duration < 0.3: - interpolation_steps = 3 - elif clip_duration < 0.57: - interpolation_steps = 4 - elif clip_duration < 1.1: - interpolation_steps = 5 - elif clip_duration < 2.17: - interpolation_steps = 6 - elif clip_duration < 4.3: - interpolation_steps = 7 - else: - interpolation_steps = 8 - return interpolation_steps - - -def render_video(final_video_name, project_uuid, quality, file_tag=InternalFileTag.GENERATED_VIDEO.value): - data_repo = DataRepo() - - timing_details: List[InternalFrameTimingObject] = data_repo.get_timing_list_from_project( - project_uuid) - - calculate_desired_duration_of_each_clip(project_uuid) - - total_number_of_videos = len(timing_details) - 1 - - for i in range(0, total_number_of_videos): - index_of_current_item = i - current_timing: InternalFrameTimingObject = data_repo.get_timing_from_frame_number( - project_uuid, i) - - timing = timing_details[i] - if quality == VideoQuality.HIGH.value: - data_repo.update_specific_timing( - current_timing.uuid, timed_clip_id=None) - interpolation_steps = calculate_dynamic_interpolations_steps( - 
timing_details[index_of_current_item].clip_duration) - if not timing.interpolation_steps or timing.interpolation_steps < interpolation_steps: - data_repo.update_specific_timing( - current_timing.uuid, interpolation_steps=interpolation_steps, interpolated_clip_id=None) - else: - if not timing.interpolation_steps or timing.interpolation_steps < 3: - data_repo.update_specific_timing( - current_timing.uuid, interpolation_steps=3) - - if not timing.interpolated_clip: - video_location = create_individual_clip(current_timing.uuid) - data_repo.update_specific_timing( - current_timing.uuid, interpolated_clip_id=video_location.uuid) - - project_settings: InternalSettingObject = data_repo.get_project_setting( - timing.project.uuid) - timing_details: List[InternalFrameTimingObject] = data_repo.get_timing_list_from_project( - timing.project.uuid) - total_number_of_videos = len(timing_details) - 2 - - for i in timing_details: - index_of_current_item = timing_details.index(i) - timing = timing_details[index_of_current_item] - current_timing: InternalFrameTimingObject = data_repo.get_timing_from_frame_number( - timing.project.uuid, index_of_current_item) - if index_of_current_item <= total_number_of_videos: - if not current_timing.timed_clip: - desired_duration = current_timing.clip_duration - location_of_input_video_file = current_timing.interpolated_clip - - output_video = update_speed_of_video_clip( - location_of_input_video_file, True, timing.uuid) - - if quality == VideoQuality.PREVIEW.value: - print("") - ''' - clip = VideoFileClip(location_of_output_video) - - number_text = TextClip(str(index_of_current_item), fontsize=24, color='white') - number_background = TextClip(" ", fontsize=24, color='black', bg_color='black', size=(number_text.w + 10, number_text.h + 10)) - number_background = number_background.set_position(('right', 'bottom')).set_duration(clip.duration) - number_text = number_text.set_position((number_background.w - number_text.w - 5, number_background.h - 
number_text.h - 5)).set_duration(clip.duration) - - clip_with_number = CompositeVideoClip([clip, number_background, number_text]) - - # remove existing preview video - os.remove(location_of_output_video) - clip_with_number.write_videofile(location_of_output_video, codec='libx264', bitrate='3000k') - ''' - - data_repo.update_specific_timing( - current_timing.uuid, timed_clip_id=output_video.uuid) - - video_list = [] - temp_file_list = [] - - timing_details: List[InternalFrameTimingObject] = data_repo.get_timing_list_from_project( - timing.project.uuid) - - # TODO: CORRECT-CODE - for i in timing_details: - index_of_current_item = timing_details.index(i) - current_timing: InternalFrameTimingObject = data_repo.get_timing_from_frame_number( - project_uuid, index_of_current_item) - if index_of_current_item <= total_number_of_videos: - temp_video_file = None - if current_timing.timed_clip.hosted_url: - temp_video_file = generate_temp_file(current_timing.timed_clip.hosted_url, '.mp4') - temp_file_list.append(temp_video_file) - - file_path = temp_video_file.name if temp_video_file else current_timing.timed_clip.local_path - - video_list.append(file_path) - - video_clip_list = [VideoFileClip(v) for v in video_list] - finalclip = concatenate_videoclips(video_clip_list) - - output_video_file = f"videos/{timing.project.uuid}/assets/videos/2_completed/{final_video_name}.mp4" - if project_settings.audio: - temp_audio_file = None - if 'http' in project_settings.audio.location: - temp_audio_file = generate_temp_file(project_settings.audio.location, '.mp4') - temp_file_list.append(temp_audio_file) - - audio_location = temp_audio_file.name if temp_audio_file else project_settings.audio.location - - audio_clip = AudioFileClip(audio_location) - finalclip = finalclip.set_audio(audio_clip) - - temp_video_file = tempfile.NamedTemporaryFile(delete=False, suffix=".mp4") - - finalclip.write_videofile( - temp_video_file.name, - fps=60, # or 60 if your original video is 60fps - 
audio_bitrate="128k", - bitrate="5000k", - codec="libx264", - audio_codec="aac" - ) - - temp_video_file.close() - video_bytes = None - with open(temp_video_file.name, "rb") as f: - video_bytes = f.read() - - hosted_url = save_or_host_file_bytes(video_bytes, output_video_file) - - file_data = { - "name": final_video_name, - "type": InternalFileType.VIDEO.value, - "tag": file_tag, - "project_id": project_uuid - } - - if hosted_url: - file_data.update({"hosted_url": hosted_url}) - else: - file_data.update({"local_path": output_video_file}) - - data_repo.create_file(**file_data) - - for file in temp_file_list: - os.remove(file.name) - - def create_depth_mask_image(input_image, layer, timing_uuid): if not input_image.startswith("http"): input_image = open(input_image, "rb") diff --git a/ui_components/methods/video_methods.py b/ui_components/methods/video_methods.py new file mode 100644 index 00000000..aed50677 --- /dev/null +++ b/ui_components/methods/video_methods.py @@ -0,0 +1,405 @@ +import os +import tempfile +from typing import List +import uuid +from moviepy.editor import concatenate_videoclips, TextClip, VideoFileClip, vfx +from moviepy.video.compositing.CompositeVideoClip import CompositeVideoClip + +from backend.models import InternalFileObject +from ui_components.methods.common_methods import add_audio_to_video_slice, calculate_desired_duration_of_individual_clip, get_audio_bytes_for_slice, update_speed_of_video_clip +from ui_components.models import InternalFrameTimingObject, InternalSettingObject +from utils.common_utils import convert_bytes_to_file, generate_temp_file, save_or_host_file_bytes +from utils.data_repo.data_repo import DataRepo +from utils.media_processor.interpolator import VideoInterpolator + + +# returns the timed_clip, which is the interpolated video with correct length +def create_or_get_single_preview_video(timing_uuid): + data_repo = DataRepo() + + timing: InternalFrameTimingObject = data_repo.get_timing_from_uuid( + timing_uuid) + 
project_details: InternalSettingObject = data_repo.get_project_setting( + timing.project.uuid) + + if not timing.interpolated_clip: + data_repo.update_specific_timing(timing_uuid, interpolation_steps=3) + interpolated_video: InternalFileObject = VideoInterpolator.video_through_frame_interpolation( + timing_uuid) + data_repo.update_specific_timing( + timing_uuid, interpolated_clip_id=interpolated_video.uuid) + + if not timing.timed_clip: + timing = data_repo.get_timing_from_uuid(timing_uuid) + + temp_video_file = None + if timing.interpolated_clip.hosted_url: + temp_video_file = generate_temp_file(timing.interpolated_clip.hosted_url, '.mp4') + + file_path = temp_video_file.name if temp_video_file else timing.interpolated_clip.local_path + clip = VideoFileClip(file_path) + + number_text = TextClip(str(timing.aux_frame_index), + fontsize=24, color='white') + number_background = TextClip(" ", fontsize=24, color='black', bg_color='black', size=( + number_text.w + 10, number_text.h + 10)) + number_background = number_background.set_position( + ('left', 'top')).set_duration(clip.duration) + number_text = number_text.set_position( + (number_background.w - number_text.w - 5, number_background.h - number_text.h - 5)).set_duration(clip.duration) + clip_with_number = CompositeVideoClip([clip, number_background, number_text]) + + clip_with_number.write_videofile(filename=file_path, codec='libx264', audio_codec='aac') + + if temp_video_file: + video_bytes = None + with open(file_path, 'rb') as f: + video_bytes = f.read() + + hosted_url = save_or_host_file_bytes(video_bytes, timing.interpolated_clip.local_path) + data_repo.update_file(timing.interpolated_clip.uuid, hosted_url=hosted_url) + + os.remove(temp_video_file.name) + + # timed_clip has the correct length (equal to the time difference between the current and the next frame) + # which the interpolated video may or maynot have + clip_duration = calculate_desired_duration_of_individual_clip(timing_uuid) + 
data_repo.update_specific_timing(timing_uuid, clip_duration=clip_duration) + + timing: InternalFrameTimingObject = data_repo.get_timing_from_uuid(timing_uuid) + output_video = update_speed_of_video_clip(timing.interpolated_clip, timing_uuid) + data_repo.update_specific_timing(timing_uuid, timed_clip_id=output_video.uuid) + + # adding audio if the audio file is present + if project_details.audio: + audio_bytes = get_audio_bytes_for_slice(timing_uuid) + add_audio_to_video_slice(timing.timed_clip, audio_bytes) + + timing: InternalFrameTimingObject = data_repo.get_timing_from_uuid( + timing_uuid) + return timing.timed_clip + + +# preview_clips have frame numbers on them. Preview clip is generated from index-2 to index+2 frames +def create_full_preview_video(timing_uuid, speed=1) -> InternalFileObject: + data_repo = DataRepo() + timing: InternalFrameTimingObject = data_repo.get_timing_from_uuid( + timing_uuid) + timing_details: List[InternalFrameTimingObject] = data_repo.get_timing_list_from_project( + timing.project.uuid) + index_of_item = timing.aux_frame_index + + num_timing_details = len(timing_details) + clips = [] + + temp_file_list = [] + + for i in range(index_of_item - 2, index_of_item + 3): + if i < 0 or i >= num_timing_details-1: + continue + + primary_variant_location = timing_details[i].primary_image_location + print(f"primary_variant_location for i={i}: {primary_variant_location}") + + if not primary_variant_location: + break + + preview_video = create_or_get_single_preview_video(timing_details[i].uuid) + + clip = VideoFileClip(preview_video.location) + + number_text = TextClip(str(i), fontsize=24, color='white') + number_background = TextClip(" ", fontsize=24, color='black', bg_color='black', size=( + number_text.w + 10, number_text.h + 10)) + number_background = number_background.set_position( + ('left', 'top')).set_duration(clip.duration) + number_text = number_text.set_position( + (number_background.w - number_text.w - 5, number_background.h - 
number_text.h - 5)).set_duration(clip.duration) + + clip_with_number = CompositeVideoClip( + [clip, number_background, number_text]) + + # remove existing preview video + temp_file = tempfile.NamedTemporaryFile(delete=False, suffix=".mp4", mode='wb') + temp_file_list.append(temp_file) + clip_with_number.write_videofile(temp_file.name, codec='libx264', bitrate='3000k') + video_bytes = None + with open(temp_file.name, 'rb') as f: + video_bytes = f.read() + + hosted_url = save_or_host_file_bytes(video_bytes, preview_video.local_path) + if hosted_url: + data_repo.update_file(preview_video.uuid, hosted_url=hosted_url) + + clips.append(preview_video) + + print(clips) + video_clips = [] + + for v in clips: + path = v.location + if 'http' in path: + temp_file = generate_temp_file(path) + temp_file_list.append(temp_file) + path = temp_file.name + + video_clips.append(VideoFileClip(path)) + + # video_clips = [VideoFileClip(v.location) for v in clips] + combined_clip = concatenate_videoclips(video_clips) + output_filename = str(uuid.uuid4()) + ".mp4" + video_location = f"videos/{timing.project.uuid}/assets/videos/1_final/{output_filename}" + + temp_output_file = tempfile.NamedTemporaryFile(delete=False, suffix=".mp4", mode='wb') + combined_clip = combined_clip.fx(vfx.speedx, speed) + combined_clip.write_videofile(temp_output_file.name) + + video_bytes = None + with open(temp_output_file.name, 'rb') as f: + video_bytes = f.read() + + video_file = convert_bytes_to_file( + video_location, + "video/mp4", + video_bytes, + timing.project.uuid + ) + + for file in temp_file_list: + os.remove(file.name) + + return video_file + +def update_speed_of_video_clip(video_file: InternalFileObject, timing_uuid) -> InternalFileObject: + data_repo = DataRepo() + + timing: InternalFrameTimingObject = data_repo.get_timing_from_uuid( + timing_uuid) + + desired_duration = timing.clip_duration + animation_style = st.session_state[f"{timing_uuid}_animation_style"] + + temp_video_file = None + if 
video_file.hosted_url and is_s3_image_url(video_file.hosted_url): + temp_video_file = generate_temp_file(video_file.hosted_url, '.mp4') + + location_of_video = temp_video_file.name if temp_video_file else video_file.local_path + + new_file_name = ''.join(random.choices(string.ascii_lowercase + string.digits, k=16)) + ".mp4" + new_file_location = "videos/" + str(timing.project.uuid) + "/assets/videos/1_final/" + str(new_file_name) + + video_bytes = VideoProcessor.update_video_speed( + location_of_video, + animation_style, + desired_duration + ) + + hosted_url = save_or_host_file_bytes(video_bytes, new_file_location) + + if hosted_url: + video_file: InternalFileObject = data_repo.create_file( + name=new_file_name, type=InternalFileType.VIDEO.value, hosted_url=hosted_url) + else: + video_file: InternalFileObject = data_repo.create_file( + name=new_file_name, type=InternalFileType.VIDEO.value, local_path=new_file_location) + + if temp_video_file: + os.remove(temp_video_file.name) + + return video_file + + +def calculate_desired_duration_of_individual_clip(timing_uuid): + data_repo = DataRepo() + timing: InternalFrameTimingObject = data_repo.get_timing_from_uuid( + timing_uuid) + timing_details = data_repo.get_timing_list_from_project( + timing.project.uuid) + length_of_list = len(timing_details) + + # last frame + if timing.aux_frame_index == length_of_list - 1: + time_of_frame = timing.frame_time + total_duration_of_frame = 0.0 # can be changed + else: + time_of_frame = timing.frame_time + time_of_next_frame = data_repo.get_next_timing(timing_uuid).frame_time + total_duration_of_frame = float( + time_of_next_frame) - float(time_of_frame) + + return total_duration_of_frame + + +def add_audio_to_video_slice(video_file, audio_bytes): + video_location = video_file.local_path + # Save the audio bytes to a temporary file + audio_file = "temp_audio.wav" + with open(audio_file, 'wb') as f: + f.write(audio_bytes.getvalue()) + + # Create an input video stream + video_stream = 
ffmpeg.input(video_location) + + # Create an input audio stream + audio_stream = ffmpeg.input(audio_file) + + # Add the audio stream to the video stream + output_stream = ffmpeg.output(video_stream, audio_stream, "output_with_audio.mp4", + vcodec='copy', acodec='aac', strict='experimental') + + # Run the ffmpeg command + output_stream.run() + + # Remove the original video file and the temporary audio file + os.remove(video_location) + os.remove(audio_file) + + # TODO: handle online update in this case + # Rename the output file to have the same name as the original video file + os.rename("output_with_audio.mp4", video_location) + + + +def render_video(final_video_name, project_uuid, quality, file_tag=InternalFileTag.GENERATED_VIDEO.value): + data_repo = DataRepo() + + timing_details: List[InternalFrameTimingObject] = data_repo.get_timing_list_from_project( + project_uuid) + + update_clip_duration_of_all_timing_frames(project_uuid) + + total_number_of_videos = len(timing_details) - 1 + + for i in range(0, total_number_of_videos): + index_of_current_item = i + current_timing: InternalFrameTimingObject = data_repo.get_timing_from_frame_number( + project_uuid, i) + + timing = timing_details[i] + if quality == VideoQuality.HIGH.value: + data_repo.update_specific_timing( + current_timing.uuid, timed_clip_id=None) + interpolation_steps = calculate_dynamic_interpolations_steps( + timing_details[index_of_current_item].clip_duration) + if not timing.interpolation_steps or timing.interpolation_steps < interpolation_steps: + data_repo.update_specific_timing( + current_timing.uuid, interpolation_steps=interpolation_steps, interpolated_clip_id=None) + else: + if not timing.interpolation_steps or timing.interpolation_steps < 3: + data_repo.update_specific_timing( + current_timing.uuid, interpolation_steps=3) + + if not timing.interpolated_clip: + video_location = create_interpolated_clip(current_timing.uuid) + data_repo.update_specific_timing( + current_timing.uuid, 
interpolated_clip_id=video_location.uuid) + + project_settings: InternalSettingObject = data_repo.get_project_setting( + timing.project.uuid) + timing_details: List[InternalFrameTimingObject] = data_repo.get_timing_list_from_project( + timing.project.uuid) + total_number_of_videos = len(timing_details) - 2 + + for i in timing_details: + index_of_current_item = timing_details.index(i) + timing = timing_details[index_of_current_item] + current_timing: InternalFrameTimingObject = data_repo.get_timing_from_frame_number( + timing.project.uuid, index_of_current_item) + if index_of_current_item <= total_number_of_videos: + if not current_timing.timed_clip: + desired_duration = current_timing.clip_duration + location_of_input_video_file = current_timing.interpolated_clip + + output_video = update_speed_of_video_clip( + location_of_input_video_file, timing.uuid) + + if quality == VideoQuality.PREVIEW.value: + print("") + ''' + clip = VideoFileClip(location_of_output_video) + + number_text = TextClip(str(index_of_current_item), fontsize=24, color='white') + number_background = TextClip(" ", fontsize=24, color='black', bg_color='black', size=(number_text.w + 10, number_text.h + 10)) + number_background = number_background.set_position(('right', 'bottom')).set_duration(clip.duration) + number_text = number_text.set_position((number_background.w - number_text.w - 5, number_background.h - number_text.h - 5)).set_duration(clip.duration) + + clip_with_number = CompositeVideoClip([clip, number_background, number_text]) + + # remove existing preview video + os.remove(location_of_output_video) + clip_with_number.write_videofile(location_of_output_video, codec='libx264', bitrate='3000k') + ''' + + data_repo.update_specific_timing( + current_timing.uuid, timed_clip_id=output_video.uuid) + + video_list = [] + temp_file_list = [] + + timing_details: List[InternalFrameTimingObject] = data_repo.get_timing_list_from_project( + timing.project.uuid) + + # TODO: CORRECT-CODE + for i in 
timing_details: + index_of_current_item = timing_details.index(i) + current_timing: InternalFrameTimingObject = data_repo.get_timing_from_frame_number( + project_uuid, index_of_current_item) + if index_of_current_item <= total_number_of_videos: + temp_video_file = None + if current_timing.timed_clip.hosted_url: + temp_video_file = generate_temp_file(current_timing.timed_clip.hosted_url, '.mp4') + temp_file_list.append(temp_video_file) + + file_path = temp_video_file.name if temp_video_file else current_timing.timed_clip.local_path + + video_list.append(file_path) + + video_clip_list = [VideoFileClip(v) for v in video_list] + finalclip = concatenate_videoclips(video_clip_list) + + output_video_file = f"videos/{timing.project.uuid}/assets/videos/2_completed/{final_video_name}.mp4" + if project_settings.audio: + temp_audio_file = None + if 'http' in project_settings.audio.location: + temp_audio_file = generate_temp_file(project_settings.audio.location, '.mp4') + temp_file_list.append(temp_audio_file) + + audio_location = temp_audio_file.name if temp_audio_file else project_settings.audio.location + + audio_clip = AudioFileClip(audio_location) + finalclip = finalclip.set_audio(audio_clip) + + temp_video_file = tempfile.NamedTemporaryFile(delete=False, suffix=".mp4") + + finalclip.write_videofile( + temp_video_file.name, + fps=60, # or 60 if your original video is 60fps + audio_bitrate="128k", + bitrate="5000k", + codec="libx264", + audio_codec="aac" + ) + + temp_video_file.close() + video_bytes = None + with open(temp_video_file.name, "rb") as f: + video_bytes = f.read() + + hosted_url = save_or_host_file_bytes(video_bytes, output_video_file) + + file_data = { + "name": final_video_name, + "type": InternalFileType.VIDEO.value, + "tag": file_tag, + "project_id": project_uuid + } + + if hosted_url: + file_data.update({"hosted_url": hosted_url}) + else: + file_data.update({"local_path": output_video_file}) + + data_repo.create_file(**file_data) + + for file in 
temp_file_list: + os.remove(file.name) diff --git a/ui_components/models.py b/ui_components/models.py index 15302f2b..4335d6c2 100644 --- a/ui_components/models.py +++ b/ui_components/models.py @@ -1,5 +1,7 @@ import datetime +import streamlit as st import json +from shared.constants import AnimationStyleType from ui_components.constants import TEMP_MASK_FILE @@ -112,8 +114,8 @@ def __init__(self, **kwargs): self.notes = kwargs['notes'] if 'notes' in kwargs and kwargs["notes"] else "" self.adapter_type = kwargs['adapter_type'] if 'adapter_type' in kwargs and kwargs["adapter_type"] else None self.clip_duration = kwargs['clip_duration'] if 'clip_duration' in kwargs and kwargs["clip_duration"] else 0 - self.animation_style = kwargs['animation_style'] if 'animation_style' in kwargs and kwargs["animation_style"] else None - self.interpolation_steps = kwargs['interpolation_steps'] if 'interpolation_steps' in kwargs and kwargs["interpolation_steps"] else 0 + #self.animation_style = kwargs['animation_style'] if 'animation_style' in kwargs and kwargs["animation_style"] else None + #self.interpolation_steps = kwargs['interpolation_steps'] if 'interpolation_steps' in kwargs and kwargs["interpolation_steps"] else 0 self.low_threshold = kwargs['low_threshold'] if 'low_threshold' in kwargs and kwargs["low_threshold"] else 0 self.high_threshold = kwargs['high_threshold'] if 'high_threshold' in kwargs and kwargs["high_threshold"] else 0 self.aux_frame_index = kwargs['aux_frame_index'] if 'aux_frame_index' in kwargs else 0 @@ -154,6 +156,32 @@ def primary_variant_index(self): idx += 1 return -1 + + @property + def animation_style(self): + key = f"{self.uuid}_animation_style" + if not (key in st.session_state and st.session_state[key]): + st.session_state[key] = AnimationStyleType.INTERPOLATION.value + + return st.session_state[key] + + @animation_style.setter + def animation_style(self, val): + key = f"{self.uuid}_animation_style" + st.session_state[key] = val + + @property + def 
interpolation_steps(self): + key = f"{self.uuid}_interpolation_steps" + if not (key in st.session_state and st.session_state[key]): + st.session_state[key] = 3 + + return st.session_state[key] + + @interpolation_steps.setter + def interpolation_steps(self, val): + key = f"{self.uuid}_interpolation_steps" + st.session_state[key] = val class InternalAppSettingObject: diff --git a/ui_components/widgets/attach_audio_element.py b/ui_components/widgets/attach_audio_element.py index 24135235..080035ab 100644 --- a/ui_components/widgets/attach_audio_element.py +++ b/ui_components/widgets/attach_audio_element.py @@ -1,5 +1,5 @@ import streamlit as st -from ui_components.common_methods import save_audio_file +from ui_components.methods.common_methods import save_audio_file from ui_components.models import InternalProjectObject, InternalSettingObject from utils.data_repo.data_repo import DataRepo diff --git a/ui_components/widgets/cropping_element.py b/ui_components/widgets/cropping_element.py index 596f50ad..1f6b2055 100644 --- a/ui_components/widgets/cropping_element.py +++ b/ui_components/widgets/cropping_element.py @@ -8,7 +8,7 @@ from backend.models import InternalFileObject from shared.constants import InternalFileType -from ui_components.common_methods import apply_image_transformations, fetch_image_by_stage, inpaint_in_black_space_element, reset_zoom_element, save_zoomed_image, zoom_inputs +from ui_components.methods.common_methods import apply_image_transformations, fetch_image_by_stage, inpaint_in_black_space_element, reset_zoom_element, save_zoomed_image, zoom_inputs from ui_components.constants import WorkflowStageType from ui_components.models import InternalProjectObject, InternalSettingObject from utils.common_utils import generate_pil_image, save_or_host_file diff --git a/ui_components/widgets/frame_clip_generation_elements.py b/ui_components/widgets/frame_clip_generation_elements.py new file mode 100644 index 00000000..a73f3216 --- /dev/null +++ 
b/ui_components/widgets/frame_clip_generation_elements.py @@ -0,0 +1,146 @@ +import streamlit as st +from shared.constants import AnimationStyleType +from ui_components.methods.common_methods import create_full_preview_video, create_interpolated_clip, update_speed_of_video_clip +from ui_components.models import InternalFrameTimingObject +from utils.common_utils import convert_bytes_to_file +from utils.data_repo.data_repo import DataRepo +from utils.media_processor.interpolator import VideoInterpolator + + +# get audio_bytes of correct duration for a given frame +def current_individual_clip_element(timing_uuid): + def generate_individual_clip(timing_uuid, quality): + data_repo = DataRepo() + timing: InternalFrameTimingObject = data_repo.get_timing_from_uuid(timing_uuid) + next_timing: InternalFrameTimingObject = data_repo.get_next_timing(timing_uuid) + + if quality == 'full': + interpolation_steps = VideoInterpolator.calculate_dynamic_interpolations_steps(timing.clip_duration) + elif quality == 'preview': + interpolation_steps = 3 + + timing.interpolated_steps = interpolation_steps + img_list = [timing.source_image.location, next_timing.source_image.location] + settings = {"interpolation_steps": timing.interpolation_steps} + video_bytes = VideoInterpolator.create_interpolated_clip( + img_list, + timing.animation_style, + settings + ) + + video_location = "" + video = convert_bytes_to_file( + video_location, + "video/mp4", + video_bytes, + timing.project.uuid + ) + + data_repo.add_interpolated_clip(timing_uuid, interpolated_clip_id=video.uuid) + output_video = update_speed_of_video_clip(timing.interpolated_clip, timing_uuid) + data_repo.update_specific_timing(timing_uuid, timed_clip_id=output_video.uuid) + return output_video + + data_repo = DataRepo() + timing: InternalFrameTimingObject = data_repo.get_timing_from_uuid(timing_uuid) + idx = timing.aux_frame_index + + st.info(f"Individual Clip for #{idx+1}:") + if timing.timed_clip: + 
st.video(timing.timed_clip.location) + + if timing.interpolation_steps is not None: + if VideoInterpolator.calculate_dynamic_interpolations_steps(timing.clip_duration) > timing.interpolation_steps: + st.error("Low Resolution") + if st.button("Generate Full Resolution Clip", key=f"generate_full_resolution_video_{idx}"): + generate_individual_clip(timing.uuid, 'full') + st.experimental_rerun() + else: + st.success("Full Resolution") + else: + st.error(''' + **----------------------------------------** + + --------- + + ================== + + **No Individual Clip Created Yet** + + ================== + + --------- + + **----------------------------------------** + + + ''') + gen1, gen2 = st.columns([1, 1]) + + with gen1: + if st.button("Generate Low-Resolution Clip", key=f"generate_preview_video_{idx}"): + generate_individual_clip(timing.uuid, 'preview') + st.experimental_rerun() + with gen2: + if st.button("Generate Full Resolution Clip", key=f"generate_full_resolution_video_{idx}"): + generate_individual_clip(timing.uuid, 'full') + st.experimental_rerun() + + +def update_animation_style_element(timing_uuid, horizontal=True): + data_repo = DataRepo() + timing = data_repo.get_timing_from_uuid(timing_uuid) + idx = timing.aux_frame_index + + animation_styles = AnimationStyleType.value_list() + + if f"animation_style_index_{idx}" not in st.session_state: + st.session_state[f"animation_style_index_{idx}"] = animation_styles.index( + timing.animation_style) + st.session_state[f"animation_style_{idx}"] = timing.animation_style + + st.session_state[f"animation_style_{idx}"] = st.radio( + "Animation style:", animation_styles, index=st.session_state[f"animation_style_index_{idx}"], key=f"animation_style_radio_{idx}", help="This is for the morph from the current frame to the next one.", horizontal=horizontal) + + if st.session_state[f"animation_style_{idx}"] != timing.animation_style: + st.session_state[f"animation_style_index_{idx}"] = 
animation_styles.index(st.session_state[f"animation_style_{idx}"]) + timing.animation_style = st.session_state[f"animation_style_{idx}"] + st.experimental_rerun() + + +def current_preview_video_element(timing_uuid): + data_repo = DataRepo() + timing: InternalFrameTimingObject = data_repo.get_timing_from_uuid(timing_uuid) + idx = timing.aux_frame_index + st.info("Preview Video in Context:") + + preview_video_1, preview_video_2 = st.columns([2.5, 1]) + + with preview_video_1: + if timing.preview_video: + st.video(timing.preview_video.location) + else: + st.error(''' + **----------------------------------------** + + --------- + + ================== + + **No Preview Video Created Yet** + + ================== + + --------- + + **----------------------------------------** + ''') + + with preview_video_2: + + if st.button("Generate New Preview Video", key=f"generate_preview_{idx}"): + preview_video = create_full_preview_video( + timing.uuid, 1.0) + data_repo.update_specific_timing( + timing.uuid, preview_video_id=preview_video.uuid) + st.experimental_rerun() \ No newline at end of file diff --git a/ui_components/widgets/frame_selector.py b/ui_components/widgets/frame_selector.py index f1137fa4..4b5d7171 100644 --- a/ui_components/widgets/frame_selector.py +++ b/ui_components/widgets/frame_selector.py @@ -9,7 +9,7 @@ from utils.data_repo.data_repo import DataRepo from ui_components.constants import WorkflowStageType from shared.file_upload.s3 import upload_file -from ui_components.common_methods import delete_frame, add_image_variant, promote_image_variant, save_uploaded_image, replace_image_widget +from ui_components.methods.common_methods import delete_frame, add_image_variant, promote_image_variant, save_uploaded_image, replace_image_widget def frame_selector_widget(): diff --git a/ui_components/widgets/frame_time_selector.py b/ui_components/widgets/frame_time_selector.py index 33e70f80..9e55af58 100644 --- a/ui_components/widgets/frame_time_selector.py +++ 
b/ui_components/widgets/frame_time_selector.py @@ -1,5 +1,5 @@ from typing import List -from ui_components.common_methods import calculate_desired_duration_of_each_clip +from ui_components.methods.common_methods import update_clip_duration_of_all_timing_frames from ui_components.models import InternalFrameTimingObject from utils.data_repo.data_repo import DataRepo import streamlit as st @@ -38,6 +38,6 @@ def update_frame_time(timing_uuid, frame_time): data_repo.update_specific_timing(frame.uuid, frame_time=new_frame_time, timed_clip_id=None) # updating clip_duration - calculate_desired_duration_of_each_clip(timing.project.uuid) + update_clip_duration_of_all_timing_frames(timing.project.uuid) st.experimental_rerun() \ No newline at end of file diff --git a/ui_components/widgets/prompt_finder.py b/ui_components/widgets/prompt_finder.py index de2b4825..69223145 100644 --- a/ui_components/widgets/prompt_finder.py +++ b/ui_components/widgets/prompt_finder.py @@ -2,7 +2,7 @@ from PIL import Image import streamlit as st -from ui_components.common_methods import prompt_clip_interrogator +from ui_components.methods.common_methods import prompt_clip_interrogator from utils.common_utils import save_or_host_file diff --git a/ui_components/widgets/styling_element.py b/ui_components/widgets/styling_element.py index b2c92508..88598244 100644 --- a/ui_components/widgets/styling_element.py +++ b/ui_components/widgets/styling_element.py @@ -4,7 +4,7 @@ from typing import List from shared.constants import AIModelCategory, AIModelType -from ui_components.common_methods import trigger_restyling_process +from ui_components.methods.common_methods import trigger_restyling_process from ui_components.models import InternalAIModelObject, InternalFrameTimingObject, InternalSettingObject from utils.constants import ImageStage from utils.data_repo.data_repo import DataRepo diff --git a/utils/common_utils.py b/utils/common_utils.py index ca339e56..790361f9 100644 --- a/utils/common_utils.py +++ 
b/utils/common_utils.py @@ -378,4 +378,24 @@ def reset_project_state(): for k in numbered_keys_to_delete: key = k + str(i) if key in st.session_state: - del st.session_state[key] \ No newline at end of file + del st.session_state[key] + + +def convert_bytes_to_file(file_location_to_save, mime_type, file_bytes, project_uuid): + data_repo = DataRepo() + + hosted_url = save_or_host_file_bytes(file_bytes, file_location_to_save, mime_type=mime_type) + file_data = { + "name": str(uuid.uuid4()), + "type": InternalFileType.IMAGE.value, + "project_id": project_uuid.uuid + } + + if hosted_url: + file_data.update({'hosted_url': hosted_url}) + else: + file_data.update({'local_path': file_location_to_save}) + + file = data_repo.create_file(**file_data) + + return file \ No newline at end of file diff --git a/utils/media_processor/interpolator.py b/utils/media_processor/interpolator.py new file mode 100644 index 00000000..1760794d --- /dev/null +++ b/utils/media_processor/interpolator.py @@ -0,0 +1,116 @@ +import os +import cv2 +import streamlit as st +import requests as r +import numpy as np +from shared.constants import AnimationStyleType +from utils.common_utils import generate_temp_file + +from utils.data_repo.data_repo import DataRepo +from utils.ml_processor.ml_interface import get_ml_client +from utils.ml_processor.replicate.constants import REPLICATE_MODEL + + +class VideoInterpolator: + @staticmethod + def calculate_dynamic_interpolations_steps(clip_duration): + if clip_duration < 0.17: + interpolation_steps = 2 + elif clip_duration < 0.3: + interpolation_steps = 3 + elif clip_duration < 0.57: + interpolation_steps = 4 + elif clip_duration < 1.1: + interpolation_steps = 5 + elif clip_duration < 2.17: + interpolation_steps = 6 + elif clip_duration < 4.3: + interpolation_steps = 7 + else: + interpolation_steps = 8 + + return interpolation_steps + + @staticmethod + def create_interpolated_clip(img_location_list, animation_style, settings): + data_repo = DataRepo() + if not 
animation_style: + project_setting = data_repo.get_project_setting(st.session_state["project_uuid"]) + animation_style = project_setting.default_animation_style + + if animation_style == AnimationStyleType.INTERPOLATION.value: + output_video_bytes = VideoInterpolator.video_through_frame_interpolation( + img_location_list, + settings + ) + + elif animation_style == AnimationStyleType.DIRECT_MORPHING.value: + output_video_bytes = VideoInterpolator.video_through_direct_morphing( + img_location_list, + settings + ) + + return output_video_bytes + + # returns a video bytes generated through interpolating frames between the given list of frames + @staticmethod + def video_through_frame_interpolation(img_location_list, settings): + # TODO: extend this for more than two images + img1 = img_location_list[0] + img2 = img_location_list[1] + + if not img1.startswith("http"): + img1 = open(img1, "rb") + + if not img2.startswith("http"): + img2 = open(img2, "rb") + + ml_client = get_ml_client() + output = ml_client.predict_model_output(REPLICATE_MODEL.google_frame_interpolation, frame1=img1, frame2=img2, + times_to_interpolate=settings['interpolation_steps']) + + temp_output_file = generate_temp_file(output, '.mp4') + video_bytes = None + with open(temp_output_file.name, 'rb') as f: + video_bytes = f.read() + + os.remove(temp_output_file.name) + + return video_bytes + + @staticmethod + def video_through_direct_morphing(img_location_list, settings): + def load_image(image_path_or_url): + if image_path_or_url.startswith("http"): + response = r.get(image_path_or_url) + image = np.asarray(bytearray(response.content), dtype="uint8") + image = cv2.imdecode(image, cv2.IMREAD_COLOR) + else: + image = cv2.imread(image_path_or_url) + + return image + + img1 = load_image(img_location_list[0]) + img2 = load_image(img_location_list[1]) + + if img1 is None or img2 is None: + raise ValueError("Could not read one or both of the images.") + + num_frames = settings['interpolation_steps'] # Number 
of frames in the video + video_frames = [] + + for alpha in np.linspace(0, 1, num_frames): + morphed_image = cv2.addWeighted(img1, alpha, img2, 1 - alpha, 0) + video_frames.append(morphed_image) + + fourcc = cv2.VideoWriter_fourcc(*"avc1") + video_bytes = [] + for frame in video_frames: + ret, frame_bytes = cv2.imencode('.mp4', frame, fourcc) + if not ret: + raise ValueError("Failed to encode video frame") + video_bytes.append(frame_bytes.tobytes()) + + video_data = b''.join(video_bytes) + return video_data + diff --git a/utils/media_processor/video.py b/utils/media_processor/video.py index fe470ace..baa5411b 100644 --- a/utils/media_processor/video.py +++ b/utils/media_processor/video.py @@ -1,106 +1,52 @@ -from io import BytesIO -import cv2, os - -import requests +import os import tempfile -from utils.common_utils import generate_temp_file - -from utils.data_repo.data_repo import DataRepo - -def resize_video(input_video_uuid, width, height, crop_type=None, output_format='mp4'): - data_repo = DataRepo() - temp_file = None - input_video = data_repo.get_file_from_uuid(input_video_uuid) - input_path = input_video.location - - if input_path.contains('http'): - temp_file = generate_temp_file(input_path) - input_video = cv2.VideoCapture(temp_file.name) - else: - input_video = cv2.VideoCapture(input_path) - - if not input_video.isOpened(): - raise ValueError(f"Could not open the video file: {input_path}") - - # Get source video properties - src_width = int(input_video.get(cv2.CAP_PROP_FRAME_WIDTH)) - src_height = int(input_video.get(cv2.CAP_PROP_FRAME_HEIGHT)) - fps = int(input_video.get(cv2.CAP_PROP_FPS)) - num_frames = int(input_video.get(cv2.CAP_PROP_FRAME_COUNT)) - - # Calculate aspect ratios - src_aspect_ratio = src_width / src_height - target_aspect_ratio = width / height - - if target_aspect_ratio > src_aspect_ratio: - # Scale to target width, maintaining aspect ratio - new_width = width - new_height = int(src_height * (width / src_width)) - else: - # Scale to 
target height, maintaining aspect ratio - new_width = int(src_width * (height / src_height)) - new_height = height - - # Determine the crop type based on the input dimensions, if not provided - if crop_type is None: - width_diff = abs(src_width - width) / src_width - height_diff = abs(src_height - height) / src_height - crop_type = 'top_bottom' if height_diff > width_diff else 'left_right' - - # Calculate crop dimensions - if crop_type == 'top_bottom': - crop_top = (new_height - height) // 2 - crop_bottom = new_height - crop_top - crop_left = 0 - crop_right = new_width - elif crop_type == 'left_right': - crop_top = 0 - crop_bottom = new_height - crop_left = (new_width - width) // 2 - crop_right = new_width - crop_left - else: - raise ValueError("Invalid crop_type. Must be 'top_bottom' or 'left_right'.") +from moviepy.editor import concatenate_videoclips, TextClip, VideoFileClip, vfx - # Create output video - fourcc = cv2.VideoWriter_fourcc(*'mp4v') - - # delete the video at the input_path - os.remove(input_path) - if temp_file: - os.remove(temp_file.name) +from shared.constants import AnimationStyleType - temp_file = tempfile.NamedTemporaryFile(delete=False, suffix=".mp4", mode='wb') - output_video = cv2.VideoWriter(temp_file.name, fourcc, fps, (width, height)) +class VideoProcessor: + @staticmethod + def update_video_speed(video_location, animation_style, desired_duration): + temp_output_file = tempfile.NamedTemporaryFile(delete=False, suffix=".mp4", mode='wb') + if animation_style == AnimationStyleType.DIRECT_MORPHING.value: + # Load the video clip + clip = VideoFileClip(video_location) - for _ in range(num_frames): - ret, frame = input_video.read() - if not ret: - break + clip = clip.set_fps(120) - # Resize frame - frame = cv2.resize(frame, (new_width, new_height)) + # Calculate the number of frames to keep + input_duration = clip.duration + total_frames = len(list(clip.iter_frames())) + target_frames = int(total_frames * (desired_duration / input_duration)) - # 
Crop frame - frame = frame[crop_top:crop_bottom, crop_left:crop_right] + # Determine which frames to keep + keep_every_n_frames = total_frames / target_frames + frames_to_keep = [int(i * keep_every_n_frames) + for i in range(target_frames)] - # Write frame to output video - output_video.write(frame) + # Create a new video clip with the selected frames + output_clip = concatenate_videoclips( + [clip.subclip(i/clip.fps, (i+1)/clip.fps) for i in frames_to_keep]) + output_clip.write_videofile(filename=temp_output_file.name, codec="libx265") - # Release resources - input_video.release() - output_video.release() - file_bytes = BytesIO() + elif animation_style == AnimationStyleType.INTERPOLATION.value: + clip = VideoFileClip(video_location) + input_video_duration = clip.duration + desired_speed_change = float( + input_video_duration) / float(desired_duration) - with open(temp_file.name, 'rb') as f: - file_bytes.write(f.read()) - os.remove(temp_file.name) + print("Desired Speed Change: " + str(desired_speed_change)) - file_bytes.seek(0) + # Apply the speed change using moviepy + output_clip = clip.fx(vfx.speedx, desired_speed_change) + + output_clip.write_videofile(filename=temp_output_file.name, codec="libx264", preset="fast") + + with open(temp_output_file.name, 'rb') as f: + video_bytes = f.read() - # Upload the video file to the specified data repository - data_repo = DataRepo() - uploaded_url = data_repo.upload_file(file_bytes) + if temp_output_file: + os.remove(temp_output_file.name) - data_repo.update_file(input_video.uuid, hosted_url=uploaded_url) - cv2.destroyAllWindows() + return video_bytes \ No newline at end of file From 8534dda5d29cc9e5dc7de781278319a57139989b Mon Sep 17 00:00:00 2001 From: piyushK52 Date: Wed, 13 Sep 2023 13:21:10 +0530 Subject: [PATCH 004/164] wip: code refactoring --- ui_components/methods/common_methods.py | 845 +--------------------- ui_components/methods/file_methods.py | 157 ++++ ui_components/methods/ml_methods.py | 708 
++++++++++++++++++ ui_components/methods/training_methods.py | 84 +++ ui_components/methods/video_methods.py | 72 +- utils/common_utils.py | 173 +---- 6 files changed, 1000 insertions(+), 1039 deletions(-) create mode 100644 ui_components/methods/file_methods.py create mode 100644 ui_components/methods/ml_methods.py create mode 100644 ui_components/methods/training_methods.py diff --git a/ui_components/methods/common_methods.py b/ui_components/methods/common_methods.py index 089e12c9..13f6aa2a 100644 --- a/ui_components/methods/common_methods.py +++ b/ui_components/methods/common_methods.py @@ -3,50 +3,29 @@ import streamlit as st from streamlit_drawable_canvas import st_canvas import os -import base64 -from PIL import Image, ImageDraw, ImageFont, ImageOps, ImageEnhance, ImageFilter, ImageChops +from PIL import Image, ImageDraw, ImageOps, ImageFilter from moviepy.editor import * -from requests_toolbelt.multipart.encoder import MultipartEncoder import cv2 -from moviepy.video.io.VideoFileClip import VideoFileClip -import csv -import pandas as pd -import replicate -import urllib import requests as r -import imageio -import ffmpeg -import string import math import json -import tempfile -import boto3 import time -import zipfile -from math import gcd -import random import uuid from io import BytesIO import numpy as np -from shared.constants import REPLICATE_USER, SERVER, AIModelCategory, InternalFileTag, InternalFileType, ServerType +from shared.constants import SERVER, InternalFileType, ServerType from pydub import AudioSegment -import shutil -from moviepy.editor import concatenate_videoclips, TextClip, VideoFileClip, vfx from backend.models import InternalFileObject -from shared.file_upload.s3 import is_s3_image_url, upload_file -from ui_components.constants import CROPPED_IMG_LOCAL_PATH, MASK_IMG_LOCAL_PATH, SECOND_MASK_FILE, SECOND_MASK_FILE_PATH, TEMP_MASK_FILE, VideoQuality, WorkflowStageType -from ui_components.models import InternalAIModelObject, 
InternalAppSettingObject, InternalBackupObject, InternalFrameTimingObject, InternalProjectObject, InternalSettingObject -from utils.common_utils import add_temp_file_to_project, convert_bytes_to_file, generate_pil_image, generate_temp_file, get_current_user_uuid, save_or_host_file, save_or_host_file_bytes +from ui_components.constants import CROPPED_IMG_LOCAL_PATH, MASK_IMG_LOCAL_PATH, SECOND_MASK_FILE, SECOND_MASK_FILE_PATH, TEMP_MASK_FILE, WorkflowStageType +from ui_components.methods.file_methods import add_temp_file_to_project, generate_pil_image, save_or_host_file, save_or_host_file_bytes +from ui_components.methods.ml_methods import create_depth_mask_image, inpainting, remove_background +from ui_components.methods.video_methods import calculate_desired_duration_of_individual_clip +from ui_components.models import InternalAIModelObject, InternalFrameTimingObject, InternalSettingObject from utils.constants import ImageStage from utils.data_repo.data_repo import DataRepo from shared.constants import AnimationStyleType -from utils.media_processor.video import VideoProcessor -from utils.ml_processor.ml_interface import get_ml_client -from utils.ml_processor.replicate.constants import REPLICATE_MODEL -from ui_components.models import InternalFileObject - -from urllib.parse import urlparse +from ui_components.models import InternalFileObject from typing import Union from streamlit_image_comparison import image_comparison @@ -128,7 +107,6 @@ def save_uploaded_image(image, project_uuid, frame_uuid, save_type): print(f"Failed to save image file due to: {str(e)}") return None - def create_alpha_mask(size, edge_blur_radius): mask = Image.new('L', size, 0) draw = ImageDraw.Draw(mask) @@ -383,8 +361,6 @@ def ai_frame_editing_element(timing_uuid, stage=WorkflowStageType.SOURCE.value): if "current_frame_uuid" not in st.session_state: st.session_state['current_frame_uuid'] = timing_details[0].uuid - def reset_new_image(): - st.session_state['edited_image'] = "" if 
"edited_image" not in st.session_state: st.session_state.edited_image = "" @@ -775,7 +751,6 @@ def rotate_image(location, degree): return rotated_image - def move_frame(direction, timing_uuid): data_repo = DataRepo() timing: InternalFrameTimingObject = data_repo.get_timing_from_uuid( @@ -817,137 +792,6 @@ def delete_frame(timing_uuid): st.session_state['current_frame_index'] = max(1, st.session_state['current_frame_index'] - 1) st.session_state['current_frame_uuid'] = timing_details[st.session_state['current_frame_index'] - 1].uuid - -def dynamic_prompting(prompt, source_image, timing_uuid): - data_repo = DataRepo() - timing: InternalFrameTimingObject = data_repo.get_timing_from_uuid( - timing_uuid) - - if "[expression]" in prompt: - prompt_expression = facial_expression_recognition(source_image) - prompt = prompt.replace("[expression]", prompt_expression) - - if "[location]" in prompt: - prompt_location = prompt_model_blip2( - source_image, "What's surrounding the character?") - prompt = prompt.replace("[location]", prompt_location) - - if "[mouth]" in prompt: - prompt_mouth = prompt_model_blip2( - source_image, "is their mouth open or closed?") - prompt = prompt.replace("[mouth]", "mouth is " + str(prompt_mouth)) - - if "[looking]" in prompt: - prompt_looking = prompt_model_blip2( - source_image, "the person is looking") - prompt = prompt.replace("[looking]", "looking " + str(prompt_looking)) - - data_repo.update_specific_timing(timing_uuid, prompt=prompt) - - -def trigger_restyling_process( - timing_uuid, - model_uuid, - prompt, - strength, - negative_prompt, - guidance_scale, - seed, - num_inference_steps, - transformation_stage, - promote_new_generation, - custom_models, - adapter_type, - update_inference_settings, - low_threshold, - high_threshold -): - data_repo = DataRepo() - timing: InternalFrameTimingObject = data_repo.get_timing_from_uuid( - timing_uuid) - custom_pipeline = "" - - # TODO: add proper form validations throughout the code - if not prompt: - 
st.error("Please enter a prompt") - return - - if update_inference_settings is True: - prompt = prompt.replace(",", ".") - prompt = prompt.replace("\n", "") - data_repo.update_project_setting( - timing.project.uuid, - default_prompt=prompt, - default_strength=strength, - default_model_id=model_uuid, - default_custom_pipeline=custom_pipeline, - default_negative_prompt=negative_prompt, - default_guidance_scale=guidance_scale, - default_seed=seed, - default_num_inference_steps=num_inference_steps, - default_which_stage_to_run_on=transformation_stage, - default_custom_models=custom_models, - default_adapter_type=adapter_type - ) - - if low_threshold != "": - data_repo.update_project_setting( - timing.project.uuid, default_low_threshold=low_threshold) - if high_threshold != "": - data_repo.update_project_setting( - timing.project.uuid, default_high_threshold=high_threshold) - - if timing.source_image == "": - source_image = "" - else: - source_image = timing.source_image - - data_repo.update_specific_timing( - uuid=timing_uuid, - model_id=model_uuid, - source_image_id=timing.source_image.uuid, - prompt=prompt, - strength=strength, - custom_pipeline=custom_pipeline, - negative_prompt=negative_prompt, - guidance_scale=guidance_scale, - seed=seed, - num_inference_steps=num_inference_steps, - custom_models=custom_models, - adapter_type=adapter_type, - low_threshold=low_threshold, - high_threshold=high_threshold - ) - dynamic_prompting(prompt, source_image, timing_uuid) - - timing = data_repo.get_timing_from_uuid(timing_uuid) - if transformation_stage == ImageStage.SOURCE_IMAGE.value: - source_image = timing.source_image - else: - variants: List[InternalFileObject] = timing.alternative_images_list - number_of_variants = len(variants) - primary_image = timing.primary_image - source_image = primary_image.location - - # if st.session_state['custom_pipeline'] == "Mystique": - # output_file = custom_pipeline_mystique(timing_uuid, source_image) - # else: - output_file = 
restyle_images(timing_uuid, source_image) - - if output_file != None: - add_image_variant(output_file.uuid, timing_uuid) - - if promote_new_generation == True: - timing = data_repo.get_timing_from_uuid(timing_uuid) - variants = timing.alternative_images_list - number_of_variants = len(variants) - if number_of_variants == 1: - print("No new generation to promote") - else: - promote_image_variant(timing_uuid, number_of_variants - 1) - else: - print("No new generation to promote") - def replace_image_widget(timing_uuid, stage): data_repo = DataRepo() timing = data_repo.get_timing_from_uuid(timing_uuid) @@ -1050,7 +894,6 @@ def promote_image_variant(timing_uuid, variant_to_promote_frame_number: str): if frame_idx < len(timing_details): data_repo.update_specific_timing(timing.uuid, timed_clip_id=None) - def extract_canny_lines(image_path_or_url, project_uuid, low_threshold=50, high_threshold=150) -> InternalFileObject: data_repo = DataRepo() @@ -1127,37 +970,6 @@ def create_or_update_mask(timing_uuid, image) -> InternalFileObject: timing = data_repo.get_timing_from_uuid(timing_uuid) return timing.mask - -def inpainting(input_image: str, prompt, negative_prompt, timing_uuid, invert_mask, pass_mask=False) -> InternalFileObject: - data_repo = DataRepo() - timing: InternalFrameTimingObject = data_repo.get_timing_from_uuid( - timing_uuid) - - if pass_mask == False: - mask = timing.mask.location - else: - # TODO: store the local temp files in the db too - if SERVER != ServerType.DEVELOPMENT.value: - mask = timing.project.get_temp_mask_file(TEMP_MASK_FILE).location - else: - mask = MASK_IMG_LOCAL_PATH - - if not mask.startswith("http"): - mask = open(mask, "rb") - - if not input_image.startswith("http"): - input_image = open(input_image, "rb") - - ml_client = get_ml_client() - output = ml_client.predict_model_output(REPLICATE_MODEL.andreas_sd_inpainting, mask=mask, image=input_image, prompt=prompt, - invert_mask=invert_mask, negative_prompt=negative_prompt, 
num_inference_steps=25) - - file_name = str(uuid.uuid4()) + ".png" - image_file = data_repo.create_file( - name=file_name, type=InternalFileType.IMAGE.value, hosted_url=output[0]) - - return image_file - # adds the image file in variant (alternative images) list def drawing_mode(timing_details,project_settings,project_uuid,stage=WorkflowStageType.STYLED.value): @@ -1367,7 +1179,6 @@ def drawing_mode(timing_details,project_settings,project_uuid,stage=WorkflowStag time.sleep(1) st.experimental_rerun() - def add_image_variant(image_file_uuid: str, timing_uuid: str): data_repo = DataRepo() image_file: InternalFileObject = data_repo.get_file_from_uuid( @@ -1412,92 +1223,6 @@ def convert_image_list_to_file_list(image_list): file_list.append(image_file) return file_list - -# INFO: images_list passed here are converted to internal files after they are used for training -def train_dreambooth_model(instance_prompt, class_prompt, training_file_url, max_train_steps, model_name, images_list: List[str], controller_type, model_type_list): - ml_client = get_ml_client() - response = ml_client.dreambooth_training( - training_file_url, instance_prompt, class_prompt, max_train_steps, model_name, controller_type, len(images_list)) - training_status = response["status"] - - model_id = response["id"] - if training_status == "queued": - file_list = convert_image_list_to_file_list(images_list) - file_uuid_list = [file.uuid for file in file_list] - file_uuid_list = json.dumps(file_uuid_list) - - model_data = { - "name": model_name, - "user_id": get_current_user_uuid(), - "replicate_model_id": model_id, - "replicate_url": response["model"], - "diffusers_url": "", - "category": AIModelCategory.DREAMBOOTH.value, - "training_image_list": file_uuid_list, - "keyword": instance_prompt, - "custom_trained": True, - "model_type": model_type_list - } - - data_repo = DataRepo() - data_repo.create_ai_model(**model_data) - - return "Success - Training Started. 
Please wait 10-15 minutes for the model to be trained." - else: - return "Failed" - -# INFO: images_list passed here are converted to internal files after they are used for training -def train_lora_model(training_file_url, type_of_task, resolution, model_name, images_list, model_type_list): - data_repo = DataRepo() - ml_client = get_ml_client() - output = ml_client.predict_model_output(REPLICATE_MODEL.clones_lora_training, instance_data=training_file_url, - task=type_of_task, resolution=int(resolution)) - - file_list = convert_image_list_to_file_list(images_list) - file_uuid_list = [file.uuid for file in file_list] - file_uuid_list = json.dumps(file_uuid_list) - model_data = { - "name": model_name, - "user_id": get_current_user_uuid(), - "replicate_url": output, - "diffusers_url": "", - "category": AIModelCategory.LORA.value, - "training_image_list": file_uuid_list, - "custom_trained": True, - "model_type": model_type_list - } - - data_repo.create_ai_model(**model_data) - return f"Successfully trained - the model '{model_name}' is now available for use!" 
- -# TODO: making an exception for this, passing just the image urls instead of -# image files -def train_model(images_list, instance_prompt, class_prompt, max_train_steps, - model_name, type_of_model, type_of_task, resolution, controller_type, model_type_list): - # prepare and upload the training data (images.zip) - ml_client = get_ml_client() - try: - training_file_url = ml_client.upload_training_data(images_list) - except Exception as e: - raise e - - # training the model - model_name = model_name.replace(" ", "-").lower() - if type_of_model == "Dreambooth": - return train_dreambooth_model(instance_prompt, class_prompt, training_file_url, - max_train_steps, model_name, images_list, controller_type, model_type_list) - elif type_of_model == "LoRA": - return train_lora_model(training_file_url, type_of_task, resolution, model_name, images_list, model_type_list) - -def remove_background(input_image): - if not input_image.startswith("http"): - input_image = open(input_image, "rb") - - ml_client = get_ml_client() - output = ml_client.predict_model_output( - REPLICATE_MODEL.pollination_modnet, image=input_image) - return output - def replace_background(project_uuid, background_image) -> InternalFileObject: data_repo = DataRepo() project = data_repo.get_project_from_uuid(project_uuid) @@ -1534,38 +1259,6 @@ def replace_background(project_uuid, background_image) -> InternalFileObject: return image_file -def prompt_clip_interrogator(input_image, which_model, best_or_fast): - if which_model == "Stable Diffusion 1.5": - which_model = "ViT-L-14/openai" - elif which_model == "Stable Diffusion 2": - which_model = "ViT-H-14/laion2b_s32b_b79k" - - if not input_image.startswith("http"): - input_image = open(input_image, "rb") - - ml_client = get_ml_client() - output = ml_client.predict_model_output( - REPLICATE_MODEL.clip_interrogator, image=input_image, clip_model_name=which_model, mode=best_or_fast) - - return output - -def prompt_model_real_esrgan_upscaling(input_image): - 
data_repo = DataRepo() - app_settings = data_repo.get_app_setting_from_uuid() - - if not input_image.startswith("http"): - input_image = open(input_image, "rb") - - ml_client = get_ml_client() - output = ml_client.predict_model_output( - REPLICATE_MODEL.real_esrgan_upscale, image=input_image, upscale=2 - ) - - filename = str(uuid.uuid4()) + ".png" - image_file = data_repo.create_file(name=filename, type=InternalFileType.IMAGE.value, - hosted_url=output) - return output - # TODO: don't save or upload image where just passing the PIL object can work def resize_image(video_name, new_width, new_height, image_file: InternalFileObject) -> InternalFileObject: if 'http' in image_file.location: @@ -1603,155 +1296,6 @@ def resize_image(video_name, new_width, new_height, image_file: InternalFileObje return image_file -# TODO: fix the options input, only certain words can be input in this -def prompt_model_stylegan_nada(timing_uuid, input_image): - data_repo = DataRepo() - timing: InternalFrameTimingObject = data_repo.get_timing_from_uuid( - timing_uuid) - - input_file = input_image.location - if 'http' in input_image.location: - input_file = open(input_image.location, 'rb') - - ml_client = get_ml_client() - output = ml_client.predict_model_output(REPLICATE_MODEL.stylegan_nada, input=input_file, - output_style=timing.prompt) - filename = str(uuid.uuid4()) + ".png" - image_file = data_repo.create_file(name=filename, type=InternalFileType.IMAGE.value, - hosted_url=output[0]) - output_file = resize_image(timing.project.name, 512, 512, image_file) - - return output_file - -def prompt_model_stable_diffusion_xl(timing_uuid): - data_repo = DataRepo() - timing: InternalFrameTimingObject = data_repo.get_timing_from_uuid( - timing_uuid) - - ml_client = get_ml_client() - output = ml_client.predict_model_output(REPLICATE_MODEL.sdxl, prompt=timing.prompt) - filename = str(uuid.uuid4()) + ".png" - image_file = data_repo.create_file(name=filename, type=InternalFileType.IMAGE.value, - 
hosted_url=output[0]) - output_file = resize_image(timing.project.name, 512, 512, image_file) - - return output_file - -def prompt_model_stability(timing_uuid, input_image_file: InternalFileObject): - data_repo = DataRepo() - timing: InternalFrameTimingObject = data_repo.get_timing_from_uuid( - timing_uuid) - project_settings: InternalSettingObject = data_repo.get_project_setting( - timing.project.uuid) - - index_of_current_item = timing.aux_frame_index - input_image = input_image_file.location - prompt = timing.prompt - strength = timing.strength - if not input_image.startswith("http"): - input_image = open(input_image, "rb") - - ml_client = get_ml_client() - output = ml_client.predict_model_output( - REPLICATE_MODEL.img2img_sd_2_1, - image=input_image, - prompt_strength=float(strength), - prompt=prompt, - negative_prompt=timing.negative_prompt, - width=project_settings.width, - height=project_settings.height, - guidance_scale=timing.guidance_scale, - seed=timing.seed, - num_inference_steps=timing.num_inteference_steps - ) - - filename = str(uuid.uuid4()) + ".png" - image_file: InternalFileObject = data_repo.create_file(name=filename, type=InternalFileType.IMAGE.value, - hosted_url=output[0], tag=InternalFileTag.GENERATED_VIDEO.value) - - return image_file - - -def prompt_model_dreambooth(timing_uuid, source_image_file: InternalFileObject): - data_repo = DataRepo() - - if not ('dreambooth_model_uuid' in st.session_state and st.session_state['dreambooth_model_uuid']): - st.error('No dreambooth model selected') - return - - timing: InternalFrameTimingObject = data_repo.get_timing_from_uuid( - timing_uuid) - timing_details: List[InternalFrameTimingObject] = data_repo.get_timing_list_from_project( - timing.project.uuid) - - project_settings: InternalSettingObject = data_repo.get_project_setting( - timing.project.uuid) - - dreambooth_model: InternalAIModelObject = data_repo.get_ai_model_from_uuid(st.session_state['dreambooth_model_uuid']) - - model_name = 
dreambooth_model.name - image_number = timing.aux_frame_index - prompt = timing.prompt - strength = timing.strength - negative_prompt = timing.negative_prompt - guidance_scale = timing.guidance_scale - seed = timing.seed - num_inference_steps = timing.num_inteference_steps - - model_id = dreambooth_model.replicate_url - - ml_client = get_ml_client() - - source_image = source_image_file.location - if timing_details[image_number].adapter_type == "Yes": - if source_image.startswith("http"): - control_image = source_image - else: - control_image = open(source_image, "rb") - else: - control_image = None - - # version of models that were custom created has to be fetched - if not dreambooth_model.version: - version = ml_client.get_model_version_from_id(model_id) - data_repo.update_ai_model(uuid=dreambooth_model.uuid, version=version) - else: - version = dreambooth_model.version - - model_version = ml_client.get_model_by_name( - f"{REPLICATE_USER}/{model_name}", version) - - if source_image.startswith("http"): - input_image = source_image - else: - input_image = open(source_image, "rb") - - input_data = { - "image": input_image, - "prompt": prompt, - "prompt_strength": float(strength), - "height": int(project_settings.height), - "width": int(project_settings.width), - "disable_safety_check": True, - "negative_prompt": negative_prompt, - "guidance_scale": float(guidance_scale), - "seed": int(seed), - "num_inference_steps": int(num_inference_steps) - } - - if control_image != None: - input_data['control_image'] = control_image - - output = model_version.predict(**input_data) - - for i in output: - filename = str(uuid.uuid4()) + ".png" - image_file = data_repo.create_file( - name=filename, type=InternalFileType.IMAGE.value, hosted_url=i, tag=InternalFileTag.GENERATED_VIDEO.value) - return image_file - - return None - def get_audio_bytes_for_slice(timing_uuid): data_repo = DataRepo() @@ -1771,7 +1315,6 @@ def get_audio_bytes_for_slice(timing_uuid): audio_bytes.seek(0) return 
audio_bytes - # calculates and updates clip duration of all the timings def update_clip_duration_of_all_timing_frames(project_uuid): data_repo = DataRepo() @@ -1806,129 +1349,6 @@ def update_clip_duration_of_all_timing_frames(project_uuid): data_repo.update_specific_timing( timing_item.uuid, clip_duration=total_duration_of_frame) -def prompt_model_depth2img(strength, timing_uuid, source_image) -> InternalFileObject: - data_repo = DataRepo() - timing: InternalFrameTimingObject = data_repo.get_timing_from_uuid( - timing_uuid) - - prompt = timing.prompt - num_inference_steps = timing.num_inteference_steps - guidance_scale = timing.guidance_scale - negative_prompt = timing.negative_prompt - if not source_image.startswith("http"): - source_image = open(source_image, "rb") - - ml_client = get_ml_client() - output = ml_client.predict_model_output(REPLICATE_MODEL.jagilley_controlnet_depth2img, input_image=source_image, - prompt_strength=float(strength), prompt=prompt, negative_prompt=negative_prompt, - num_inference_steps=num_inference_steps, guidance_scale=guidance_scale) - - filename = str(uuid.uuid4()) + ".png" - image_file: InternalFileObject = data_repo.create_file(name=filename, type=InternalFileType.IMAGE.value, - hosted_url=output[0]) - return image_file - - -def prompt_model_blip2(input_image, query): - if not input_image.startswith("http"): - input_image = open(input_image, "rb") - - ml_client = get_ml_client() - output = ml_client.predict_model_output( - REPLICATE_MODEL.salesforce_blip_2, image=input_image, question=query) - - return output - - -def facial_expression_recognition(input_image): - if not input_image.startswith("http"): - input_image = open(input_image, "rb") - - ml_client = get_ml_client() - output = ml_client.predict_model_output( - REPLICATE_MODEL.phamquiluan_face_recognition, input_path=input_image) - - emo_label = output[0]["emo_label"] - if emo_label == "disgust": - emo_label = "disgusted" - elif emo_label == "fear": - emo_label = "fearful" - 
elif emo_label == "surprised": - emo_label = "surprised" - emo_proba = output[0]["emo_proba"] - if emo_proba > 0.95: - emotion = (f"very {emo_label} expression") - elif emo_proba > 0.85: - emotion = (f"{emo_label} expression") - elif emo_proba > 0.75: - emotion = (f"somewhat {emo_label} expression") - elif emo_proba > 0.65: - emotion = (f"slightly {emo_label} expression") - elif emo_proba > 0.55: - emotion = (f"{emo_label} expression") - else: - emotion = (f"neutral expression") - return emotion - - -def prompt_model_pix2pix(timing_uuid, input_image_file: InternalFileObject): - data_repo = DataRepo() - timing: InternalFrameTimingObject = data_repo.get_timing_from_uuid( - timing_uuid) - - prompt = timing.prompt - guidance_scale = timing.guidance_scale - seed = timing.seed - input_image = input_image_file.location - if not input_image.startswith("http"): - input_image = open(input_image, "rb") - - ml_client = get_ml_client() - output = ml_client.predict_model_output(REPLICATE_MODEL.arielreplicate, input_image=input_image, instruction_text=prompt, - seed=seed, cfg_image=1.2, cfg_text=guidance_scale, resolution=704) - - filename = str(uuid.uuid4()) + ".png" - image_file: InternalFileObject = data_repo.create_file(name=filename, type=InternalFileType.IMAGE.value, - hosted_url=output) - return image_file - - -def restyle_images(timing_uuid, source_image) -> InternalFileObject: - data_repo = DataRepo() - timing: InternalFrameTimingObject = data_repo.get_timing_from_uuid( - timing_uuid) - - model_name = timing.model.name - strength = timing.strength - - if model_name == "stable-diffusion-img2img-v2.1": - output_file = prompt_model_stability(timing_uuid, source_image) - elif model_name == "depth2img": - output_file = prompt_model_depth2img( - strength, timing_uuid, source_image) - elif model_name == "pix2pix": - output_file = prompt_model_pix2pix(timing_uuid, source_image) - elif model_name == "LoRA": - output_file = prompt_model_lora(timing_uuid, source_image) - elif 
model_name == "controlnet": - output_file = prompt_model_controlnet(timing_uuid, source_image) - elif model_name == "Dreambooth": - output_file = prompt_model_dreambooth(timing_uuid, source_image) - elif model_name == 'StyleGAN-NADA': - output_file = prompt_model_stylegan_nada(timing_uuid, source_image) - elif model_name == "stable_diffusion_xl": - output_file = prompt_model_stable_diffusion_xl(timing_uuid) - elif model_name == "real-esrgan-upscaling": - output_file = prompt_model_real_esrgan_upscaling(source_image) - elif model_name == 'controlnet_1_1_x_realistic_vision_v2_0': - output_file = prompt_model_controlnet_1_1_x_realistic_vision_v2_0( - source_image) - elif model_name == 'urpm-v1.3': - output_file = prompt_model_urpm_v1_3(source_image) - - return output_file - - def create_timings_row_at_frame_number(project_uuid, index_of_frame, frame_time=0.0): data_repo = DataRepo() @@ -1956,255 +1376,6 @@ def create_timings_row_at_frame_number(project_uuid, index_of_frame, frame_time= return timing -def create_depth_mask_image(input_image, layer, timing_uuid): - if not input_image.startswith("http"): - input_image = open(input_image, "rb") - - ml_client = get_ml_client() - output = ml_client.predict_model_output( - REPLICATE_MODEL.cjwbw_midas, image=input_image, model_type="dpt_beit_large_512") - try: - urllib.request.urlretrieve(output, "videos/temp/depth.png") - except Exception as e: - print(e) - - depth_map = Image.open("videos/temp/depth.png") - depth_map = depth_map.convert("L") # Convert to grayscale image - pixels = depth_map.load() - mask = Image.new("L", depth_map.size) - mask_pixels = mask.load() - - fg_mask = Image.new("L", depth_map.size) if "Foreground" in layer else None - mg_mask = Image.new( - "L", depth_map.size) if "Middleground" in layer else None - bg_mask = Image.new("L", depth_map.size) if "Background" in layer else None - - fg_pixels = fg_mask.load() if fg_mask else None - mg_pixels = mg_mask.load() if mg_mask else None - bg_pixels = 
bg_mask.load() if bg_mask else None - - for i in range(depth_map.size[0]): - for j in range(depth_map.size[1]): - depth_value = pixels[i, j] - - if fg_pixels: - fg_pixels[i, j] = 0 if depth_value > 200 else 255 - if mg_pixels: - mg_pixels[i, j] = 0 if depth_value <= 200 and depth_value > 50 else 255 - if bg_pixels: - bg_pixels[i, j] = 0 if depth_value <= 50 else 255 - - mask_pixels[i, j] = 255 - if fg_pixels: - mask_pixels[i, j] &= fg_pixels[i, j] - if mg_pixels: - mask_pixels[i, j] &= mg_pixels[i, j] - if bg_pixels: - mask_pixels[i, j] &= bg_pixels[i, j] - - return create_or_update_mask(timing_uuid, mask) - - -def prompt_model_controlnet(timing_uuid, input_image): - data_repo = DataRepo() - timing: InternalFrameTimingObject = data_repo.get_timing_from_uuid( - timing_uuid) - - if timing.adapter_type == "normal": - model = REPLICATE_MODEL.jagilley_controlnet_normal - elif timing.adapter_type == "canny": - model = REPLICATE_MODEL.jagilley_controlnet_canny - elif timing.adapter_type == "hed": - model = REPLICATE_MODEL.jagilley_controlnet_hed - elif timing.adapter_type == "scribble": - model = REPLICATE_MODEL.jagilley_controlnet_scribble - if timing.canny_image != "": - input_image = timing.canny_image - elif timing.adapter_type == "seg": - model = REPLICATE_MODEL.jagilley_controlnet_seg - elif timing.adapter_type == "hough": - model = REPLICATE_MODEL.jagilley_controlnet_hough - elif timing.adapter_type == "depth2img": - model = REPLICATE_MODEL.jagilley_controlnet_depth2img - elif timing.adapter_type == "pose": - model = REPLICATE_MODEL.jagilley_controlnet_pose - - if not input_image.startswith("http"): - input_image = open(input_image, "rb") - - inputs = { - 'image': input_image, - 'prompt': timing.prompt, - 'num_samples': "1", - 'image_resolution': "512", - 'ddim_steps': timing.num_inteference_steps, - 'scale': timing.guidance_scale, - 'eta': 0, - 'seed': timing.seed, - 'a_prompt': "best quality, extremely detailed", - 'n_prompt': timing.negative_prompt + ", 
longbody, lowres, bad anatomy, bad hands, missing fingers, extra digit, fewer digits, cropped, worst quality, low quality", - 'detect_resolution': 512, - 'bg_threshold': 0, - 'low_threshold': timing.low_threshold, - 'high_threshold': timing.high_threshold, - } - - ml_client = get_ml_client() - output = ml_client.predict_model_output(model, **inputs) - - return output[1] - - -def prompt_model_urpm_v1_3(timing_uuid, source_image): - data_repo = DataRepo() - timing = data_repo.get_timing_from_uuid(timing_uuid) - - if not source_image.startswith("http"): - source_image = open(source_image, "rb") - - inputs = { - 'image': source_image, - 'prompt': timing.prompt, - 'negative_prompt': timing.negative_prompt, - 'strength': timing.strength, - 'guidance_scale': timing.guidance_scale, - 'num_inference_steps': timing.num_inference_steps, - 'upscale': 1, - 'seed': timing.seed, - } - - ml_client = get_ml_client() - output = ml_client.predict_model_output(REPLICATE_MODEL.urpm, **inputs) - - return output[0] - - -def prompt_model_controlnet_1_1_x_realistic_vision_v2_0(timing_uuid, input_image): - data_repo = DataRepo() - timing = data_repo.get_timing_from_uuid(timing_uuid) - - if not input_image.startswith("http"): - input_image = open(input_image, "rb") - - inputs = { - 'image': input_image, - 'prompt': timing.prompt, - 'ddim_steps': timing.num_inference_steps, - 'strength': timing.strength, - 'scale': timing.guidance_scale, - 'seed': timing.seed, - } - - ml_client = get_ml_client() - output = ml_client.predict_model_output( - REPLICATE_MODEL.controlnet_1_1_x_realistic_vision_v2_0, **inputs) - - return output[1] - - -def prompt_model_lora(timing_uuid, source_image_file: InternalFileObject) -> InternalFileObject: - data_repo = DataRepo() - timing: InternalFrameTimingObject = data_repo.get_timing_from_uuid( - timing_uuid) - project_settings: InternalSettingObject = data_repo.get_project_setting( - timing.project.uuid) - - # lora_models = timing.custom_model_id_list - # 
default_model_url = DEFAULT_LORA_MODEL_URL - - # lora_model_urls = [] - - # for lora_model_uuid in lora_models: - # if lora_model_uuid != "": - # lora_model: InternalAIModelObject = data_repo.get_ai_model_from_uuid( - # lora_model_uuid) - # print(lora_model) - # if lora_model.replicate_url != "": - # lora_model_url = lora_model.replicate_url - # else: - # lora_model_url = default_model_url - # else: - # lora_model_url = default_model_url - - # lora_model_urls.append(lora_model_url) - - lora_urls = "" - lora_scales = "" - if "lora_model_1_url" in st.session_state and st.session_state["lora_model_1_url"]: - lora_urls += st.session_state["lora_model_1_url"] - lora_scales += "0.5" - if "lora_model_2_url" in st.session_state and st.session_state["lora_model_2_url"]: - ctn = "" if not len(lora_urls) else " | " - lora_urls += ctn + st.session_state["lora_model_2_url"] - lora_scales += ctn + "0.5" - if st.session_state["lora_model_3_url"]: - ctn = "" if not len(lora_urls) else " | " - lora_urls += ctn + st.session_state["lora_model_3_url"] - lora_scales += ctn + "0.5" - - source_image = source_image_file.location - if source_image[:4] == "http": - input_image = source_image - else: - input_image = open(source_image, "rb") - - if timing.adapter_type != "None": - if source_image[:4] == "http": - adapter_condition_image = source_image - else: - adapter_condition_image = open(source_image, "rb") - else: - adapter_condition_image = "" - - inputs = { - 'prompt': timing.prompt, - 'negative_prompt': timing.negative_prompt, - 'width': project_settings.width, - 'height': project_settings.height, - 'num_outputs': 1, - 'image': input_image, - 'num_inference_steps': timing.num_inteference_steps, - 'guidance_scale': timing.guidance_scale, - 'prompt_strength': timing.strength, - 'scheduler': "DPMSolverMultistep", - 'lora_urls': lora_urls, - 'lora_scales': lora_scales, - 'adapter_type': timing.adapter_type, - 'adapter_condition_image': adapter_condition_image, - } - - ml_client = 
get_ml_client() - max_attempts = 3 - attempts = 0 - while attempts < max_attempts: - try: - output = ml_client.predict_model_output( - REPLICATE_MODEL.clones_lora_training_2, **inputs) - print(output) - filename = str(uuid.uuid4()) + ".png" - file: InternalFileObject = data_repo.create_file(name=filename, type=InternalFileType.IMAGE.value, - hosted_url=output[0]) - return file - except replicate.exceptions.ModelError as e: - if "NSFW content detected" in str(e): - print("NSFW content detected. Attempting to rerun code...") - attempts += 1 - continue - else: - raise e - except Exception as e: - raise e - - # filename = "default_3x_failed-656a7e5f-eca9-4f92-a06b-e1c6ff4a5f5e.png" # const filename - # file = data_repo.get_file_from_name(filename) - # if file: - # return file - # else: - # file: InternalFileObject = data_repo.create_file(name=filename, type=InternalFileType.IMAGE.value, - # hosted_url="https://i.ibb.co/ZG0hxzj/Failed-3x-In-A-Row.png") - # return file - - def save_audio_file(uploaded_file, project_uuid): data_repo = DataRepo() diff --git a/ui_components/methods/file_methods.py b/ui_components/methods/file_methods.py new file mode 100644 index 00000000..52068765 --- /dev/null +++ b/ui_components/methods/file_methods.py @@ -0,0 +1,157 @@ +from io import BytesIO +import io +import json +import os +import tempfile +from typing import Union +from urllib.parse import urlparse +from PIL import Image +import numpy as np +import uuid +import requests +import streamlit as st +from shared.constants import SERVER, InternalFileType, ServerType +from utils.data_repo.data_repo import DataRepo + +# depending on the environment it will either save or host the PIL image object +def save_or_host_file(file, path, mime_type='image/png'): + data_repo = DataRepo() + # TODO: fix session state management, remove direct access out side the main code + project_setting = data_repo.get_project_setting(st.session_state['project_uuid']) + if project_setting: + file = 
zoom_and_crop(file, project_setting.width, project_setting.height) + else: + # new project + file = zoom_and_crop(file, 512, 512) + + uploaded_url = None + if SERVER != ServerType.DEVELOPMENT.value: + image_bytes = BytesIO() + file.save(image_bytes, format=mime_type.split('/')[1]) + image_bytes.seek(0) + + uploaded_url = data_repo.upload_file(image_bytes, '.png') + else: + os.makedirs(os.path.dirname(path), exist_ok=True) + file.save(path) + + return uploaded_url + +def zoom_and_crop(file, width, height): + # scaling + s_x = width / file.width + s_y = height / file.height + scale = max(s_x, s_y) + new_width = int(file.width * scale) + new_height = int(file.height * scale) + file = file.resize((new_width, new_height)) + + # cropping + left = (file.width - width) // 2 + top = (file.height - height) // 2 + right = (file.width + width) // 2 + bottom = (file.height + height) // 2 + file = file.crop((left, top, right, bottom)) + + return file + +def save_or_host_file_bytes(video_bytes, path, ext=".mp4"): + uploaded_url = None + if SERVER != ServerType.DEVELOPMENT.value: + data_repo = DataRepo() + uploaded_url = data_repo.upload_file(video_bytes, ext) + else: + os.makedirs(os.path.dirname(path), exist_ok=True) + with open(path, 'wb') as f: + f.write(video_bytes) + + return uploaded_url + +def add_temp_file_to_project(project_uuid, key, hosted_url): + data_repo = DataRepo() + + file_data = { + "name": str(uuid.uuid4()) + ".png", + "type": InternalFileType.IMAGE.value, + "project_id": project_uuid, + 'hosted_url': hosted_url + } + + temp_file = data_repo.create_file(**file_data) + project = data_repo.get_project_from_uuid(project_uuid) + temp_file_list = project.project_temp_file_list + temp_file_list.update({key: temp_file.uuid}) + temp_file_list = json.dumps(temp_file_list) + project_data = { + 'uuid': project_uuid, + 'temp_file_list': temp_file_list + } + data_repo.update_project(**project_data) + + +def generate_temp_file(url, ext=".mp4"): + response = requests.get(url) 
+ if not response.ok: + raise ValueError(f"Could not download video from URL: {url}") + + temp_file = tempfile.NamedTemporaryFile(delete=False, suffix=ext, mode='wb') + temp_file.write(response.content) + temp_file.close() + + return temp_file + + +def generate_pil_image(img: Union[Image.Image, str, np.ndarray, io.BytesIO]): + # Check if img is a PIL image + if isinstance(img, Image.Image): + pass + + # Check if img is a URL + elif isinstance(img, str) and bool(urlparse(img).netloc): + response = requests.get(img) + img = Image.open(BytesIO(response.content)) + + # Check if img is a local file + elif isinstance(img, str): + img = Image.open(img) + + # Check if img is a numpy ndarray + elif isinstance(img, np.ndarray): + img = Image.fromarray(img) + + # Check if img is a BytesIO stream + elif isinstance(img, io.BytesIO): + img = Image.open(img) + + else: + raise ValueError( + "Invalid image input. Must be a PIL image, a URL string, a local file path string or a numpy ndarray.") + + return img + +def generate_temp_file_from_uploaded_file(uploaded_file): + if uploaded_file is not None: + with tempfile.NamedTemporaryFile(delete=False) as temp_file: + temp_file.write(uploaded_file.read()) + return temp_file + + +def convert_bytes_to_file(file_location_to_save, mime_type, file_bytes, project_uuid, filename=None, tag=""): + data_repo = DataRepo() + + hosted_url = save_or_host_file_bytes(file_bytes, file_location_to_save, mime_type=mime_type) + file_data = { + "name": str(uuid.uuid4()) + "." 
+ mime_type.split("/")[1] if not filename else filename, + "type": InternalFileType.IMAGE.value, + "project_id": project_uuid, + "tag": tag + } + + if hosted_url: + file_data.update({'hosted_url': hosted_url}) + else: + file_data.update({'local_path': file_location_to_save}) + + file = data_repo.create_file(**file_data) + + return file \ No newline at end of file diff --git a/ui_components/methods/ml_methods.py b/ui_components/methods/ml_methods.py new file mode 100644 index 00000000..56f191a6 --- /dev/null +++ b/ui_components/methods/ml_methods.py @@ -0,0 +1,708 @@ + +import streamlit as st +import replicate +from typing import List +from PIL import Image +import uuid +import urllib +from backend.models import InternalFileObject +from shared.constants import REPLICATE_USER, SERVER, InternalFileTag, InternalFileType, ServerType +from ui_components.constants import MASK_IMG_LOCAL_PATH, TEMP_MASK_FILE +from ui_components.methods.common_methods import add_image_variant, create_or_update_mask, promote_image_variant, resize_image +from ui_components.models import InternalAIModelObject, InternalFrameTimingObject, InternalSettingObject +from utils.constants import ImageStage +from utils.data_repo.data_repo import DataRepo +from utils.ml_processor.ml_interface import get_ml_client +from utils.ml_processor.replicate.constants import REPLICATE_MODEL + + +def trigger_restyling_process( + timing_uuid, + model_uuid, + prompt, + strength, + negative_prompt, + guidance_scale, + seed, + num_inference_steps, + transformation_stage, + promote_new_generation, + custom_models, + adapter_type, + update_inference_settings, + low_threshold, + high_threshold +): + data_repo = DataRepo() + timing: InternalFrameTimingObject = data_repo.get_timing_from_uuid( + timing_uuid) + custom_pipeline = "" + + # TODO: add proper form validations throughout the code + if not prompt: + st.error("Please enter a prompt") + return + + if update_inference_settings is True: + prompt = prompt.replace(",", ".") 
+ prompt = prompt.replace("\n", "") + data_repo.update_project_setting( + timing.project.uuid, + default_prompt=prompt, + default_strength=strength, + default_model_id=model_uuid, + default_custom_pipeline=custom_pipeline, + default_negative_prompt=negative_prompt, + default_guidance_scale=guidance_scale, + default_seed=seed, + default_num_inference_steps=num_inference_steps, + default_which_stage_to_run_on=transformation_stage, + default_custom_models=custom_models, + default_adapter_type=adapter_type + ) + + if low_threshold != "": + data_repo.update_project_setting( + timing.project.uuid, default_low_threshold=low_threshold) + if high_threshold != "": + data_repo.update_project_setting( + timing.project.uuid, default_high_threshold=high_threshold) + + if timing.source_image == "": + source_image = "" + else: + source_image = timing.source_image + + data_repo.update_specific_timing( + uuid=timing_uuid, + model_id=model_uuid, + source_image_id=timing.source_image.uuid, + prompt=prompt, + strength=strength, + custom_pipeline=custom_pipeline, + negative_prompt=negative_prompt, + guidance_scale=guidance_scale, + seed=seed, + num_inference_steps=num_inference_steps, + custom_models=custom_models, + adapter_type=adapter_type, + low_threshold=low_threshold, + high_threshold=high_threshold + ) + dynamic_prompting(prompt, source_image, timing_uuid) + + timing = data_repo.get_timing_from_uuid(timing_uuid) + if transformation_stage == ImageStage.SOURCE_IMAGE.value: + source_image = timing.source_image + else: + variants: List[InternalFileObject] = timing.alternative_images_list + number_of_variants = len(variants) + primary_image = timing.primary_image + source_image = primary_image.location + + output_file = restyle_images(timing_uuid, source_image) + + if output_file != None: + add_image_variant(output_file.uuid, timing_uuid) + + if promote_new_generation == True: + timing = data_repo.get_timing_from_uuid(timing_uuid) + variants = timing.alternative_images_list + 
number_of_variants = len(variants) + if number_of_variants == 1: + print("No new generation to promote") + else: + promote_image_variant(timing_uuid, number_of_variants - 1) + else: + print("No new generation to promote") + + +def restyle_images(timing_uuid, source_image) -> InternalFileObject: + data_repo = DataRepo() + timing: InternalFrameTimingObject = data_repo.get_timing_from_uuid( + timing_uuid) + + model_name = timing.model.name + strength = timing.strength + + if model_name == "stable-diffusion-img2img-v2.1": + output_file = prompt_model_stability(timing_uuid, source_image) + elif model_name == "depth2img": + output_file = prompt_model_depth2img( + strength, timing_uuid, source_image) + elif model_name == "pix2pix": + output_file = prompt_model_pix2pix(timing_uuid, source_image) + elif model_name == "LoRA": + output_file = prompt_model_lora(timing_uuid, source_image) + elif model_name == "controlnet": + output_file = prompt_model_controlnet(timing_uuid, source_image) + elif model_name == "Dreambooth": + output_file = prompt_model_dreambooth(timing_uuid, source_image) + elif model_name == 'StyleGAN-NADA': + output_file = prompt_model_stylegan_nada(timing_uuid, source_image) + elif model_name == "stable_diffusion_xl": + output_file = prompt_model_stable_diffusion_xl(timing_uuid) + elif model_name == "real-esrgan-upscaling": + output_file = prompt_model_real_esrgan_upscaling(source_image) + elif model_name == 'controlnet_1_1_x_realistic_vision_v2_0': + output_file = prompt_model_controlnet_1_1_x_realistic_vision_v2_0( + source_image) + elif model_name == 'urpm-v1.3': + output_file = prompt_model_urpm_v1_3(source_image) + + return output_file + + + +def prompt_clip_interrogator(input_image, which_model, best_or_fast): + if which_model == "Stable Diffusion 1.5": + which_model = "ViT-L-14/openai" + elif which_model == "Stable Diffusion 2": + which_model = "ViT-H-14/laion2b_s32b_b79k" + + if not input_image.startswith("http"): + input_image = open(input_image, 
"rb") + + ml_client = get_ml_client() + output = ml_client.predict_model_output( + REPLICATE_MODEL.clip_interrogator, image=input_image, clip_model_name=which_model, mode=best_or_fast) + + return output + +def prompt_model_real_esrgan_upscaling(input_image): + data_repo = DataRepo() + + if not input_image.startswith("http"): + input_image = open(input_image, "rb") + + ml_client = get_ml_client() + output = ml_client.predict_model_output( + REPLICATE_MODEL.real_esrgan_upscale, image=input_image, upscale=2 + ) + + filename = str(uuid.uuid4()) + ".png" + output_file = data_repo.create_file(name=filename, type=InternalFileType.IMAGE.value, + hosted_url=output) + return output_file + +# TODO: fix the options input, only certain words can be input in this +def prompt_model_stylegan_nada(timing_uuid, input_image): + data_repo = DataRepo() + timing: InternalFrameTimingObject = data_repo.get_timing_from_uuid( + timing_uuid) + + input_file = input_image.location + if 'http' in input_image.location: + input_file = open(input_image.location, 'rb') + + ml_client = get_ml_client() + output = ml_client.predict_model_output(REPLICATE_MODEL.stylegan_nada, input=input_file, + output_style=timing.prompt) + filename = str(uuid.uuid4()) + ".png" + image_file = data_repo.create_file(name=filename, type=InternalFileType.IMAGE.value, + hosted_url=output[0]) + output_file = resize_image(timing.project.name, 512, 512, image_file) + + return output_file + +def prompt_model_stable_diffusion_xl(timing_uuid): + data_repo = DataRepo() + timing: InternalFrameTimingObject = data_repo.get_timing_from_uuid( + timing_uuid) + + ml_client = get_ml_client() + output = ml_client.predict_model_output(REPLICATE_MODEL.sdxl, prompt=timing.prompt) + filename = str(uuid.uuid4()) + ".png" + image_file = data_repo.create_file(name=filename, type=InternalFileType.IMAGE.value, + hosted_url=output[0]) + output_file = resize_image(timing.project.name, 512, 512, image_file) + + return output_file + +def 
prompt_model_stability(timing_uuid, input_image_file: InternalFileObject): + data_repo = DataRepo() + timing: InternalFrameTimingObject = data_repo.get_timing_from_uuid( + timing_uuid) + project_settings: InternalSettingObject = data_repo.get_project_setting( + timing.project.uuid) + + index_of_current_item = timing.aux_frame_index + input_image = input_image_file.location + prompt = timing.prompt + strength = timing.strength + if not input_image.startswith("http"): + input_image = open(input_image, "rb") + + ml_client = get_ml_client() + output = ml_client.predict_model_output( + REPLICATE_MODEL.img2img_sd_2_1, + image=input_image, + prompt_strength=float(strength), + prompt=prompt, + negative_prompt=timing.negative_prompt, + width=project_settings.width, + height=project_settings.height, + guidance_scale=timing.guidance_scale, + seed=timing.seed, + num_inference_steps=timing.num_inteference_steps + ) + + filename = str(uuid.uuid4()) + ".png" + image_file: InternalFileObject = data_repo.create_file(name=filename, type=InternalFileType.IMAGE.value, + hosted_url=output[0], tag=InternalFileTag.GENERATED_VIDEO.value) + + return image_file + + +def prompt_model_dreambooth(timing_uuid, source_image_file: InternalFileObject): + data_repo = DataRepo() + + if not ('dreambooth_model_uuid' in st.session_state and st.session_state['dreambooth_model_uuid']): + st.error('No dreambooth model selected') + return + + timing: InternalFrameTimingObject = data_repo.get_timing_from_uuid( + timing_uuid) + timing_details: List[InternalFrameTimingObject] = data_repo.get_timing_list_from_project( + timing.project.uuid) + + project_settings: InternalSettingObject = data_repo.get_project_setting( + timing.project.uuid) + + dreambooth_model: InternalAIModelObject = data_repo.get_ai_model_from_uuid(st.session_state['dreambooth_model_uuid']) + + model_name = dreambooth_model.name + image_number = timing.aux_frame_index + prompt = timing.prompt + strength = timing.strength + negative_prompt = 
timing.negative_prompt + guidance_scale = timing.guidance_scale + seed = timing.seed + num_inference_steps = timing.num_inteference_steps + + model_id = dreambooth_model.replicate_url + + ml_client = get_ml_client() + + source_image = source_image_file.location + if timing_details[image_number].adapter_type == "Yes": + if source_image.startswith("http"): + control_image = source_image + else: + control_image = open(source_image, "rb") + else: + control_image = None + + # version of models that were custom created has to be fetched + if not dreambooth_model.version: + version = ml_client.get_model_version_from_id(model_id) + data_repo.update_ai_model(uuid=dreambooth_model.uuid, version=version) + else: + version = dreambooth_model.version + + model_version = ml_client.get_model_by_name( + f"{REPLICATE_USER}/{model_name}", version) + + if source_image.startswith("http"): + input_image = source_image + else: + input_image = open(source_image, "rb") + + input_data = { + "image": input_image, + "prompt": prompt, + "prompt_strength": float(strength), + "height": int(project_settings.height), + "width": int(project_settings.width), + "disable_safety_check": True, + "negative_prompt": negative_prompt, + "guidance_scale": float(guidance_scale), + "seed": int(seed), + "num_inference_steps": int(num_inference_steps) + } + + if control_image != None: + input_data['control_image'] = control_image + + output = model_version.predict(**input_data) + + for i in output: + filename = str(uuid.uuid4()) + ".png" + image_file = data_repo.create_file( + name=filename, type=InternalFileType.IMAGE.value, hosted_url=i, tag=InternalFileTag.GENERATED_VIDEO.value) + return image_file + + return None + + +def prompt_model_depth2img(strength, timing_uuid, source_image) -> InternalFileObject: + data_repo = DataRepo() + timing: InternalFrameTimingObject = data_repo.get_timing_from_uuid( + timing_uuid) + + prompt = timing.prompt + num_inference_steps = timing.num_inteference_steps + guidance_scale 
= timing.guidance_scale + negative_prompt = timing.negative_prompt + if not source_image.startswith("http"): + source_image = open(source_image, "rb") + + ml_client = get_ml_client() + output = ml_client.predict_model_output(REPLICATE_MODEL.jagilley_controlnet_depth2img, input_image=source_image, + prompt_strength=float(strength), prompt=prompt, negative_prompt=negative_prompt, + num_inference_steps=num_inference_steps, guidance_scale=guidance_scale) + + filename = str(uuid.uuid4()) + ".png" + image_file: InternalFileObject = data_repo.create_file(name=filename, type=InternalFileType.IMAGE.value, + hosted_url=output[0]) + return image_file + + +def prompt_model_blip2(input_image, query): + if not input_image.startswith("http"): + input_image = open(input_image, "rb") + + ml_client = get_ml_client() + output = ml_client.predict_model_output( + REPLICATE_MODEL.salesforce_blip_2, image=input_image, question=query) + + return output + + +def prompt_model_pix2pix(timing_uuid, input_image_file: InternalFileObject): + data_repo = DataRepo() + timing: InternalFrameTimingObject = data_repo.get_timing_from_uuid( + timing_uuid) + + prompt = timing.prompt + guidance_scale = timing.guidance_scale + seed = timing.seed + input_image = input_image_file.location + if not input_image.startswith("http"): + input_image = open(input_image, "rb") + + ml_client = get_ml_client() + output = ml_client.predict_model_output(REPLICATE_MODEL.arielreplicate, input_image=input_image, instruction_text=prompt, + seed=seed, cfg_image=1.2, cfg_text=guidance_scale, resolution=704) + + filename = str(uuid.uuid4()) + ".png" + image_file: InternalFileObject = data_repo.create_file(name=filename, type=InternalFileType.IMAGE.value, + hosted_url=output) + return image_file + + +def facial_expression_recognition(input_image): + if not input_image.startswith("http"): + input_image = open(input_image, "rb") + + ml_client = get_ml_client() + output = ml_client.predict_model_output( + 
REPLICATE_MODEL.phamquiluan_face_recognition, input_path=input_image) + + emo_label = output[0]["emo_label"] + if emo_label == "disgust": + emo_label = "disgusted" + elif emo_label == "fear": + emo_label = "fearful" + elif emo_label == "surprised": + emo_label = "surprised" + emo_proba = output[0]["emo_proba"] + if emo_proba > 0.95: + emotion = (f"very {emo_label} expression") + elif emo_proba > 0.85: + emotion = (f"{emo_label} expression") + elif emo_proba > 0.75: + emotion = (f"somewhat {emo_label} expression") + elif emo_proba > 0.65: + emotion = (f"slightly {emo_label} expression") + elif emo_proba > 0.55: + emotion = (f"{emo_label} expression") + else: + emotion = (f"neutral expression") + return emotion + + +def prompt_model_controlnet(timing_uuid, input_image): + data_repo = DataRepo() + timing: InternalFrameTimingObject = data_repo.get_timing_from_uuid( + timing_uuid) + + if timing.adapter_type == "normal": + model = REPLICATE_MODEL.jagilley_controlnet_normal + elif timing.adapter_type == "canny": + model = REPLICATE_MODEL.jagilley_controlnet_canny + elif timing.adapter_type == "hed": + model = REPLICATE_MODEL.jagilley_controlnet_hed + elif timing.adapter_type == "scribble": + model = REPLICATE_MODEL.jagilley_controlnet_scribble + if timing.canny_image != "": + input_image = timing.canny_image + elif timing.adapter_type == "seg": + model = REPLICATE_MODEL.jagilley_controlnet_seg + elif timing.adapter_type == "hough": + model = REPLICATE_MODEL.jagilley_controlnet_hough + elif timing.adapter_type == "depth2img": + model = REPLICATE_MODEL.jagilley_controlnet_depth2img + elif timing.adapter_type == "pose": + model = REPLICATE_MODEL.jagilley_controlnet_pose + + if not input_image.startswith("http"): + input_image = open(input_image, "rb") + + inputs = { + 'image': input_image, + 'prompt': timing.prompt, + 'num_samples': "1", + 'image_resolution': "512", + 'ddim_steps': timing.num_inteference_steps, + 'scale': timing.guidance_scale, + 'eta': 0, + 'seed': 
timing.seed, + 'a_prompt': "best quality, extremely detailed", + 'n_prompt': timing.negative_prompt + ", longbody, lowres, bad anatomy, bad hands, missing fingers, extra digit, fewer digits, cropped, worst quality, low quality", + 'detect_resolution': 512, + 'bg_threshold': 0, + 'low_threshold': timing.low_threshold, + 'high_threshold': timing.high_threshold, + } + + ml_client = get_ml_client() + output = ml_client.predict_model_output(model, **inputs) + + return output[1] + + +def prompt_model_urpm_v1_3(timing_uuid, source_image): + data_repo = DataRepo() + timing = data_repo.get_timing_from_uuid(timing_uuid) + + if not source_image.startswith("http"): + source_image = open(source_image, "rb") + + inputs = { + 'image': source_image, + 'prompt': timing.prompt, + 'negative_prompt': timing.negative_prompt, + 'strength': timing.strength, + 'guidance_scale': timing.guidance_scale, + 'num_inference_steps': timing.num_inference_steps, + 'upscale': 1, + 'seed': timing.seed, + } + + ml_client = get_ml_client() + output = ml_client.predict_model_output(REPLICATE_MODEL.urpm, **inputs) + + return output[0] + + +def prompt_model_controlnet_1_1_x_realistic_vision_v2_0(timing_uuid, input_image): + data_repo = DataRepo() + timing = data_repo.get_timing_from_uuid(timing_uuid) + + if not input_image.startswith("http"): + input_image = open(input_image, "rb") + + inputs = { + 'image': input_image, + 'prompt': timing.prompt, + 'ddim_steps': timing.num_inference_steps, + 'strength': timing.strength, + 'scale': timing.guidance_scale, + 'seed': timing.seed, + } + + ml_client = get_ml_client() + output = ml_client.predict_model_output( + REPLICATE_MODEL.controlnet_1_1_x_realistic_vision_v2_0, **inputs) + + return output[1] + + +def prompt_model_lora(timing_uuid, source_image_file: InternalFileObject) -> InternalFileObject: + data_repo = DataRepo() + timing: InternalFrameTimingObject = data_repo.get_timing_from_uuid( + timing_uuid) + project_settings: InternalSettingObject = 
data_repo.get_project_setting( + timing.project.uuid) + + lora_urls = "" + lora_scales = "" + if "lora_model_1_url" in st.session_state and st.session_state["lora_model_1_url"]: + lora_urls += st.session_state["lora_model_1_url"] + lora_scales += "0.5" + if "lora_model_2_url" in st.session_state and st.session_state["lora_model_2_url"]: + ctn = "" if not len(lora_urls) else " | " + lora_urls += ctn + st.session_state["lora_model_2_url"] + lora_scales += ctn + "0.5" + if st.session_state["lora_model_3_url"]: + ctn = "" if not len(lora_urls) else " | " + lora_urls += ctn + st.session_state["lora_model_3_url"] + lora_scales += ctn + "0.5" + + source_image = source_image_file.location + if source_image[:4] == "http": + input_image = source_image + else: + input_image = open(source_image, "rb") + + if timing.adapter_type != "None": + if source_image[:4] == "http": + adapter_condition_image = source_image + else: + adapter_condition_image = open(source_image, "rb") + else: + adapter_condition_image = "" + + inputs = { + 'prompt': timing.prompt, + 'negative_prompt': timing.negative_prompt, + 'width': project_settings.width, + 'height': project_settings.height, + 'num_outputs': 1, + 'image': input_image, + 'num_inference_steps': timing.num_inteference_steps, + 'guidance_scale': timing.guidance_scale, + 'prompt_strength': timing.strength, + 'scheduler': "DPMSolverMultistep", + 'lora_urls': lora_urls, + 'lora_scales': lora_scales, + 'adapter_type': timing.adapter_type, + 'adapter_condition_image': adapter_condition_image, + } + + ml_client = get_ml_client() + max_attempts = 3 + attempts = 0 + while attempts < max_attempts: + try: + output = ml_client.predict_model_output( + REPLICATE_MODEL.clones_lora_training_2, **inputs) + print(output) + filename = str(uuid.uuid4()) + ".png" + file: InternalFileObject = data_repo.create_file(name=filename, type=InternalFileType.IMAGE.value, + hosted_url=output[0]) + return file + except replicate.exceptions.ModelError as e: + if "NSFW 
content detected" in str(e): + print("NSFW content detected. Attempting to rerun code...") + attempts += 1 + continue + else: + raise e + except Exception as e: + raise e + +def inpainting(input_image: str, prompt, negative_prompt, timing_uuid, invert_mask, pass_mask=False) -> InternalFileObject: + data_repo = DataRepo() + timing: InternalFrameTimingObject = data_repo.get_timing_from_uuid( + timing_uuid) + + if pass_mask == False: + mask = timing.mask.location + else: + # TODO: store the local temp files in the db too + if SERVER != ServerType.DEVELOPMENT.value: + mask = timing.project.get_temp_mask_file(TEMP_MASK_FILE).location + else: + mask = MASK_IMG_LOCAL_PATH + + if not mask.startswith("http"): + mask = open(mask, "rb") + + if not input_image.startswith("http"): + input_image = open(input_image, "rb") + + ml_client = get_ml_client() + output = ml_client.predict_model_output(REPLICATE_MODEL.andreas_sd_inpainting, mask=mask, image=input_image, prompt=prompt, + invert_mask=invert_mask, negative_prompt=negative_prompt, num_inference_steps=25) + + file_name = str(uuid.uuid4()) + ".png" + image_file = data_repo.create_file( + name=file_name, type=InternalFileType.IMAGE.value, hosted_url=output[0]) + + return image_file + +def remove_background(input_image): + if not input_image.startswith("http"): + input_image = open(input_image, "rb") + + ml_client = get_ml_client() + output = ml_client.predict_model_output( + REPLICATE_MODEL.pollination_modnet, image=input_image) + return output + + +def create_depth_mask_image(input_image, layer, timing_uuid): + if not input_image.startswith("http"): + input_image = open(input_image, "rb") + + ml_client = get_ml_client() + output = ml_client.predict_model_output( + REPLICATE_MODEL.cjwbw_midas, image=input_image, model_type="dpt_beit_large_512") + try: + urllib.request.urlretrieve(output, "videos/temp/depth.png") + except Exception as e: + print(e) + + depth_map = Image.open("videos/temp/depth.png") + depth_map = 
depth_map.convert("L") # Convert to grayscale image + pixels = depth_map.load() + mask = Image.new("L", depth_map.size) + mask_pixels = mask.load() + + fg_mask = Image.new("L", depth_map.size) if "Foreground" in layer else None + mg_mask = Image.new( + "L", depth_map.size) if "Middleground" in layer else None + bg_mask = Image.new("L", depth_map.size) if "Background" in layer else None + + fg_pixels = fg_mask.load() if fg_mask else None + mg_pixels = mg_mask.load() if mg_mask else None + bg_pixels = bg_mask.load() if bg_mask else None + + for i in range(depth_map.size[0]): + for j in range(depth_map.size[1]): + depth_value = pixels[i, j] + + if fg_pixels: + fg_pixels[i, j] = 0 if depth_value > 200 else 255 + if mg_pixels: + mg_pixels[i, j] = 0 if depth_value <= 200 and depth_value > 50 else 255 + if bg_pixels: + bg_pixels[i, j] = 0 if depth_value <= 50 else 255 + + mask_pixels[i, j] = 255 + if fg_pixels: + mask_pixels[i, j] &= fg_pixels[i, j] + if mg_pixels: + mask_pixels[i, j] &= mg_pixels[i, j] + if bg_pixels: + mask_pixels[i, j] &= bg_pixels[i, j] + + return create_or_update_mask(timing_uuid, mask) + +def dynamic_prompting(prompt, source_image, timing_uuid): + data_repo = DataRepo() + timing: InternalFrameTimingObject = data_repo.get_timing_from_uuid( + timing_uuid) + + if "[expression]" in prompt: + prompt_expression = facial_expression_recognition(source_image) + prompt = prompt.replace("[expression]", prompt_expression) + + if "[location]" in prompt: + prompt_location = prompt_model_blip2( + source_image, "What's surrounding the character?") + prompt = prompt.replace("[location]", prompt_location) + + if "[mouth]" in prompt: + prompt_mouth = prompt_model_blip2( + source_image, "is their mouth open or closed?") + prompt = prompt.replace("[mouth]", "mouth is " + str(prompt_mouth)) + + if "[looking]" in prompt: + prompt_looking = prompt_model_blip2( + source_image, "the person is looking") + prompt = prompt.replace("[looking]", "looking " + str(prompt_looking)) 
+ + data_repo.update_specific_timing(timing_uuid, prompt=prompt) \ No newline at end of file diff --git a/ui_components/methods/training_methods.py b/ui_components/methods/training_methods.py new file mode 100644 index 00000000..3e5f210d --- /dev/null +++ b/ui_components/methods/training_methods.py @@ -0,0 +1,84 @@ +import json +from shared.constants import AIModelCategory +from ui_components.methods.common_methods import convert_image_list_to_file_list +from utils.common_utils import get_current_user_uuid +from utils.data_repo.data_repo import DataRepo +from utils.ml_processor.ml_interface import get_ml_client +from utils.ml_processor.replicate.constants import REPLICATE_MODEL + +# NOTE: making an exception for this function, passing just the image urls instead of +# image files +def train_model(images_list, instance_prompt, class_prompt, max_train_steps, + model_name, type_of_model, type_of_task, resolution, controller_type, model_type_list): + # prepare and upload the training data (images.zip) + ml_client = get_ml_client() + try: + training_file_url = ml_client.upload_training_data(images_list) + except Exception as e: + raise e + + # training the model + model_name = model_name.replace(" ", "-").lower() + if type_of_model == "Dreambooth": + return train_dreambooth_model(instance_prompt, class_prompt, training_file_url, + max_train_steps, model_name, images_list, controller_type, model_type_list) + elif type_of_model == "LoRA": + return train_lora_model(training_file_url, type_of_task, resolution, model_name, images_list, model_type_list) + + +# INFO: images_list passed here are converted to internal files after they are used for training +def train_dreambooth_model(instance_prompt, class_prompt, training_file_url, max_train_steps, model_name, images_list: List[str], controller_type, model_type_list): + ml_client = get_ml_client() + response = ml_client.dreambooth_training( + training_file_url, instance_prompt, class_prompt, max_train_steps, model_name, 
controller_type, len(images_list)) + training_status = response["status"] + + model_id = response["id"] + if training_status == "queued": + file_list = convert_image_list_to_file_list(images_list) + file_uuid_list = [file.uuid for file in file_list] + file_uuid_list = json.dumps(file_uuid_list) + + model_data = { + "name": model_name, + "user_id": get_current_user_uuid(), + "replicate_model_id": model_id, + "replicate_url": response["model"], + "diffusers_url": "", + "category": AIModelCategory.DREAMBOOTH.value, + "training_image_list": file_uuid_list, + "keyword": instance_prompt, + "custom_trained": True, + "model_type": model_type_list + } + + data_repo = DataRepo() + data_repo.create_ai_model(**model_data) + + return "Success - Training Started. Please wait 10-15 minutes for the model to be trained." + else: + return "Failed" + +# INFO: images_list passed here are converted to internal files after they are used for training +def train_lora_model(training_file_url, type_of_task, resolution, model_name, images_list, model_type_list): + data_repo = DataRepo() + ml_client = get_ml_client() + output = ml_client.predict_model_output(REPLICATE_MODEL.clones_lora_training, instance_data=training_file_url, + task=type_of_task, resolution=int(resolution)) + + file_list = convert_image_list_to_file_list(images_list) + file_uuid_list = [file.uuid for file in file_list] + file_uuid_list = json.dumps(file_uuid_list) + model_data = { + "name": model_name, + "user_id": get_current_user_uuid(), + "replicate_url": output, + "diffusers_url": "", + "category": AIModelCategory.LORA.value, + "training_image_list": file_uuid_list, + "custom_trained": True, + "model_type": model_type_list + } + + data_repo.create_ai_model(**model_data) + return f"Successfully trained - the model '{model_name}' is now available for use!" 
diff --git a/ui_components/methods/video_methods.py b/ui_components/methods/video_methods.py index aed50677..11a41506 100644 --- a/ui_components/methods/video_methods.py +++ b/ui_components/methods/video_methods.py @@ -1,16 +1,24 @@ import os +import random +import string import tempfile from typing import List +import ffmpeg +import streamlit as st import uuid -from moviepy.editor import concatenate_videoclips, TextClip, VideoFileClip, vfx +from moviepy.editor import concatenate_videoclips, TextClip, VideoFileClip, vfx, AudioFileClip from moviepy.video.compositing.CompositeVideoClip import CompositeVideoClip from backend.models import InternalFileObject -from ui_components.methods.common_methods import add_audio_to_video_slice, calculate_desired_duration_of_individual_clip, get_audio_bytes_for_slice, update_speed_of_video_clip +from shared.constants import InternalFileTag +from shared.file_upload.s3 import is_s3_image_url +from ui_components.constants import VideoQuality +from ui_components.methods.common_methods import get_audio_bytes_for_slice, update_clip_duration_of_all_timing_frames +from ui_components.methods.file_methods import convert_bytes_to_file, generate_temp_file, save_or_host_file_bytes from ui_components.models import InternalFrameTimingObject, InternalSettingObject -from utils.common_utils import convert_bytes_to_file, generate_temp_file, save_or_host_file_bytes from utils.data_repo.data_repo import DataRepo from utils.media_processor.interpolator import VideoInterpolator +from utils.media_processor.video import VideoProcessor # returns the timed_clip, which is the interpolated video with correct length @@ -177,7 +185,7 @@ def update_speed_of_video_clip(video_file: InternalFileObject, timing_uuid) -> I timing_uuid) desired_duration = timing.clip_duration - animation_style = st.session_state[f"{timing_uuid}_animation_style"] + animation_style = timing.animation_style temp_video_file = None if video_file.hosted_url and 
is_s3_image_url(video_file.hosted_url): @@ -193,15 +201,13 @@ def update_speed_of_video_clip(video_file: InternalFileObject, timing_uuid) -> I animation_style, desired_duration ) - - hosted_url = save_or_host_file_bytes(video_bytes, new_file_location) - if hosted_url: - video_file: InternalFileObject = data_repo.create_file( - name=new_file_name, type=InternalFileType.VIDEO.value, hosted_url=hosted_url) - else: - video_file: InternalFileObject = data_repo.create_file( - name=new_file_name, type=InternalFileType.VIDEO.value, local_path=new_file_location) + video_file = convert_bytes_to_file( + new_file_location, + "video/mp4", + video_bytes, + timing.project.uuid + ) if temp_video_file: os.remove(temp_video_file.name) @@ -279,7 +285,7 @@ def render_video(final_video_name, project_uuid, quality, file_tag=InternalFileT if quality == VideoQuality.HIGH.value: data_repo.update_specific_timing( current_timing.uuid, timed_clip_id=None) - interpolation_steps = calculate_dynamic_interpolations_steps( + interpolation_steps = VideoInterpolator.calculate_dynamic_interpolations_steps( timing_details[index_of_current_item].clip_duration) if not timing.interpolation_steps or timing.interpolation_steps < interpolation_steps: data_repo.update_specific_timing( @@ -290,9 +296,22 @@ def render_video(final_video_name, project_uuid, quality, file_tag=InternalFileT current_timing.uuid, interpolation_steps=3) if not timing.interpolated_clip: - video_location = create_interpolated_clip(current_timing.uuid) + next_timing = data_repo.get_next_timing(current_timing.uuid) + video_bytes = VideoInterpolator.create_interpolated_clip( + img_location_list=[current_timing.source_image.location, next_timing.source_image.location], + interpolation_steps=current_timing.interpolation_steps + ) + + file_location = "videos/" + current_timing.project.name + "/assets/videos/0_raw/" + str(uuid.uuid4()) + ".mp4" + video_file = convert_bytes_to_file( + file_location, + "video/mp4", + video_bytes, + 
current_timing.project.uuid + ) + data_repo.update_specific_timing( - current_timing.uuid, interpolated_clip_id=video_location.uuid) + current_timing.uuid, interpolated_clip_id=video_file.uuid) project_settings: InternalSettingObject = data_repo.get_project_setting( timing.project.uuid) @@ -384,22 +403,15 @@ def render_video(final_video_name, project_uuid, quality, file_tag=InternalFileT video_bytes = None with open(temp_video_file.name, "rb") as f: video_bytes = f.read() - - hosted_url = save_or_host_file_bytes(video_bytes, output_video_file) - - file_data = { - "name": final_video_name, - "type": InternalFileType.VIDEO.value, - "tag": file_tag, - "project_id": project_uuid - } - - if hosted_url: - file_data.update({"hosted_url": hosted_url}) - else: - file_data.update({"local_path": output_video_file}) - data_repo.create_file(**file_data) + _ = convert_bytes_to_file( + output_video_file, + "video/mp4", + video_bytes, + project_uuid, + filename=final_video_name, + tag=file_tag + ) for file in temp_file_list: os.remove(file.name) diff --git a/utils/common_utils.py b/utils/common_utils.py index 790361f9..1c52c52d 100644 --- a/utils/common_utils.py +++ b/utils/common_utils.py @@ -1,22 +1,10 @@ -from io import BytesIO -import io from pathlib import Path import os import csv -import tempfile -from typing import Union -from urllib.parse import urlparse -import uuid -import requests import streamlit as st import json -from shared.constants import SERVER, InternalFileType, ServerType -from shared.logging.constants import LoggingType -from shared.logging.logging import AppLogger -from PIL import Image -import numpy as np +from shared.constants import SERVER, ServerType from ui_components.models import InternalUserObject - from utils.constants import LOGGED_USER from utils.data_repo.data_repo import DataRepo @@ -100,25 +88,6 @@ def copy_sample_assets(project_name): dest = "videos/" + project_name + "/assets/resources/input_videos/sample.mp4" shutil.copyfile(source, dest) - 
# copy selected frames - # select_samples_path = 'sample_assets/sample_images' - # file_list = os.listdir(select_samples_path) - # file_paths = [] - # for item in file_list: - # item_path = os.path.join(select_samples_path, item) - # if os.path.isfile(item_path): - # file_paths.append(item_path) - - # for idx in range(len(file_list)): - # source = file_paths[idx] - # dest = f"videos/{project_name}/assets/frames/1_selected/{file_list[idx]}" - # shutil.copyfile(source, dest) - - # copy timings file - # source = "sample_assets/frames/meta_data/timings.csv" - # dest = f"videos/{project_name}/timings.csv" - # shutil.copyfile(source, dest) - def create_working_assets(project_name): if SERVER != ServerType.DEVELOPMENT.value: return @@ -191,127 +160,6 @@ def get_current_user_uuid(): else: return None -def zoom_and_crop(file, width, height): - # scaling - s_x = width / file.width - s_y = height / file.height - scale = max(s_x, s_y) - new_width = int(file.width * scale) - new_height = int(file.height * scale) - file = file.resize((new_width, new_height)) - - # cropping - left = (file.width - width) // 2 - top = (file.height - height) // 2 - right = (file.width + width) // 2 - bottom = (file.height + height) // 2 - file = file.crop((left, top, right, bottom)) - - return file - - -# depending on the environment it will either save or host the PIL image object -def save_or_host_file(file, path, mime_type='image/png'): - data_repo = DataRepo() - # TODO: fix session state management, remove direct access out side the main code - project_setting = data_repo.get_project_setting(st.session_state['project_uuid']) - if project_setting: - file = zoom_and_crop(file, project_setting.width, project_setting.height) - else: - # new project - file = zoom_and_crop(file, 512, 512) - - uploaded_url = None - if SERVER != ServerType.DEVELOPMENT.value: - image_bytes = BytesIO() - file.save(image_bytes, format=mime_type.split('/')[1]) - image_bytes.seek(0) - - uploaded_url = 
data_repo.upload_file(image_bytes, '.png') - else: - os.makedirs(os.path.dirname(path), exist_ok=True) - file.save(path) - - return uploaded_url - -def save_or_host_file_bytes(video_bytes, path, ext=".mp4"): - uploaded_url = None - if SERVER != ServerType.DEVELOPMENT.value: - data_repo = DataRepo() - uploaded_url = data_repo.upload_file(video_bytes, ext) - else: - os.makedirs(os.path.dirname(path), exist_ok=True) - with open(path, 'wb') as f: - f.write(video_bytes) - - return uploaded_url - -def add_temp_file_to_project(project_uuid, key, hosted_url): - data_repo = DataRepo() - - file_data = { - "name": str(uuid.uuid4()) + ".png", - "type": InternalFileType.IMAGE.value, - "project_id": project_uuid, - 'hosted_url': hosted_url - } - - temp_file = data_repo.create_file(**file_data) - project = data_repo.get_project_from_uuid(project_uuid) - temp_file_list = project.project_temp_file_list - temp_file_list.update({key: temp_file.uuid}) - temp_file_list = json.dumps(temp_file_list) - project_data = { - 'uuid': project_uuid, - 'temp_file_list': temp_file_list - } - data_repo.update_project(**project_data) - - -def generate_temp_file(url, ext=".mp4"): - response = requests.get(url) - if not response.ok: - raise ValueError(f"Could not download video from URL: {url}") - - temp_file = tempfile.NamedTemporaryFile(delete=False, suffix=ext, mode='wb') - temp_file.write(response.content) - temp_file.close() - - return temp_file - -def generate_pil_image(img: Union[Image.Image, str, np.ndarray, io.BytesIO]): - # Check if img is a PIL image - if isinstance(img, Image.Image): - pass - - # Check if img is a URL - elif isinstance(img, str) and bool(urlparse(img).netloc): - response = requests.get(img) - img = Image.open(BytesIO(response.content)) - - # Check if img is a local file - elif isinstance(img, str): - img = Image.open(img) - - # Check if img is a numpy ndarray - elif isinstance(img, np.ndarray): - img = Image.fromarray(img) - - # Check if img is a BytesIO stream - elif 
isinstance(img, io.BytesIO): - img = Image.open(img) - - else: - raise ValueError( - "Invalid image input. Must be a PIL image, a URL string, a local file path string or a numpy ndarray.") - - return img - -def generate_temp_file_from_uploaded_file(uploaded_file): - if uploaded_file is not None: - with tempfile.NamedTemporaryFile(delete=False) as temp_file: - temp_file.write(uploaded_file.read()) - return temp_file def reset_project_state(): keys_to_delete = [ @@ -380,22 +228,3 @@ def reset_project_state(): if key in st.session_state: del st.session_state[key] - -def convert_bytes_to_file(file_location_to_save, mime_type, file_bytes, project_uuid): - data_repo = DataRepo() - - hosted_url = save_or_host_file_bytes(file_bytes, file_location_to_save, mime_type=mime_type) - file_data = { - "name": str(uuid.uuid4()), - "type": InternalFileType.IMAGE.value, - "project_id": project_uuid.uuid - } - - if hosted_url: - file_data.update({'hosted_url': hosted_url}) - else: - file_data.update({'local_path': file_location_to_save}) - - file = data_repo.create_file(**file_data) - - return file \ No newline at end of file From ed02598a78b04e78dc22b5bfaedb11c16f8a9a33 Mon Sep 17 00:00:00 2001 From: piyushK52 Date: Wed, 13 Sep 2023 14:21:46 +0530 Subject: [PATCH 005/164] file imports fixed --- banodoco_settings.py | 3 ++- ui_components/components/custom_models_page.py | 2 +- ui_components/components/frame_styling_page.py | 8 +++++--- ui_components/components/video_rendering_page.py | 2 +- ui_components/methods/ml_methods.py | 9 ++++++++- ui_components/methods/training_methods.py | 6 +++++- ui_components/methods/video_methods.py | 12 ++++++++++-- ui_components/widgets/cropping_element.py | 2 +- .../widgets/frame_clip_generation_elements.py | 4 ++-- ui_components/widgets/frame_selector.py | 8 +------- ui_components/widgets/prompt_finder.py | 4 ++-- ui_components/widgets/styling_element.py | 2 +- utils/media_processor/interpolator.py | 2 +- 13 files changed, 40 insertions(+), 24 
deletions(-) diff --git a/banodoco_settings.py b/banodoco_settings.py index 29518cfb..f0046045 100644 --- a/banodoco_settings.py +++ b/banodoco_settings.py @@ -8,8 +8,9 @@ from shared.logging.logging import AppLogger from shared.constants import AnimationStyleType from ui_components.methods.common_methods import add_image_variant +from ui_components.methods.file_methods import save_or_host_file from ui_components.models import InternalAppSettingObject, InternalFrameTimingObject, InternalProjectObject, InternalUserObject -from utils.common_utils import copy_sample_assets, create_working_assets, save_or_host_file +from utils.common_utils import create_working_assets from utils.constants import ImageStage from utils.data_repo.data_repo import DataRepo from utils.ml_processor.replicate.constants import REPLICATE_MODEL diff --git a/ui_components/components/custom_models_page.py b/ui_components/components/custom_models_page.py index 4f52467e..769132cb 100644 --- a/ui_components/components/custom_models_page.py +++ b/ui_components/components/custom_models_page.py @@ -2,7 +2,7 @@ from typing import List import streamlit as st from shared.constants import AIModelCategory, AIModelType -from ui_components.methods.common_methods import train_model +from ui_components.methods.training_methods import train_model from ui_components.models import InternalAIModelObject from utils.common_utils import get_current_user_uuid diff --git a/ui_components/components/frame_styling_page.py b/ui_components/components/frame_styling_page.py index 3fef1207..ea50c915 100644 --- a/ui_components/components/frame_styling_page.py +++ b/ui_components/components/frame_styling_page.py @@ -3,11 +3,14 @@ import time from PIL import Image from ui_components.methods.common_methods import delete_frame, drawing_mode, promote_image_variant, save_uploaded_image, \ - trigger_restyling_process, create_timings_row_at_frame_number, move_frame, \ - calculate_desired_duration_of_individual_clip, 
create_or_get_single_preview_video, \ + create_timings_row_at_frame_number, move_frame, calculate_desired_duration_of_individual_clip, \ calculate_desired_duration_of_individual_clip, apply_image_transformations, \ ai_frame_editing_element, clone_styling_settings, zoom_inputs +from ui_components.methods.file_methods import generate_pil_image, save_or_host_file +from ui_components.methods.ml_methods import trigger_restyling_process +from ui_components.methods.video_methods import create_or_get_single_preview_video from ui_components.widgets.cropping_element import manual_cropping_element, precision_cropping_element +from ui_components.widgets.frame_clip_generation_elements import current_individual_clip_element, current_preview_video_element, update_animation_style_element from ui_components.widgets.frame_time_selector import single_frame_time_selector, update_frame_time from ui_components.widgets.frame_selector import frame_selector_widget from ui_components.widgets.image_carousal import display_image @@ -19,7 +22,6 @@ import math from ui_components.constants import WorkflowStageType -from utils.common_utils import generate_pil_image, generate_temp_file, save_or_host_file from utils.constants import ImageStage from utils.data_repo.data_repo import DataRepo diff --git a/ui_components/components/video_rendering_page.py b/ui_components/components/video_rendering_page.py index 621ba78e..9832cf1a 100644 --- a/ui_components/components/video_rendering_page.py +++ b/ui_components/components/video_rendering_page.py @@ -2,11 +2,11 @@ import datetime import streamlit as st from shared.constants import InternalFileTag, InternalFileType -from ui_components.methods.common_methods import render_video import random import time import os import re +from ui_components.methods.video_methods import render_video from ui_components.models import InternalFileObject, InternalFrameTimingObject from ui_components.widgets.attach_audio_element import attach_audio_element diff --git 
a/ui_components/methods/ml_methods.py b/ui_components/methods/ml_methods.py index 56f191a6..14a68c04 100644 --- a/ui_components/methods/ml_methods.py +++ b/ui_components/methods/ml_methods.py @@ -8,7 +8,6 @@ from backend.models import InternalFileObject from shared.constants import REPLICATE_USER, SERVER, InternalFileTag, InternalFileType, ServerType from ui_components.constants import MASK_IMG_LOCAL_PATH, TEMP_MASK_FILE -from ui_components.methods.common_methods import add_image_variant, create_or_update_mask, promote_image_variant, resize_image from ui_components.models import InternalAIModelObject, InternalFrameTimingObject, InternalSettingObject from utils.constants import ImageStage from utils.data_repo.data_repo import DataRepo @@ -33,6 +32,8 @@ def trigger_restyling_process( low_threshold, high_threshold ): + from ui_components.methods.common_methods import add_image_variant, promote_image_variant + data_repo = DataRepo() timing: InternalFrameTimingObject = data_repo.get_timing_from_uuid( timing_uuid) @@ -187,6 +188,8 @@ def prompt_model_real_esrgan_upscaling(input_image): # TODO: fix the options input, only certain words can be input in this def prompt_model_stylegan_nada(timing_uuid, input_image): + from ui_components.methods.common_methods import resize_image + data_repo = DataRepo() timing: InternalFrameTimingObject = data_repo.get_timing_from_uuid( timing_uuid) @@ -206,6 +209,8 @@ def prompt_model_stylegan_nada(timing_uuid, input_image): return output_file def prompt_model_stable_diffusion_xl(timing_uuid): + from ui_components.methods.common_methods import resize_image + data_repo = DataRepo() timing: InternalFrameTimingObject = data_repo.get_timing_from_uuid( timing_uuid) @@ -634,6 +639,8 @@ def remove_background(input_image): def create_depth_mask_image(input_image, layer, timing_uuid): + from ui_components.methods.common_methods import create_or_update_mask + if not input_image.startswith("http"): input_image = open(input_image, "rb") diff --git 
a/ui_components/methods/training_methods.py b/ui_components/methods/training_methods.py index 3e5f210d..b331064c 100644 --- a/ui_components/methods/training_methods.py +++ b/ui_components/methods/training_methods.py @@ -1,6 +1,6 @@ import json +from typing import List from shared.constants import AIModelCategory -from ui_components.methods.common_methods import convert_image_list_to_file_list from utils.common_utils import get_current_user_uuid from utils.data_repo.data_repo import DataRepo from utils.ml_processor.ml_interface import get_ml_client @@ -28,6 +28,8 @@ def train_model(images_list, instance_prompt, class_prompt, max_train_steps, # INFO: images_list passed here are converted to internal files after they are used for training def train_dreambooth_model(instance_prompt, class_prompt, training_file_url, max_train_steps, model_name, images_list: List[str], controller_type, model_type_list): + from ui_components.methods.common_methods import convert_image_list_to_file_list + ml_client = get_ml_client() response = ml_client.dreambooth_training( training_file_url, instance_prompt, class_prompt, max_train_steps, model_name, controller_type, len(images_list)) @@ -61,6 +63,8 @@ def train_dreambooth_model(instance_prompt, class_prompt, training_file_url, max # INFO: images_list passed here are converted to internal files after they are used for training def train_lora_model(training_file_url, type_of_task, resolution, model_name, images_list, model_type_list): + from ui_components.methods.common_methods import convert_image_list_to_file_list + data_repo = DataRepo() ml_client = get_ml_client() output = ml_client.predict_model_output(REPLICATE_MODEL.clones_lora_training, instance_data=training_file_url, diff --git a/ui_components/methods/video_methods.py b/ui_components/methods/video_methods.py index 11a41506..3042c161 100644 --- a/ui_components/methods/video_methods.py +++ b/ui_components/methods/video_methods.py @@ -13,8 +13,6 @@ from shared.constants import 
InternalFileTag from shared.file_upload.s3 import is_s3_image_url from ui_components.constants import VideoQuality -from ui_components.methods.common_methods import get_audio_bytes_for_slice, update_clip_duration_of_all_timing_frames -from ui_components.methods.file_methods import convert_bytes_to_file, generate_temp_file, save_or_host_file_bytes from ui_components.models import InternalFrameTimingObject, InternalSettingObject from utils.data_repo.data_repo import DataRepo from utils.media_processor.interpolator import VideoInterpolator @@ -23,6 +21,9 @@ # returns the timed_clip, which is the interpolated video with correct length def create_or_get_single_preview_video(timing_uuid): + from ui_components.methods.file_methods import generate_temp_file, save_or_host_file_bytes + from ui_components.methods.common_methods import get_audio_bytes_for_slice + data_repo = DataRepo() timing: InternalFrameTimingObject = data_repo.get_timing_from_uuid( @@ -90,6 +91,8 @@ def create_or_get_single_preview_video(timing_uuid): # preview_clips have frame numbers on them. 
Preview clip is generated from index-2 to index+2 frames def create_full_preview_video(timing_uuid, speed=1) -> InternalFileObject: + from ui_components.methods.file_methods import save_or_host_file_bytes, convert_bytes_to_file, generate_temp_file + data_repo = DataRepo() timing: InternalFrameTimingObject = data_repo.get_timing_from_uuid( timing_uuid) @@ -179,6 +182,8 @@ def create_full_preview_video(timing_uuid, speed=1) -> InternalFileObject: return video_file def update_speed_of_video_clip(video_file: InternalFileObject, timing_uuid) -> InternalFileObject: + from ui_components.methods.file_methods import generate_temp_file, convert_bytes_to_file + data_repo = DataRepo() timing: InternalFrameTimingObject = data_repo.get_timing_from_uuid( @@ -267,6 +272,9 @@ def add_audio_to_video_slice(video_file, audio_bytes): def render_video(final_video_name, project_uuid, quality, file_tag=InternalFileTag.GENERATED_VIDEO.value): + from ui_components.methods.common_methods import update_clip_duration_of_all_timing_frames + from ui_components.methods.file_methods import convert_bytes_to_file, generate_temp_file + data_repo = DataRepo() timing_details: List[InternalFrameTimingObject] = data_repo.get_timing_list_from_project( diff --git a/ui_components/widgets/cropping_element.py b/ui_components/widgets/cropping_element.py index 1f6b2055..7b49b552 100644 --- a/ui_components/widgets/cropping_element.py +++ b/ui_components/widgets/cropping_element.py @@ -10,8 +10,8 @@ from ui_components.methods.common_methods import apply_image_transformations, fetch_image_by_stage, inpaint_in_black_space_element, reset_zoom_element, save_zoomed_image, zoom_inputs from ui_components.constants import WorkflowStageType +from ui_components.methods.file_methods import generate_pil_image, save_or_host_file from ui_components.models import InternalProjectObject, InternalSettingObject -from utils.common_utils import generate_pil_image, save_or_host_file from utils.data_repo.data_repo import DataRepo diff 
--git a/ui_components/widgets/frame_clip_generation_elements.py b/ui_components/widgets/frame_clip_generation_elements.py index a73f3216..6989aea8 100644 --- a/ui_components/widgets/frame_clip_generation_elements.py +++ b/ui_components/widgets/frame_clip_generation_elements.py @@ -1,8 +1,8 @@ import streamlit as st from shared.constants import AnimationStyleType -from ui_components.methods.common_methods import create_full_preview_video, create_interpolated_clip, update_speed_of_video_clip +from ui_components.methods.file_methods import convert_bytes_to_file +from ui_components.methods.video_methods import create_full_preview_video, update_speed_of_video_clip from ui_components.models import InternalFrameTimingObject -from utils.common_utils import convert_bytes_to_file from utils.data_repo.data_repo import DataRepo from utils.media_processor.interpolator import VideoInterpolator diff --git a/ui_components/widgets/frame_selector.py b/ui_components/widgets/frame_selector.py index 4b5d7171..53572b45 100644 --- a/ui_components/widgets/frame_selector.py +++ b/ui_components/widgets/frame_selector.py @@ -1,15 +1,9 @@ - - import streamlit as st -import time -from shared.constants import InternalFileType from ui_components.widgets.frame_time_selector import single_frame_time_selector from ui_components.widgets.image_carousal import display_image -from utils.constants import ImageStage from utils.data_repo.data_repo import DataRepo from ui_components.constants import WorkflowStageType -from shared.file_upload.s3 import upload_file -from ui_components.methods.common_methods import delete_frame, add_image_variant, promote_image_variant, save_uploaded_image, replace_image_widget +from ui_components.methods.common_methods import delete_frame, replace_image_widget def frame_selector_widget(): diff --git a/ui_components/widgets/prompt_finder.py b/ui_components/widgets/prompt_finder.py index 69223145..2930b143 100644 --- a/ui_components/widgets/prompt_finder.py +++ 
b/ui_components/widgets/prompt_finder.py @@ -2,8 +2,8 @@ from PIL import Image import streamlit as st -from ui_components.methods.common_methods import prompt_clip_interrogator -from utils.common_utils import save_or_host_file +from ui_components.methods.file_methods import save_or_host_file +from ui_components.methods.ml_methods import prompt_clip_interrogator def prompt_finder_element(project_uuid): diff --git a/ui_components/widgets/styling_element.py b/ui_components/widgets/styling_element.py index 88598244..6a604662 100644 --- a/ui_components/widgets/styling_element.py +++ b/ui_components/widgets/styling_element.py @@ -4,7 +4,7 @@ from typing import List from shared.constants import AIModelCategory, AIModelType -from ui_components.methods.common_methods import trigger_restyling_process +from ui_components.methods.ml_methods import trigger_restyling_process from ui_components.models import InternalAIModelObject, InternalFrameTimingObject, InternalSettingObject from utils.constants import ImageStage from utils.data_repo.data_repo import DataRepo diff --git a/utils/media_processor/interpolator.py b/utils/media_processor/interpolator.py index 1760794d..086445e7 100644 --- a/utils/media_processor/interpolator.py +++ b/utils/media_processor/interpolator.py @@ -4,7 +4,7 @@ import requests as r import numpy as np from shared.constants import AnimationStyleType -from utils.common_utils import generate_temp_file +from ui_components.methods.file_methods import generate_temp_file from utils.data_repo.data_repo import DataRepo from utils.ml_processor.ml_interface import get_ml_client From 78125a7d81d7beadea995a5da11c3da77e3065a5 Mon Sep 17 00:00:00 2001 From: piyushK52 Date: Wed, 13 Sep 2023 17:03:39 +0530 Subject: [PATCH 006/164] minor fixes --- repository/data_logger.py | 7 +--- .../components/frame_styling_page.py | 32 ++++++++-------- ui_components/methods/common_methods.py | 38 ++++++++----------- ui_components/methods/file_methods.py | 2 +- 
.../widgets/frame_clip_generation_elements.py | 5 ++- utils/data_repo/data_repo.py | 3 ++ 6 files changed, 40 insertions(+), 47 deletions(-) diff --git a/repository/data_logger.py b/repository/data_logger.py index b720c964..947ec636 100644 --- a/repository/data_logger.py +++ b/repository/data_logger.py @@ -1,5 +1,6 @@ import json import time +from backend.db_repo import DBRepo from shared.logging.constants import LoggingPayload, LoggingType from shared.logging.logging import AppLogger @@ -30,9 +31,5 @@ def log_model_inference(model: ReplicateModel, time_taken, **kwargs): # logging in console system_logger.log(LoggingType.INFERENCE_CALL, logging_payload) - # logging data - # log_inference_data_in_csv(logging_payload.data) - - # TODO: streamline the logging part # db_repo = DBRepo() - # db_repo.log_inference_data_in_local_db(logging_payload.data) \ No newline at end of file + # db_repo.create_inference_log(logging_payload.data) \ No newline at end of file diff --git a/ui_components/components/frame_styling_page.py b/ui_components/components/frame_styling_page.py index ea50c915..53a3bf70 100644 --- a/ui_components/components/frame_styling_page.py +++ b/ui_components/components/frame_styling_page.py @@ -237,22 +237,22 @@ def frame_styling_page(mainheader2, project_uuid: str): if st.button(f"Generate variants", key=f"new_variations_{st.session_state['current_frame_index']}", help="This will generate new variants based on the settings to the left."): for i in range(0, st.session_state['individual_number_of_variants']): trigger_restyling_process( - st.session_state['current_frame_uuid'], - st.session_state['model'], - st.session_state['prompt'], - st.session_state['strength'], - st.session_state['negative_prompt'], - st.session_state['guidance_scale'], - st.session_state['seed'], - st.session_state['num_inference_steps'], - st.session_state['transformation_stage'], - st.session_state["promote_new_generation"], - st.session_state['custom_models'], - 
st.session_state['adapter_type'], - True, - st.session_state['low_threshold'], - st.session_state['high_threshold'] - ) + st.session_state['current_frame_uuid'], + st.session_state['model'], + st.session_state['prompt'], + st.session_state['strength'], + st.session_state['negative_prompt'], + st.session_state['guidance_scale'], + st.session_state['seed'], + st.session_state['num_inference_steps'], + st.session_state['transformation_stage'], + st.session_state["promote_new_generation"], + st.session_state['custom_models'], + st.session_state['adapter_type'], + True, + st.session_state['low_threshold'], + st.session_state['high_threshold'] + ) st.experimental_rerun() st.markdown("***") diff --git a/ui_components/methods/common_methods.py b/ui_components/methods/common_methods.py index 13f6aa2a..1fc8554f 100644 --- a/ui_components/methods/common_methods.py +++ b/ui_components/methods/common_methods.py @@ -38,32 +38,24 @@ def clone_styling_settings(source_frame_number, target_frame_uuid): target_timing.project.uuid) data_repo.update_specific_timing( - target_frame_uuid, custom_pipeline=timing_details[source_frame_number].custom_pipeline) - data_repo.update_specific_timing( - target_frame_uuid, negative_prompt=timing_details[source_frame_number].negative_prompt) - data_repo.update_specific_timing( - target_frame_uuid, guidance_scale=timing_details[source_frame_number].guidance_scale) - data_repo.update_specific_timing( - target_frame_uuid, seed=timing_details[source_frame_number].seed) - data_repo.update_specific_timing( - target_frame_uuid, num_inteference_steps=timing_details[source_frame_number].num_inteference_steps) - data_repo.update_specific_timing( - target_frame_uuid, transformation_stage=timing_details[source_frame_number].transformation_stage) + target_frame_uuid, + custom_pipeline=timing_details[source_frame_number].custom_pipeline, + negative_prompt=timing_details[source_frame_number].negative_prompt, + 
guidance_scale=timing_details[source_frame_number].guidance_scale, + seed=timing_details[source_frame_number].seed, + num_inteference_steps=timing_details[source_frame_number].num_inteference_steps, + transformation_stage=timing_details[source_frame_number].transformation_stage, + strength=timing_details[source_frame_number].strength, + custom_models=timing_details[source_frame_number].custom_model_id_list, + adapter_type=timing_details[source_frame_number].adapter_type, + low_threshold=timing_details[source_frame_number].low_threshold, + high_threshold=timing_details[source_frame_number].high_threshold, + prompt=timing_details[source_frame_number].prompt + ) + if timing_details[source_frame_number].model: data_repo.update_specific_timing( target_frame_uuid, model_id=timing_details[source_frame_number].model.uuid) - data_repo.update_specific_timing( - target_frame_uuid, strength=timing_details[source_frame_number].strength) - data_repo.update_specific_timing( - target_frame_uuid, custom_models=timing_details[source_frame_number].custom_model_id_list) - data_repo.update_specific_timing( - target_frame_uuid, adapter_type=timing_details[source_frame_number].adapter_type) - data_repo.update_specific_timing( - target_frame_uuid, low_threshold=timing_details[source_frame_number].low_threshold) - data_repo.update_specific_timing( - target_frame_uuid, high_threshold=timing_details[source_frame_number].high_threshold) - data_repo.update_specific_timing( - target_frame_uuid, prompt=timing_details[source_frame_number].prompt) # TODO: image format is assumed to be PNG, change this later def save_new_image(img: Union[Image.Image, str, np.ndarray, io.BytesIO], project_uuid) -> InternalFileObject: diff --git a/ui_components/methods/file_methods.py b/ui_components/methods/file_methods.py index 52068765..8cbf69d0 100644 --- a/ui_components/methods/file_methods.py +++ b/ui_components/methods/file_methods.py @@ -139,7 +139,7 @@ def 
generate_temp_file_from_uploaded_file(uploaded_file): def convert_bytes_to_file(file_location_to_save, mime_type, file_bytes, project_uuid, filename=None, tag=""): data_repo = DataRepo() - hosted_url = save_or_host_file_bytes(file_bytes, file_location_to_save, mime_type=mime_type) + hosted_url = save_or_host_file_bytes(file_bytes, file_location_to_save, "." + mime_type.split("/")[1]) file_data = { "name": str(uuid.uuid4()) + "." + mime_type.split("/")[1] if not filename else filename, "type": InternalFileType.IMAGE.value, diff --git a/ui_components/widgets/frame_clip_generation_elements.py b/ui_components/widgets/frame_clip_generation_elements.py index 6989aea8..eb4820aa 100644 --- a/ui_components/widgets/frame_clip_generation_elements.py +++ b/ui_components/widgets/frame_clip_generation_elements.py @@ -1,3 +1,4 @@ +import uuid import streamlit as st from shared.constants import AnimationStyleType from ui_components.methods.file_methods import convert_bytes_to_file @@ -28,7 +29,7 @@ def generate_individual_clip(timing_uuid, quality): settings ) - video_location = "" + video_location = "videos/" + timing.project.name + "/assets/videos/0_raw/" + str(uuid.uuid4()) + ".mp4" video = convert_bytes_to_file( video_location, "video/mp4", @@ -36,7 +37,7 @@ def generate_individual_clip(timing_uuid, quality): timing.project.uuid ) - data_repo.add_interpolated_clip(timing_uuid, interpolated_clip_id=video.uuid) + data_repo.add_interpolated_clip(timing_uuid, interpolated_clip_id=video.uuid, clip_settings=settings) output_video = update_speed_of_video_clip(timing.interpolated_clip, timing_uuid) data_repo.update_specific_timing(timing_uuid, timed_clip_id=output_video.uuid) return output_video diff --git a/utils/data_repo/data_repo.py b/utils/data_repo/data_repo.py index 43c2acfd..54201750 100644 --- a/utils/data_repo/data_repo.py +++ b/utils/data_repo/data_repo.py @@ -247,6 +247,9 @@ def update_specific_timing(self, uuid, **kwargs): res = self.db_repo.update_specific_timing(uuid, 
**kwargs) return res.status + def add_interpolated_clip(self, timing_uuid, **kwargs): + pass + def delete_timing_from_uuid(self, uuid): res = self.db_repo.delete_timing_from_uuid(uuid) return res.status From 9443be1a13ced372661dd85f202eea4aa345eab1 Mon Sep 17 00:00:00 2001 From: piyushK52 Date: Wed, 13 Sep 2023 19:55:52 +0530 Subject: [PATCH 007/164] inference logging fixed --- backend/db_repo.py | 2 +- .../0006_inference_time_converted_to_float.py | 23 +++++++++++++++++++ backend/models.py | 2 +- repository/data_logger.py | 19 ++++++++++++--- utils/data_repo/api_repo.py | 2 +- utils/data_repo/data_repo.py | 3 ++- 6 files changed, 44 insertions(+), 7 deletions(-) create mode 100644 backend/migrations/0006_inference_time_converted_to_float.py diff --git a/backend/db_repo.py b/backend/db_repo.py index a86ce3b7..ab5c4c5f 100644 --- a/backend/db_repo.py +++ b/backend/db_repo.py @@ -463,7 +463,7 @@ def get_ai_model_from_name(self, name): # DBRepo._count += 1 # cls_name = inspect.currentframe().f_code.co_name # print("db call: ", DBRepo._count, " class name: ", cls_name) - ai_model = AIModel.objects.filter(name=name, is_disabled=False).first() + ai_model = AIModel.objects.filter(replicate_url=name, is_disabled=False).first() if not ai_model: return InternalResponse({}, 'invalid ai model name', False) diff --git a/backend/migrations/0006_inference_time_converted_to_float.py b/backend/migrations/0006_inference_time_converted_to_float.py new file mode 100644 index 00000000..b099f1f5 --- /dev/null +++ b/backend/migrations/0006_inference_time_converted_to_float.py @@ -0,0 +1,23 @@ +# Generated by Django 4.2.1 on 2023-09-13 09:23 + +from django.db import migrations, models + + +class Migration(migrations.Migration): + + dependencies = [ + ('backend', '0005_model_type_added'), + ] + + operations = [ + migrations.AlterField( + model_name='inferencelog', + name='total_inference_time', + field=models.FloatField(default=0), + ), + migrations.AlterField( + model_name='timing', + 
name='strength', + field=models.FloatField(default=1), + ), + ] diff --git a/backend/models.py b/backend/models.py index 3e1bcf8f..a48a6606 100644 --- a/backend/models.py +++ b/backend/models.py @@ -105,7 +105,7 @@ class InferenceLog(BaseModel): model = models.ForeignKey(AIModel, on_delete=models.DO_NOTHING, null=True) input_params = models.TextField(default="", blank=True) output_details = models.TextField(default="", blank=True) - total_inference_time = models.IntegerField(default=0) + total_inference_time = models.FloatField(default=0) class Meta: app_label = 'backend' diff --git a/repository/data_logger.py b/repository/data_logger.py index 947ec636..13712967 100644 --- a/repository/data_logger.py +++ b/repository/data_logger.py @@ -1,8 +1,9 @@ import json +import streamlit as st import time -from backend.db_repo import DBRepo from shared.logging.constants import LoggingPayload, LoggingType from shared.logging.logging import AppLogger +from utils.data_repo.data_repo import DataRepo from utils.ml_processor.replicate.constants import ReplicateModel @@ -16,6 +17,7 @@ def log_model_inference(model: ReplicateModel, time_taken, **kwargs): del kwargs_dict[key] data_str = json.dumps(kwargs_dict) + time_taken = round(time_taken, 2) data = { 'model_name': model.name, @@ -31,5 +33,16 @@ def log_model_inference(model: ReplicateModel, time_taken, **kwargs): # logging in console system_logger.log(LoggingType.INFERENCE_CALL, logging_payload) - # db_repo = DBRepo() - # db_repo.create_inference_log(logging_payload.data) \ No newline at end of file + # storing the log in db + data_repo = DataRepo() + ai_model = data_repo.get_ai_model_from_name(model.name) + + log_data = { + "project_id" : st.session_state["project_uuid"], + "model_id" : ai_model.uuid if ai_model else None, + "input_params" : data_str, + "output_details" : json.dumps({"model_name": model.name, "version": model.version}), + "total_inference_time" : time_taken, + } + + data_repo.create_inference_log(**log_data) \ No 
newline at end of file diff --git a/utils/data_repo/api_repo.py b/utils/data_repo/api_repo.py index af339786..c7f87a2b 100644 --- a/utils/data_repo/api_repo.py +++ b/utils/data_repo/api_repo.py @@ -242,7 +242,7 @@ def get_ai_model_from_uuid(self, uuid): # TODO: remove this method from everywhere def get_ai_model_from_name(self, name): - res = self.http_get(self.MODEL_URL, params={'name': name}) + res = self.http_get(self.MODEL_URL, params={'replicate_url': name}) return InternalResponse(res['payload'], 'success', res['status']) diff --git a/utils/data_repo/data_repo.py b/utils/data_repo/data_repo.py index 54201750..d5425edb 100644 --- a/utils/data_repo/data_repo.py +++ b/utils/data_repo/data_repo.py @@ -156,7 +156,8 @@ def get_ai_model_from_uuid(self, uuid): return InternalAIModelObject(**model) if model else None def get_ai_model_from_name(self, name): - model = self.db_repo.get_ai_model_from_name(name).data['data'] + res = self.db_repo.get_ai_model_from_name(name) + model = res.data['data'] if res.status else None return InternalAIModelObject(**model) if model else None def get_all_ai_model_list(self, model_category_list=None, user_id=None, custom_trained=None, model_type_list=None): From dc4c600b3d1e5d6209e54efb472b8272d83b26ba Mon Sep 17 00:00:00 2001 From: piyushK52 Date: Fri, 15 Sep 2023 15:01:30 +0530 Subject: [PATCH 008/164] wip: log linked to file + minor bug fixes --- backend/db_repo.py | 17 ++- backend/migrations/0007_log_mapped_to_file.py | 19 +++ backend/models.py | 61 ++++----- backend/serializers/dto.py | 41 +++--- repository/data_logger.py | 3 +- ui_components/methods/common_methods.py | 2 +- ui_components/methods/ml_methods.py | 73 +++++++---- ui_components/methods/video_methods.py | 39 ++++-- ui_components/models.py | 1 + .../widgets/frame_clip_generation_elements.py | 11 +- utils/common_utils.py | 124 ++++-------------- utils/media_processor/interpolator.py | 14 +- utils/ml_processor/replicate/replicate.py | 22 +--- 13 files changed, 200 
insertions(+), 227 deletions(-) create mode 100644 backend/migrations/0007_log_mapped_to_file.py diff --git a/backend/db_repo.py b/backend/db_repo.py index ab5c4c5f..475de8fb 100644 --- a/backend/db_repo.py +++ b/backend/db_repo.py @@ -264,6 +264,7 @@ def create_file(self, **kwargs): if not data.is_valid(): return InternalResponse({}, data.errors, False) + print(data.data) # hosting the file if only local path is provided and it's a production environment if 'hosted_url' not in kwargs and AUTOMATIC_FILE_HOSTING: # this is the user who is uploading the file @@ -279,7 +280,6 @@ def create_file(self, **kwargs): hosted_url = upload_file(filename, app_setting.aws_access_key_decrypted, \ app_setting.aws_secret_access_key_decrypted) - print(data.data) data._data['hosted_url'] = hosted_url if 'project_id' in kwargs and kwargs['project_id']: @@ -287,8 +287,14 @@ def create_file(self, **kwargs): if not project: return InternalResponse({}, 'invalid project', False) - print(data.data) data._data['project_id'] = project.id + + if 'inference_log_id' in kwargs and kwargs['inference_log_id']: + inference_log = InferenceLog.objects.filter(uuid=kwargs['inference_log_id'], is_disabled=False).first() + if not inference_log: + return InternalResponse({}, 'invalid log id', False) + + data._data['inference_log_id'] = inference_log.id if not data.is_valid(): @@ -346,6 +352,13 @@ def update_file(self, **kwargs): return InternalResponse({}, 'invalid project', False) kwargs['project_id'] = project.id + + if 'inference_log_id' in kwargs and kwargs['inference_log_id']: + inference_log = InferenceLog.objects.filter(uuid=kwargs['inference_log_id'], is_disabled=False).first() + if not inference_log: + return InternalResponse({}, 'invalid log id', False) + + kwargs['inference_log_id'] = inference_log.id for k,v in kwargs.items(): setattr(file, k, v) diff --git a/backend/migrations/0007_log_mapped_to_file.py b/backend/migrations/0007_log_mapped_to_file.py new file mode 100644 index 
00000000..a19f18ea --- /dev/null +++ b/backend/migrations/0007_log_mapped_to_file.py @@ -0,0 +1,19 @@ +# Generated by Django 4.2.1 on 2023-09-15 04:30 + +from django.db import migrations, models +import django.db.models.deletion + + +class Migration(migrations.Migration): + + dependencies = [ + ('backend', '0006_inference_time_converted_to_float'), + ] + + operations = [ + migrations.AddField( + model_name='internalfileobject', + name='inference_log', + field=models.ForeignKey(default=None, null=True, on_delete=django.db.models.deletion.SET_NULL, to='backend.inferencelog'), + ), + ] diff --git a/backend/models.py b/backend/models.py index a48a6606..fce84993 100644 --- a/backend/models.py +++ b/backend/models.py @@ -46,6 +46,36 @@ class Meta: db_table = 'project' +class AIModel(BaseModel): + name = models.CharField(max_length=255, default="") + user = models.ForeignKey(User, on_delete=models.DO_NOTHING, null=True) + custom_trained = models.BooleanField(default=False) + version = models.CharField(max_length=255, default="", blank=True, null=True) + replicate_model_id = models.CharField(max_length=255, default="", blank=True) # for models which were custom created + replicate_url = models.TextField(default="", blank=True) + diffusers_url = models.TextField(default="", blank=True) # for downloading and running models offline + category = models.CharField(max_length=255,default="", blank=True) # Lora, Dreambooth.. + model_type = models.TextField(default="", blank=True) # [txt2img, img2img..] 
array of types + training_image_list = models.TextField(default="", blank=True) # contains an array of uuid of file objects + keyword = models.CharField(max_length=255,default="", blank=True) + + class Meta: + app_label = 'backend' + db_table = 'ai_model' + + +class InferenceLog(BaseModel): + project = models.ForeignKey(Project, on_delete=models.CASCADE, null=True) + model = models.ForeignKey(AIModel, on_delete=models.DO_NOTHING, null=True) + input_params = models.TextField(default="", blank=True) + output_details = models.TextField(default="", blank=True) + total_inference_time = models.FloatField(default=0) + + class Meta: + app_label = 'backend' + db_table = 'inference_log' + + class InternalFileObject(BaseModel): name = models.TextField(default="") type = models.CharField(max_length=255, default="") # image, video, audio @@ -53,6 +83,7 @@ class InternalFileObject(BaseModel): hosted_url = models.TextField(default="") tag = models.CharField(max_length=255,default="") # background_image, mask_image, canny_image etc.. 
project = models.ForeignKey(Project, on_delete=models.SET_NULL, default=None, null=True) + inference_log = models.ForeignKey(InferenceLog, on_delete=models.SET_NULL, default=None, null=True) class Meta: app_label = 'backend' @@ -82,36 +113,6 @@ def location(self): return self.local_path if self.local_path else self.hosted_url -class AIModel(BaseModel): - name = models.CharField(max_length=255, default="") - user = models.ForeignKey(User, on_delete=models.DO_NOTHING, null=True) - custom_trained = models.BooleanField(default=False) - version = models.CharField(max_length=255, default="", blank=True, null=True) - replicate_model_id = models.CharField(max_length=255, default="", blank=True) # for models which were custom created - replicate_url = models.TextField(default="", blank=True) - diffusers_url = models.TextField(default="", blank=True) # for downloading and running models offline - category = models.CharField(max_length=255,default="", blank=True) # Lora, Dreambooth.. - model_type = models.TextField(default="", blank=True) # [txt2img, img2img..] 
array of types - training_image_list = models.TextField(default="", blank=True) # contains an array of uuid of file objects - keyword = models.CharField(max_length=255,default="", blank=True) - - class Meta: - app_label = 'backend' - db_table = 'ai_model' - - -class InferenceLog(BaseModel): - project = models.ForeignKey(Project, on_delete=models.CASCADE, null=True) - model = models.ForeignKey(AIModel, on_delete=models.DO_NOTHING, null=True) - input_params = models.TextField(default="", blank=True) - output_details = models.TextField(default="", blank=True) - total_inference_time = models.FloatField(default=0) - - class Meta: - app_label = 'backend' - db_table = 'inference_log' - - class AIModelParamMap(BaseModel): model = models.ForeignKey(AIModel, on_delete=models.DO_NOTHING, null=True) standard_param_key = models.CharField(max_length=255, blank=True) diff --git a/backend/serializers/dto.py b/backend/serializers/dto.py index 38462ada..be7a4f1d 100644 --- a/backend/serializers/dto.py +++ b/backend/serializers/dto.py @@ -2,10 +2,6 @@ from backend.models import AIModel, AppSetting, BackupTiming, InferenceLog, InternalFileObject, Project, Setting, Timing, User -class InternalFileDto(serializers.ModelSerializer): - class Meta: - model = InternalFileObject - fields = ('uuid', 'name', 'local_path', 'type', 'hosted_url', 'created_on') class UserDto(serializers.ModelSerializer): class Meta: @@ -50,6 +46,28 @@ class Meta: def get_user_uuid(self, obj): return obj.user.uuid +class InferenceLogDto(serializers.ModelSerializer): + project = ProjectDto() + model = AIModelDto() + + class Meta: + model = InferenceLog + fields = ( + "project", + "model", + "input_params", + "output_details", + "total_inference_time", + "created_on" + ) + + +class InternalFileDto(serializers.ModelSerializer): + inference_log = InferenceLogDto() + class Meta: + model = InternalFileObject + fields = ('uuid', 'name', 'local_path', 'type', 'hosted_url', 'created_on', 'inference_log') + class 
TimingDto(serializers.ModelSerializer): project = ProjectDto() @@ -153,21 +171,6 @@ class Meta: ) -class InferenceLogDto(serializers.ModelSerializer): - project = ProjectDto() - model = AIModelDto() - - class Meta: - model = InferenceLog - fields = ( - "project", - "model", - "input_params", - "output_details", - "total_inference_time", - "created_on" - ) - class BackupDto(serializers.ModelSerializer): project = ProjectDto() class Meta: diff --git a/repository/data_logger.py b/repository/data_logger.py index 13712967..f99abc69 100644 --- a/repository/data_logger.py +++ b/repository/data_logger.py @@ -45,4 +45,5 @@ def log_model_inference(model: ReplicateModel, time_taken, **kwargs): "total_inference_time" : time_taken, } - data_repo.create_inference_log(**log_data) \ No newline at end of file + log = data_repo.create_inference_log(**log_data) + return log \ No newline at end of file diff --git a/ui_components/methods/common_methods.py b/ui_components/methods/common_methods.py index 1fc8554f..2c400ad7 100644 --- a/ui_components/methods/common_methods.py +++ b/ui_components/methods/common_methods.py @@ -960,7 +960,7 @@ def create_or_update_mask(timing_uuid, image) -> InternalFileObject: data_repo.update_file(timing.mask.uuid, local_path=file_location) timing = data_repo.get_timing_from_uuid(timing_uuid) - return timing.mask + return timing.mask.location # adds the image file in variant (alternative images) list def drawing_mode(timing_details,project_settings,project_uuid,stage=WorkflowStageType.STYLED.value): diff --git a/ui_components/methods/ml_methods.py b/ui_components/methods/ml_methods.py index 14a68c04..053d42cd 100644 --- a/ui_components/methods/ml_methods.py +++ b/ui_components/methods/ml_methods.py @@ -1,4 +1,6 @@ +import os +import tempfile import streamlit as st import replicate from typing import List @@ -165,7 +167,7 @@ def prompt_clip_interrogator(input_image, which_model, best_or_fast): input_image = open(input_image, "rb") ml_client = 
get_ml_client() - output = ml_client.predict_model_output( + output, _ = ml_client.predict_model_output( REPLICATE_MODEL.clip_interrogator, image=input_image, clip_model_name=which_model, mode=best_or_fast) return output @@ -177,13 +179,13 @@ def prompt_model_real_esrgan_upscaling(input_image): input_image = open(input_image, "rb") ml_client = get_ml_client() - output = ml_client.predict_model_output( + output, log = ml_client.predict_model_output( REPLICATE_MODEL.real_esrgan_upscale, image=input_image, upscale=2 ) filename = str(uuid.uuid4()) + ".png" output_file = data_repo.create_file(name=filename, type=InternalFileType.IMAGE.value, - hosted_url=output) + hosted_url=output, inference_log_id=log.uuid) return output_file # TODO: fix the options input, only certain words can be input in this @@ -199,11 +201,11 @@ def prompt_model_stylegan_nada(timing_uuid, input_image): input_file = open(input_image.location, 'rb') ml_client = get_ml_client() - output = ml_client.predict_model_output(REPLICATE_MODEL.stylegan_nada, input=input_file, + output, log = ml_client.predict_model_output(REPLICATE_MODEL.stylegan_nada, input=input_file, output_style=timing.prompt) filename = str(uuid.uuid4()) + ".png" image_file = data_repo.create_file(name=filename, type=InternalFileType.IMAGE.value, - hosted_url=output[0]) + hosted_url=output[0], inference_log_id=log.uuid) output_file = resize_image(timing.project.name, 512, 512, image_file) return output_file @@ -216,10 +218,10 @@ def prompt_model_stable_diffusion_xl(timing_uuid): timing_uuid) ml_client = get_ml_client() - output = ml_client.predict_model_output(REPLICATE_MODEL.sdxl, prompt=timing.prompt) + output, log = ml_client.predict_model_output(REPLICATE_MODEL.sdxl, prompt=timing.prompt) filename = str(uuid.uuid4()) + ".png" image_file = data_repo.create_file(name=filename, type=InternalFileType.IMAGE.value, - hosted_url=output[0]) + hosted_url=output[0], inference_log_id=log.uuid) output_file = resize_image(timing.project.name, 
512, 512, image_file) return output_file @@ -239,7 +241,7 @@ def prompt_model_stability(timing_uuid, input_image_file: InternalFileObject): input_image = open(input_image, "rb") ml_client = get_ml_client() - output = ml_client.predict_model_output( + output, log = ml_client.predict_model_output( REPLICATE_MODEL.img2img_sd_2_1, image=input_image, prompt_strength=float(strength), @@ -254,7 +256,7 @@ def prompt_model_stability(timing_uuid, input_image_file: InternalFileObject): filename = str(uuid.uuid4()) + ".png" image_file: InternalFileObject = data_repo.create_file(name=filename, type=InternalFileType.IMAGE.value, - hosted_url=output[0], tag=InternalFileTag.GENERATED_VIDEO.value) + hosted_url=output[0], inference_log_id=log.uuid) return image_file @@ -353,13 +355,13 @@ def prompt_model_depth2img(strength, timing_uuid, source_image) -> InternalFileO source_image = open(source_image, "rb") ml_client = get_ml_client() - output = ml_client.predict_model_output(REPLICATE_MODEL.jagilley_controlnet_depth2img, input_image=source_image, + output, log = ml_client.predict_model_output(REPLICATE_MODEL.jagilley_controlnet_depth2img, input_image=source_image, prompt_strength=float(strength), prompt=prompt, negative_prompt=negative_prompt, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale) filename = str(uuid.uuid4()) + ".png" image_file: InternalFileObject = data_repo.create_file(name=filename, type=InternalFileType.IMAGE.value, - hosted_url=output[0]) + hosted_url=output[0], inference_log_id=log.uuid) return image_file @@ -368,7 +370,7 @@ def prompt_model_blip2(input_image, query): input_image = open(input_image, "rb") ml_client = get_ml_client() - output = ml_client.predict_model_output( + output, _ = ml_client.predict_model_output( REPLICATE_MODEL.salesforce_blip_2, image=input_image, question=query) return output @@ -387,12 +389,12 @@ def prompt_model_pix2pix(timing_uuid, input_image_file: InternalFileObject): input_image = open(input_image, "rb") 
ml_client = get_ml_client() - output = ml_client.predict_model_output(REPLICATE_MODEL.arielreplicate, input_image=input_image, instruction_text=prompt, + output, log = ml_client.predict_model_output(REPLICATE_MODEL.arielreplicate, input_image=input_image, instruction_text=prompt, seed=seed, cfg_image=1.2, cfg_text=guidance_scale, resolution=704) filename = str(uuid.uuid4()) + ".png" image_file: InternalFileObject = data_repo.create_file(name=filename, type=InternalFileType.IMAGE.value, - hosted_url=output) + hosted_url=output[0], inference_log_id=log.uuid) return image_file @@ -401,7 +403,7 @@ def facial_expression_recognition(input_image): input_image = open(input_image, "rb") ml_client = get_ml_client() - output = ml_client.predict_model_output( + output, _ = ml_client.predict_model_output( REPLICATE_MODEL.phamquiluan_face_recognition, input_path=input_image) emo_label = output[0]["emo_label"] @@ -472,9 +474,12 @@ def prompt_model_controlnet(timing_uuid, input_image): } ml_client = get_ml_client() - output = ml_client.predict_model_output(model, **inputs) + output, log = ml_client.predict_model_output(model, **inputs) - return output[1] + filename = str(uuid.uuid4()) + ".png" + output_file: InternalFileObject = data_repo.create_file(name=filename, type=InternalFileType.IMAGE.value, + hosted_url=output[0], inference_log_id=log.uuid) + return output_file def prompt_model_urpm_v1_3(timing_uuid, source_image): @@ -496,9 +501,12 @@ def prompt_model_urpm_v1_3(timing_uuid, source_image): } ml_client = get_ml_client() - output = ml_client.predict_model_output(REPLICATE_MODEL.urpm, **inputs) + output, log = ml_client.predict_model_output(REPLICATE_MODEL.urpm, **inputs) - return output[0] + filename = str(uuid.uuid4()) + ".png" + output_file: InternalFileObject = data_repo.create_file(name=filename, type=InternalFileType.IMAGE.value, + hosted_url=output[0], inference_log_id=log.uuid) + return output_file def prompt_model_controlnet_1_1_x_realistic_vision_v2_0(timing_uuid, 
input_image): @@ -518,10 +526,13 @@ def prompt_model_controlnet_1_1_x_realistic_vision_v2_0(timing_uuid, input_image } ml_client = get_ml_client() - output = ml_client.predict_model_output( + output, log = ml_client.predict_model_output( REPLICATE_MODEL.controlnet_1_1_x_realistic_vision_v2_0, **inputs) - return output[1] + filename = str(uuid.uuid4()) + ".png" + output_file: InternalFileObject = data_repo.create_file(name=filename, type=InternalFileType.IMAGE.value, + hosted_url=output[1], inference_log_id=log.uuid) + return output_file def prompt_model_lora(timing_uuid, source_image_file: InternalFileObject) -> InternalFileObject: @@ -581,12 +592,12 @@ def prompt_model_lora(timing_uuid, source_image_file: InternalFileObject) -> Int attempts = 0 while attempts < max_attempts: try: - output = ml_client.predict_model_output( + output, log = ml_client.predict_model_output( REPLICATE_MODEL.clones_lora_training_2, **inputs) print(output) filename = str(uuid.uuid4()) + ".png" file: InternalFileObject = data_repo.create_file(name=filename, type=InternalFileType.IMAGE.value, - hosted_url=output[0]) + hosted_url=output[0], inference_log_id=log.uuid) return file except replicate.exceptions.ModelError as e: if "NSFW content detected" in str(e): @@ -619,12 +630,12 @@ def inpainting(input_image: str, prompt, negative_prompt, timing_uuid, invert_ma input_image = open(input_image, "rb") ml_client = get_ml_client() - output = ml_client.predict_model_output(REPLICATE_MODEL.andreas_sd_inpainting, mask=mask, image=input_image, prompt=prompt, + output, log = ml_client.predict_model_output(REPLICATE_MODEL.andreas_sd_inpainting, mask=mask, image=input_image, prompt=prompt, invert_mask=invert_mask, negative_prompt=negative_prompt, num_inference_steps=25) file_name = str(uuid.uuid4()) + ".png" image_file = data_repo.create_file( - name=file_name, type=InternalFileType.IMAGE.value, hosted_url=output[0]) + name=file_name, type=InternalFileType.IMAGE.value, hosted_url=output[0], 
inference_log_id=log.uuid) return image_file @@ -633,7 +644,7 @@ def remove_background(input_image): input_image = open(input_image, "rb") ml_client = get_ml_client() - output = ml_client.predict_model_output( + output, _ = ml_client.predict_model_output( REPLICATE_MODEL.pollination_modnet, image=input_image) return output @@ -645,14 +656,18 @@ def create_depth_mask_image(input_image, layer, timing_uuid): input_image = open(input_image, "rb") ml_client = get_ml_client() - output = ml_client.predict_model_output( + output, log = ml_client.predict_model_output( REPLICATE_MODEL.cjwbw_midas, image=input_image, model_type="dpt_beit_large_512") try: - urllib.request.urlretrieve(output, "videos/temp/depth.png") + temp_file = tempfile.NamedTemporaryFile(delete=False, suffix=".png", mode='wb') + # urllib.request.urlretrieve(output, "videos/temp/depth.png") + with urllib.request.urlopen(output) as response, open(temp_file.name, 'wb') as out_file: + out_file.write(response.read()) except Exception as e: print(e) - depth_map = Image.open("videos/temp/depth.png") + depth_map = Image.open(temp_file.name) + os.remove(temp_file.name) depth_map = depth_map.convert("L") # Convert to grayscale image pixels = depth_map.load() mask = Image.new("L", depth_map.size) diff --git a/ui_components/methods/video_methods.py b/ui_components/methods/video_methods.py index 3042c161..3b625621 100644 --- a/ui_components/methods/video_methods.py +++ b/ui_components/methods/video_methods.py @@ -13,6 +13,7 @@ from shared.constants import InternalFileTag from shared.file_upload.s3 import is_s3_image_url from ui_components.constants import VideoQuality +from ui_components.methods.file_methods import convert_bytes_to_file from ui_components.models import InternalFrameTimingObject, InternalSettingObject from utils.data_repo.data_repo import DataRepo from utils.media_processor.interpolator import VideoInterpolator @@ -32,11 +33,21 @@ def create_or_get_single_preview_video(timing_uuid): timing.project.uuid) 
if not timing.interpolated_clip: - data_repo.update_specific_timing(timing_uuid, interpolation_steps=3) - interpolated_video: InternalFileObject = VideoInterpolator.video_through_frame_interpolation( - timing_uuid) + timing.interpolation_steps = 3 + next_timing = data_repo.get_next_timing(timing.uuid) + img_list = [timing.source_image.location, next_timing.source_image.location] + video_bytes, log = VideoInterpolator.video_through_frame_interpolation(img_list, {"interpolation_steps": timing.interpolation_steps}) + file_data = { + "file_location_to_save": "videos/" + timing.project.uuid + "/assets/videos/" + str(uuid.uuid4()) + ".mp4", + "mime_type": "video/mp4", + "file_bytes": video_bytes, + "project_uuid": timing.project.uuid, + "inference_log_id": log.uuid + } + + video_file = convert_bytes_to_file(**file_data) data_repo.update_specific_timing( - timing_uuid, interpolated_clip_id=interpolated_video.uuid) + timing_uuid, interpolated_clip_id=video_file.uuid) if not timing.timed_clip: timing = data_repo.get_timing_from_uuid(timing_uuid) @@ -305,17 +316,18 @@ def render_video(final_video_name, project_uuid, quality, file_tag=InternalFileT if not timing.interpolated_clip: next_timing = data_repo.get_next_timing(current_timing.uuid) - video_bytes = VideoInterpolator.create_interpolated_clip( + video_bytes, log = VideoInterpolator.create_interpolated_clip( img_location_list=[current_timing.source_image.location, next_timing.source_image.location], interpolation_steps=current_timing.interpolation_steps ) file_location = "videos/" + current_timing.project.name + "/assets/videos/0_raw/" + str(uuid.uuid4()) + ".mp4" video_file = convert_bytes_to_file( - file_location, - "video/mp4", - video_bytes, - current_timing.project.uuid + file_location_to_save=file_location, + mime_type="video/mp4", + file_bytes=video_bytes, + project_uuid=current_timing.project.uuid, + inference_log_id=log.uuid ) data_repo.update_specific_timing( @@ -413,10 +425,11 @@ def 
render_video(final_video_name, project_uuid, quality, file_tag=InternalFileT video_bytes = f.read() _ = convert_bytes_to_file( - output_video_file, - "video/mp4", - video_bytes, - project_uuid, + file_location_to_save=output_video_file, + mime_type="video/mp4", + file_bytes=video_bytes, + project_uuid=project_uuid, + inference_log_id=None, filename=final_video_name, tag=file_tag ) diff --git a/ui_components/models.py b/ui_components/models.py index 4335d6c2..7afd48dd 100644 --- a/ui_components/models.py +++ b/ui_components/models.py @@ -280,6 +280,7 @@ def to_json(self): class InferenceLogObject: def __init__(self, **kwargs): + self.uuid = kwargs['uuid'] if 'uuid' in kwargs else None self.project = InternalProjectObject( **kwargs["project"]) if 'project' in kwargs else None self.model = InternalAIModelObject( diff --git a/ui_components/widgets/frame_clip_generation_elements.py b/ui_components/widgets/frame_clip_generation_elements.py index eb4820aa..d989a1a8 100644 --- a/ui_components/widgets/frame_clip_generation_elements.py +++ b/ui_components/widgets/frame_clip_generation_elements.py @@ -23,7 +23,7 @@ def generate_individual_clip(timing_uuid, quality): timing.interpolated_steps = interpolation_steps img_list = [timing.source_image.location, next_timing.source_image.location] settings = {"interpolation_steps": timing.interpolation_steps} - video_bytes = VideoInterpolator.create_interpolated_clip( + video_bytes, log = VideoInterpolator.create_interpolated_clip( img_list, timing.animation_style, settings @@ -31,10 +31,11 @@ def generate_individual_clip(timing_uuid, quality): video_location = "videos/" + timing.project.name + "/assets/videos/0_raw/" + str(uuid.uuid4()) + ".mp4" video = convert_bytes_to_file( - video_location, - "video/mp4", - video_bytes, - timing.project.uuid + file_location_to_save=video_location, + mime_type="video/mp4", + file_bytes=video_bytes, + project_uuid=timing.project.uuid, + inference_log_id=log.uuid ) 
data_repo.add_interpolated_clip(timing_uuid, interpolated_clip_id=video.uuid, clip_settings=settings) diff --git a/utils/common_utils.py b/utils/common_utils.py index 1c52c52d..660104ae 100644 --- a/utils/common_utils.py +++ b/utils/common_utils.py @@ -8,113 +8,41 @@ from utils.constants import LOGGED_USER from utils.data_repo.data_repo import DataRepo -# creates a file path if it's not already present -def create_file_path(path): - if not path: - return - - file = Path(path) - if not file.is_file(): - last_slash_index = path.rfind('/') - if last_slash_index != -1: - directory_path = path[:last_slash_index] - file_name = path[last_slash_index + 1:] - - # creating directory if not present - if not os.path.exists(directory_path): - os.makedirs(directory_path) - else: - directory_path = './' - file_name = path - - # creating file - file_path = os.path.join(directory_path, file_name) - with open(file_path, 'w') as f: - pass - - # adding columns/rows in the file - if file_name == 'timings.csv': - data = [ - ['frame_time', 'frame_number', 'primary_image', 'alternative_images', 'custom_pipeline', 'negative_prompt', 'guidance_scale', 'seed', 'num_inference_steps', - 'model_id', 'strength', 'notes', 'source_image', 'custom_models', 'adapter_type', 'clip_duration', 'interpolated_video', 'timed_clip', 'prompt', 'mask'], - ] - elif file_name == 'settings.csv': - data = [ - ['key', 'value'], - ['last_prompt', ''], - ['default_model', 'controlnet'], - ['last_strength', '0.5'], - ['last_custom_pipeline', 'None'], - ['audio', ''], - ['input_type', 'Video'], - ['input_video', ''], - ['extraction_type', 'Regular intervals'], - ['width', '704'], - ['height', '512'], - ['last_negative_prompt', '"nudity, boobs, breasts, naked, nsfw"'], - ['last_guidance_scale', '7.5'], - ['last_seed', '0'], - ['last_num_inference_steps', '100'], - ['last_which_stage_to_run_on', 'Current Main Variants'], - ['last_custom_models', '[]'], - ['last_adapter_type', 'normal'] - ] - elif file_name == 
'app_settings.csv': - data = [ - ['key', 'value'], - ['replicate_com_api_key', ''], - ['aws_access_key_id', ''], - ['aws_secret_access_key', ''], - ['previous_project', ''], - ['replicate_username', ''], - ['welcome_state', '0'] - ] - elif file_name == 'log.csv': - data = [ - ['model_name', 'model_version', 'total_inference_time', 'input_params', 'created_on'], - ] - - - if len(data): - with open(file_path, 'w', newline='') as csv_file: - writer = csv.writer(csv_file) - writer.writerows(data) - -def copy_sample_assets(project_name): +def copy_sample_assets(project_uuid): import shutil # copy sample video source = "sample_assets/sample_videos/sample.mp4" - dest = "videos/" + project_name + "/assets/resources/input_videos/sample.mp4" + dest = "videos/" + project_uuid + "/assets/resources/input_videos/sample.mp4" shutil.copyfile(source, dest) -def create_working_assets(project_name): +def create_working_assets(project_uuid): if SERVER != ServerType.DEVELOPMENT.value: return new_project = True - if os.path.exists("videos/"+project_name): + if os.path.exists("videos/"+project_uuid): new_project = False directory_list = [ # project specific files - "videos/" + project_name, - "videos/" + project_name + "/assets", - "videos/" + project_name + "/assets/frames", - "videos/" + project_name + "/assets/frames/0_extracted", - "videos/" + project_name + "/assets/frames/1_selected", - "videos/" + project_name + "/assets/frames/2_character_pipeline_completed", - "videos/" + project_name + "/assets/frames/3_backdrop_pipeline_completed", - "videos/" + project_name + "/assets/resources", - "videos/" + project_name + "/assets/resources/backgrounds", - "videos/" + project_name + "/assets/resources/masks", - "videos/" + project_name + "/assets/resources/audio", - "videos/" + project_name + "/assets/resources/input_videos", - "videos/" + project_name + "/assets/resources/prompt_images", - "videos/" + project_name + "/assets/videos", - "videos/" + project_name + "/assets/videos/0_raw", - 
"videos/" + project_name + "/assets/videos/1_final", - "videos/" + project_name + "/assets/videos/2_completed", + "videos/" + project_uuid, + "videos/" + project_uuid + "/assets", + "videos/" + project_uuid + "/assets/frames", + "videos/" + project_uuid + "/assets/frames/0_extracted", + "videos/" + project_uuid + "/assets/frames/1_selected", + "videos/" + project_uuid + "/assets/frames/2_character_pipeline_completed", + "videos/" + project_uuid + "/assets/frames/3_backdrop_pipeline_completed", + "videos/" + project_uuid + "/assets/resources", + "videos/" + project_uuid + "/assets/resources/backgrounds", + "videos/" + project_uuid + "/assets/resources/masks", + "videos/" + project_uuid + "/assets/resources/audio", + "videos/" + project_uuid + "/assets/resources/input_videos", + "videos/" + project_uuid + "/assets/resources/prompt_images", + "videos/" + project_uuid + "/assets/videos", + "videos/" + project_uuid + "/assets/videos/0_raw", + "videos/" + project_uuid + "/assets/videos/1_final", + "videos/" + project_uuid + "/assets/videos/2_completed", # app data "inference_log", # temp folder @@ -128,16 +56,8 @@ def create_working_assets(project_name): # copying sample assets for new project if new_project: - copy_sample_assets(project_name) - - csv_file_list = [ - f'videos/{project_name}/settings.csv', - f'videos/{project_name}/timings.csv', - 'inference_log/log.csv' - ] + copy_sample_assets(project_uuid) - for csv_file in csv_file_list: - create_file_path(csv_file) # fresh_fetch - bypasses the cache def get_current_user(fresh_fetch=False) -> InternalUserObject: diff --git a/utils/media_processor/interpolator.py b/utils/media_processor/interpolator.py index 086445e7..bdb4b5e6 100644 --- a/utils/media_processor/interpolator.py +++ b/utils/media_processor/interpolator.py @@ -5,6 +5,7 @@ import numpy as np from shared.constants import AnimationStyleType from ui_components.methods.file_methods import generate_temp_file +from ui_components.models import InferenceLogObject 
from utils.data_repo.data_repo import DataRepo from utils.ml_processor.ml_interface import get_ml_client @@ -39,18 +40,17 @@ def create_interpolated_clip(img_location_list, animation_style, settings): animation_style = project_setting.default_animation_style if animation_style == AnimationStyleType.INTERPOLATION.value: - output_video_bytes = VideoInterpolator.video_through_frame_interpolation( + return VideoInterpolator.video_through_frame_interpolation( img_location_list, settings ) elif animation_style == AnimationStyleType.DIRECT_MORPHING.value: - output_video_bytes = VideoInterpolator.video_through_direct_morphing( + return VideoInterpolator.video_through_direct_morphing( img_location_list, settings ) - - return output_video_bytes + # returns a video bytes generated through interpolating frames between the given list of frames @staticmethod @@ -66,7 +66,7 @@ def video_through_frame_interpolation(img_location_list, settings): img2 = open(img2, "rb") ml_client = get_ml_client() - output = ml_client.predict_model_output(REPLICATE_MODEL.google_frame_interpolation, frame1=img1, frame2=img2, + output, log = ml_client.predict_model_output(REPLICATE_MODEL.google_frame_interpolation, frame1=img1, frame2=img2, times_to_interpolate=settings['interpolation_steps']) temp_output_file = generate_temp_file(output, '.mp4') @@ -76,7 +76,7 @@ def video_through_frame_interpolation(img_location_list, settings): os.remove(temp_output_file.name) - return video_bytes + return video_bytes, log @staticmethod def video_through_direct_morphing(img_location_list, settings): @@ -112,5 +112,5 @@ def load_image(image_path_or_url): video_bytes.append(frame_bytes.tobytes()) video_data = b''.join(video_bytes) - return video_data + return video_data, InferenceLogObject({}) # returning None for inference log diff --git a/utils/ml_processor/replicate/replicate.py b/utils/ml_processor/replicate/replicate.py index f1105312..f5756536 100644 --- a/utils/ml_processor/replicate/replicate.py +++ 
b/utils/ml_processor/replicate/replicate.py @@ -57,10 +57,10 @@ def predict_model_output(self, model: ReplicateModel, **kwargs): start_time = time.time() output = model_version.predict(**kwargs) end_time = time.time() - log_model_inference(model, end_time - start_time, **kwargs) + log = log_model_inference(model, end_time - start_time, **kwargs) self._update_usage_credits(end_time - start_time) - return output + return output, log @check_user_credits def inpainting(self, video_name, input_image, prompt, negative_prompt): @@ -75,10 +75,10 @@ def inpainting(self, video_name, input_image, prompt, negative_prompt): start_time = time.time() output = model.predict(mask=mask, image=input_image,prompt=prompt, invert_mask=True, negative_prompt=negative_prompt,num_inference_steps=25) end_time = time.time() - log_model_inference(model, end_time - start_time, prompt=prompt, invert_mask=True, negative_prompt=negative_prompt,num_inference_steps=25) + log = log_model_inference(model, end_time - start_time, prompt=prompt, invert_mask=True, negative_prompt=negative_prompt,num_inference_steps=25) self._update_usage_credits(end_time - start_time) - return output[0] + return output[0], log # TODO: separate image compression from this function @check_user_credits @@ -182,20 +182,6 @@ def dreambooth_training(self, training_file_url, instance_prompt, \ return response - @check_user_credits - def remove_background(self, project_name, input_image): - if not input_image.startswith("http"): - input_image = open(input_image, "rb") - - model = self.get_model(REPLICATE_MODEL.pollination_modnet) - start_time = time.time() - output = model.predict(image=input_image) - end_time = time.time() - log_model_inference(model, end_time - start_time, image=input_image) - self._update_usage_credits(end_time - start_time) - - return output - def get_model_version_from_id(self, model_id): api_key = os.environ.get("REPLICATE_API_TOKEN") headers = {"Authorization": f"Token {api_key}"} From 
77f33fa5d920e85f079638f7a809a72eb5dc7904 Mon Sep 17 00:00:00 2001 From: peter942 Date: Fri, 15 Sep 2023 17:17:34 +0200 Subject: [PATCH 009/164] Rearranging interpolation section --- shared/constants.py | 1 + .../components/frame_styling_page.py | 100 +++++------------- ui_components/setup.py | 2 +- .../widgets/animation_style_element.py | 91 ++++++++++++++++ .../widgets/compare_to_other_variants.py | 79 ++++++++++++++ 5 files changed, 200 insertions(+), 73 deletions(-) create mode 100644 ui_components/widgets/animation_style_element.py create mode 100644 ui_components/widgets/compare_to_other_variants.py diff --git a/shared/constants.py b/shared/constants.py index 34b998ad..21db07b7 100644 --- a/shared/constants.py +++ b/shared/constants.py @@ -63,6 +63,7 @@ class InternalFileTag(ExtendedEnum): class AnimationStyleType(ExtendedEnum): INTERPOLATION = "Interpolation" DIRECT_MORPHING = "Direct Morphing" + IMAGE_TO_VIDEO = "Image to Video" ##################### global constants ##################### diff --git a/ui_components/components/frame_styling_page.py b/ui_components/components/frame_styling_page.py index 53a3bf70..b6f1a14a 100644 --- a/ui_components/components/frame_styling_page.py +++ b/ui_components/components/frame_styling_page.py @@ -16,6 +16,8 @@ from ui_components.widgets.image_carousal import display_image from ui_components.widgets.prompt_finder import prompt_finder_element from ui_components.widgets.styling_element import styling_element +from ui_components.widgets.compare_to_other_variants import compare_to_other_variants +from ui_components.widgets.animation_style_element import animation_style_element from streamlit_option_menu import option_menu from utils import st_memory @@ -77,83 +79,43 @@ def frame_styling_page(mainheader2, project_uuid: str): if st.session_state['page'] == "Motion": idx = st.session_state['current_frame_index'] - 1 - timing1, timing2, timing3 = st.columns([0.5, 1,1]) - - with timing1: - 
update_animation_style_element(st.session_state['current_frame_uuid'], horizontal=False) - with timing2: - current_individual_clip_element(st.session_state['current_frame_uuid']) - - with timing3: - current_preview_video_element(st.session_state['current_frame_uuid']) + motion_sections = ["Other Variants", "Preview Video in Context"] + + st.session_state['show_comparison'] = st_memory.radio("Show:", options=motion_sections, horizontal=True, project_settings=project_settings, key="show_comparison_radio_motion") - elif st.session_state['page'] == "Styling": - # carousal_of_images_element(project_uuid, stage=WorkflowStageType.STYLED.value) - comparison_values = [ - "Other Variants", "Source Frame", "Previous & Next Frame", "None"] + if st.session_state['show_comparison'] == "Other Variants": + compare_to_other_variants(timing_details, project_uuid, data_repo,stage="Motion") - st.session_state['show_comparison'] = st_memory.radio( - "Show comparison to:", options=comparison_values, horizontal=True, project_settings=project_settings, key="show_comparison_radio") + elif st.session_state['show_comparison'] == "Preview Video in Context": + current_preview_video_element(st.session_state['current_frame_uuid']) + + update_animation_style_element(st.session_state['current_frame_uuid'], horizontal=False) - timing = data_repo.get_timing_from_uuid( - st.session_state['current_frame_uuid']) - variants = timing.alternative_images_list + st.markdown("***") - if st.session_state['show_comparison'] == "Other Variants": - mainimages1, mainimages2 = st.columns([1, 1]) - aboveimage1, aboveimage2, aboveimage3 = st.columns([1, 0.25, 0.75]) + with st.expander("🎬 Choose Animation Style & Create Variants", expanded=True): - with aboveimage1: - st.info( - f"Current variant = {timing_details[st.session_state['current_frame_index'] - 1].primary_variant_index + 1}") + animation_style_element(st.session_state['current_frame_uuid'], project_settings) - with aboveimage2: - show_more_than_10_variants 
= st.checkbox( - "Show >10 variants", key="show_more_than_10_variants") + - with aboveimage3: - number_of_variants = len(variants) + - if show_more_than_10_variants is True: - current_variant = int( - timing_details[st.session_state['current_frame_index'] - 1].primary_variant_index) - which_variant = st.radio(f'Main variant = {current_variant + 1}', range(1, - number_of_variants + 1), index=number_of_variants-1, horizontal=True, key=f"Main variant for {st.session_state['current_frame_index']}") - else: - last_ten_variants = range( - max(1, number_of_variants - 10), number_of_variants + 1) - current_variant = int( - timing_details[st.session_state['current_frame_index'] - 1].primary_variant_index) - which_variant = st.radio(f'Main variant = {current_variant + 1}', last_ten_variants, index=len( - last_ten_variants)-1, horizontal=True, key=f"Main variant for {st.session_state['current_frame_index']}") + - with mainimages1: - project_settings = data_repo.get_project_setting(project_uuid) - st.success("**Main variant**") - if len(timing_details[st.session_state['current_frame_index'] - 1].alternative_images_list): - st.image(timing_details[st.session_state['current_frame_index'] - 1].primary_image_location, - use_column_width=True) - else: - st.error("No variants found for this frame") + elif st.session_state['page'] == "Styling": + # carousal_of_images_element(project_uuid, stage=WorkflowStageType.STYLED.value) + comparison_values = [ + "Other Variants", "Source Frame", "Previous & Next Frame", "None"] + + st.session_state['show_comparison'] = st_memory.radio("Show comparison to:", options=comparison_values, horizontal=True, project_settings=project_settings, key="show_comparison_radio") - with mainimages2: - if len(timing_details[st.session_state['current_frame_index'] - 1].alternative_images_list): - if which_variant - 1 == current_variant: - st.success("**Main variant**") - else: - st.info(f"**Variant #{which_variant}**") - - st.image(variants[which_variant- 
1].location, - use_column_width=True) - - if which_variant- 1 != current_variant: - if st.button(f"Promote Variant #{which_variant}", key=f"Promote Variant #{which_variant} for {st.session_state['current_frame_index']}", help="Promote this variant to the primary image"): - promote_image_variant( - st.session_state['current_frame_uuid'], which_variant - 1) - time.sleep(0.5) - st.experimental_rerun() + + if st.session_state['show_comparison'] == "Other Variants": + compare_to_other_variants(timing_details, project_uuid, data_repo,stage="Styling") + elif st.session_state['show_comparison'] == "Source Frame": if timing_details[st.session_state['current_frame_index']- 1].primary_image: img2 = timing_details[st.session_state['current_frame_index'] - 1].primary_image_location @@ -553,13 +515,7 @@ def frame_styling_page(mainheader2, project_uuid: str): if apply_zoom_effects == "Yes": image_preview = generate_pil_image(selected_image_location) selected_image = apply_image_transformations(image_preview, st.session_state['zoom_level_input'], st.session_state['rotation_angle_input'], st.session_state['x_shift'], st.session_state['y_shift']) - # project_update_data = { - # "zoom_level": st.session_state['zoom_level_input'], - # "rotation_angle_value": st.session_state['rotation_angle_input'], - # "x_shift": st.session_state['x_shift'], - # "y_shift": st.session_state['y_shift'] - # } - # data_repo.update_project_setting(project_uuid, **project_update_data) + else: selected_image = generate_pil_image(selected_image_location) st.info("Starting Image:") diff --git a/ui_components/setup.py b/ui_components/setup.py index 54285593..b37889da 100644 --- a/ui_components/setup.py +++ b/ui_components/setup.py @@ -29,7 +29,7 @@ def setup_app_ui(): h1, h2 = st.columns([1, 3]) with h1: - st.markdown("# :red[Ba]:green[no]:orange[do]:blue[co]") + st.markdown("# :red[ba]:green[no]:orange[do]:blue[co]") sections = ["Open Project", "App Settings", "New Project"] diff --git 
a/ui_components/widgets/animation_style_element.py b/ui_components/widgets/animation_style_element.py new file mode 100644 index 00000000..751adbf3 --- /dev/null +++ b/ui_components/widgets/animation_style_element.py @@ -0,0 +1,91 @@ +import json +import streamlit as st +import uuid +from typing import List +from utils.data_repo.data_repo import DataRepo + + +def animation_style_element(current_frame_uuid, project_settings): + motion_modules = ["mm-v15-v2", "AD_Stabilized_Motion","TemporalDiff"] + data_repo = DataRepo() + current_animation_style = data_repo.get_timing_from_uuid(current_frame_uuid).animation_style + + if current_animation_style == "Interpolation": + animation_tool = st.radio("Animation Tool:", options=['Animatediff', 'Google FiLM'], key="animation_tool", horizontal=True) + video_resolution = st.radio("Video Resolution:", options=["Preview Resolution", "Full Resolution"], key="video_resolution", horizontal=True) + + if animation_tool == "Animatediff": + which_motion_module = st.selectbox("Which motion module would you like to use?", options=motion_modules, key="which_motion_module") + prompt_column_1, prompt_column_2 = st.columns([1, 1]) + + with prompt_column_1: + starting_prompt = st.text_area("Starting Prompt:", value=project_settings.default_prompt, key="starting_prompt") + + with prompt_column_2: + ending_prompt = st.text_area("Ending Prompt:", value=project_settings.default_prompt, key="ending_prompt") + + animate_col_1, animate_col_2 = st.columns([1, 3]) + + with animate_col_1: + how_many_variants = st.number_input("How many variants?", min_value=1, max_value=100, value=1, step=1, key="how_many_variants") + + normalise_speed = st.checkbox("Normalise Speed", value=True, key="normalise_speed") + + if st.button("Generate Animation Clip", key="generate_animation_clip"): + for i in range(0, how_many_variants): + st.write("Generating animation clip...") + time.sleep(2) + st.write("Lol, jk, this isn't done yet") + time.sleep(2) + 
st.experimental_rerun() + + elif current_animation_style == "Image to Video": + st.info("For image to video, you can select one or more prompts, and how many frames you want to generate for each prompt - it'll attempt to travel from one prompt to the next.") + which_motion_module = st.selectbox("Which motion module would you like to use?", options=motion_modules, key="which_motion_module") + + # Initialize the list of dictionaries if it doesn't exist + if 'travel_list' not in st.session_state: + st.session_state['travel_list'] = [] + + st.markdown("### Add to Prompt Travel List") + prompt = st.text_area("Prompt") + frame_count = st.number_input("How many frames would you like?", min_value=1, value=1, step=1) + + if st.button("Add to travel"): + st.session_state['travel_list'].append({'prompt': prompt, 'frame_count': frame_count}) + + st.markdown("***") + st.markdown("### Travel List") + + # Display each item in the list + if not st.session_state['travel_list']: + st.error("The travel list is empty.") + else: + for i, item in enumerate(st.session_state['travel_list']): + new_prompt = st.text_area(f"Prompt {i+1}", value=item['prompt']) + bottom1, bottom2,bottom3 = st.columns([1, 2,1]) + with bottom1: + new_frame_count = st.number_input(f"Frame Count {i+1}", min_value=1, value=item['frame_count'], step=1) + with bottom3: + if st.button(f"Delete Prompt {i+1}"): + del st.session_state['travel_list'][i] + st.experimental_rerun() + # Update the item if it has been edited + if new_prompt != item['prompt'] or new_frame_count != item['frame_count']: + st.session_state['travel_list'][i] = {'prompt': new_prompt, 'frame_count': new_frame_count} + st.markdown("***") + + animate_col_1, animate_col_2 = st.columns([1, 3]) + + with animate_col_1: + how_many_variants = st.number_input("How many variants?", min_value=1, max_value=100, value=1, step=1, key="how_many_variants") + + if st.button("Generate Animation Clip", key="generate_animation_clip"): + for i in range(0, 
how_many_variants): + st.write("Generating animation clip...") + time.sleep(2) + st.write("Lol, jk, this isn't done yet") + time.sleep(2) + st.experimental_rerun() + else: + st.error("No animation style selected") \ No newline at end of file diff --git a/ui_components/widgets/compare_to_other_variants.py b/ui_components/widgets/compare_to_other_variants.py new file mode 100644 index 00000000..cf36b0ac --- /dev/null +++ b/ui_components/widgets/compare_to_other_variants.py @@ -0,0 +1,79 @@ +import json +import streamlit as st +import uuid +from typing import List +from utils.data_repo.data_repo import DataRepo + + +def compare_to_other_variants(timing_details, project_uuid, data_repo, stage="Motion"): + + main_video = 'https://www.youtube.com/watch?v=dQw4w9WgXcQ' + alternative_videos = ['https://www.youtube.com/watch?v=kib6uXQsxBA','https://www.youtube.com/watch?v=ehWD5kG4xws','https://www.youtube.com/watch?v=zkTf0LmDqKI'] + primary_video_variant_index = 0 + + timing = data_repo.get_timing_from_uuid( + st.session_state['current_frame_uuid']) + variants = timing.alternative_images_list + mainimages1, mainimages2 = st.columns([1, 1]) + aboveimage1, aboveimage2, aboveimage3 = st.columns([1, 0.25, 0.75]) + + which_variant = None + + with aboveimage1: + st.info(f"Current variant = {timing_details[st.session_state['current_frame_index'] - 1].primary_variant_index + 1}") + + with aboveimage2: + show_more_than_10_variants = st.checkbox("Show >10 variants", key="show_more_than_10_variants") + + with aboveimage3: + number_of_variants = len(alternative_videos) if stage == "Motion" else len(variants) + + if show_more_than_10_variants is True: + current_variant = primary_video_variant_index if stage == "Motion" else int( + timing_details[st.session_state['current_frame_index'] - 1].primary_variant_index) + which_variant = st.radio(f'Main variant = {current_variant + 1}', range(1, + number_of_variants + 1), index=number_of_variants-1, horizontal=True, key=f"Main variant for 
{st.session_state['current_frame_index']}") + else: + last_ten_variants = range( + max(1, number_of_variants - 10), number_of_variants + 1) + current_variant = primary_video_variant_index if stage == "Motion" else int( + timing_details[st.session_state['current_frame_index'] - 1].primary_variant_index) + which_variant = st.radio(f'Main variant = {current_variant + 1}', last_ten_variants, index=len( + last_ten_variants)-1, horizontal=True, key=f"Main variant for {st.session_state['current_frame_index']}") + + with mainimages1: + project_settings = data_repo.get_project_setting(project_uuid) + st.success("**Main variant**") + if stage == "Motion": + st.video(main_video, format='mp4', start_time=0) + else: + if len(timing_details[st.session_state['current_frame_index'] - 1].alternative_images_list): + st.image(timing_details[st.session_state['current_frame_index'] - 1].primary_image_location, + use_column_width=True) + else: + st.error("No variants found for this frame") + + with mainimages2: + if stage == "Motion": + if which_variant - 1 == current_variant: + st.success("**Main variant**") + else: + st.info(f"**Variant #{which_variant}**") + + st.video(alternative_videos[which_variant- 1], format='mp4', start_time=0) + else: + if len(timing_details[st.session_state['current_frame_index'] - 1].alternative_images_list): + if which_variant - 1 == current_variant: + st.success("**Main variant**") + else: + st.info(f"**Variant #{which_variant}**") + + st.image(variants[which_variant- 1].location, + use_column_width=True) + + if which_variant- 1 != current_variant: + if st.button(f"Promote Variant #{which_variant}", key=f"Promote Variant #{which_variant} for {st.session_state['current_frame_index']}", help="Promote this variant to the primary image"): + promote_image_variant( + st.session_state['current_frame_uuid'], which_variant - 1) + time.sleep(0.5) + st.experimental_rerun() \ No newline at end of file From 5bdf0dedbf65a0a3824493acede6cde9c6591049 Mon Sep 17 00:00:00 
2001 From: peter942 Date: Sat, 16 Sep 2023 01:45:16 +0200 Subject: [PATCH 010/164] Moving around stuff --- .../components/frame_styling_page.py | 69 ++----------------- ui_components/methods/common_methods.py | 53 ++++++++++++++ .../widgets/animation_style_element.py | 7 +- 3 files changed, 60 insertions(+), 69 deletions(-) diff --git a/ui_components/components/frame_styling_page.py b/ui_components/components/frame_styling_page.py index b6f1a14a..4efc44a9 100644 --- a/ui_components/components/frame_styling_page.py +++ b/ui_components/components/frame_styling_page.py @@ -5,7 +5,7 @@ from ui_components.methods.common_methods import delete_frame, drawing_mode, promote_image_variant, save_uploaded_image, \ create_timings_row_at_frame_number, move_frame, calculate_desired_duration_of_individual_clip, \ calculate_desired_duration_of_individual_clip, apply_image_transformations, \ - ai_frame_editing_element, clone_styling_settings, zoom_inputs + ai_frame_editing_element, clone_styling_settings, zoom_inputs,add_key_frame from ui_components.methods.file_methods import generate_pil_image, save_or_host_file from ui_components.methods.ml_methods import trigger_restyling_process from ui_components.methods.video_methods import create_or_get_single_preview_video @@ -78,11 +78,10 @@ def frame_styling_page(mainheader2, project_uuid: str): frame_selector_widget() if st.session_state['page'] == "Motion": + idx = st.session_state['current_frame_index'] - 1 - - motion_sections = ["Other Variants", "Preview Video in Context"] - - st.session_state['show_comparison'] = st_memory.radio("Show:", options=motion_sections, horizontal=True, project_settings=project_settings, key="show_comparison_radio_motion") + + st.session_state['show_comparison'] = st_memory.radio("Show:", options=["Other Variants", "Preview Video in Context"], horizontal=True, project_settings=project_settings, key="show_comparison_radio_motion") if st.session_state['show_comparison'] == "Other Variants": 
compare_to_other_variants(timing_details, project_uuid, data_repo,stage="Motion") @@ -98,11 +97,6 @@ def frame_styling_page(mainheader2, project_uuid: str): animation_style_element(st.session_state['current_frame_uuid'], project_settings) - - - - - elif st.session_state['page'] == "Styling": # carousal_of_images_element(project_uuid, stage=WorkflowStageType.STYLED.value) @@ -110,7 +104,6 @@ def frame_styling_page(mainheader2, project_uuid: str): "Other Variants", "Source Frame", "Previous & Next Frame", "None"] st.session_state['show_comparison'] = st_memory.radio("Show comparison to:", options=comparison_values, horizontal=True, project_settings=project_settings, key="show_comparison_radio") - if st.session_state['show_comparison'] == "Other Variants": @@ -127,6 +120,7 @@ def frame_styling_page(mainheader2, project_uuid: str): image_comparison(starting_position=50, img1=img1, img2=img2, make_responsive=False, label1=WorkflowStageType.SOURCE.value, label2=WorkflowStageType.STYLED.value) + elif st.session_state['show_comparison'] == "Previous & Next Frame": mainimages1, mainimages2, mainimages3 = st.columns([1, 1, 1]) @@ -525,59 +519,6 @@ def frame_styling_page(mainheader2, project_uuid: str): if st.button(f"Add key frame",type="primary",use_container_width=True): - def add_key_frame(selected_image, inherit_styling_settings, how_long_after): - data_repo = DataRepo() - project_uuid = st.session_state['project_uuid'] - timing_details = data_repo.get_timing_list_from_project(project_uuid) - project_settings = data_repo.get_project_setting(project_uuid) - - - if len(timing_details) == 0: - index_of_current_item = 1 - else: - index_of_current_item = min(len(timing_details), st.session_state['current_frame_index']) - - timing_details = data_repo.get_timing_list_from_project(project_uuid) - - if len(timing_details) == 0: - key_frame_time = 0.0 - elif index_of_current_item == len(timing_details): - key_frame_time = float(timing_details[index_of_current_item - 1].frame_time) 
+ how_long_after - else: - key_frame_time = (float(timing_details[index_of_current_item - 1].frame_time) + float( - timing_details[index_of_current_item].frame_time)) / 2.0 - - if len(timing_details) == 0: - new_timing = create_timings_row_at_frame_number(project_uuid, 0) - else: - new_timing = create_timings_row_at_frame_number(project_uuid, index_of_current_item, frame_time=key_frame_time) - - clip_duration = calculate_desired_duration_of_individual_clip(new_timing.uuid) - data_repo.update_specific_timing(new_timing.uuid, clip_duration=clip_duration) - - timing_details = data_repo.get_timing_list_from_project(project_uuid) - if selected_image: - save_uploaded_image(selected_image, project_uuid, timing_details[index_of_current_item].uuid, "source") - save_uploaded_image(selected_image, project_uuid, timing_details[index_of_current_item].uuid, "styled") - - if inherit_styling_settings == "Yes": - index = which_stage_for_starting_image or index_of_current_item - clone_styling_settings(index - 1, timing_details[index_of_current_item].uuid) - - data_repo.update_specific_timing(timing_details[index_of_current_item].uuid, \ - animation_style=project_settings.default_animation_style) - - if len(timing_details) == 1: - st.session_state['current_frame_index'] = 1 - st.session_state['current_frame_uuid'] = timing_details[0].uuid - else: - st.session_state['current_frame_index'] = min(len(timing_details), st.session_state['current_frame_index']) - st.session_state['current_frame_uuid'] = timing_details[st.session_state['current_frame_index'] - 1].uuid - - st.session_state['page'] = "Styling" - st.session_state['section_index'] = 0 - st.experimental_rerun() - add_key_frame(selected_image, inherit_styling_settings, how_long_after) st.experimental_rerun() diff --git a/ui_components/methods/common_methods.py b/ui_components/methods/common_methods.py index 1fc8554f..b60c063a 100644 --- a/ui_components/methods/common_methods.py +++ b/ui_components/methods/common_methods.py @@ 
-31,6 +31,59 @@ from streamlit_image_comparison import image_comparison +def add_key_frame(selected_image, inherit_styling_settings, how_long_after): + data_repo = DataRepo() + project_uuid = st.session_state['project_uuid'] + timing_details = data_repo.get_timing_list_from_project(project_uuid) + project_settings = data_repo.get_project_setting(project_uuid) + + + if len(timing_details) == 0: + index_of_current_item = 1 + else: + index_of_current_item = min(len(timing_details), st.session_state['current_frame_index']) + + timing_details = data_repo.get_timing_list_from_project(project_uuid) + + if len(timing_details) == 0: + key_frame_time = 0.0 + elif index_of_current_item == len(timing_details): + key_frame_time = float(timing_details[index_of_current_item - 1].frame_time) + how_long_after + else: + key_frame_time = (float(timing_details[index_of_current_item - 1].frame_time) + float( + timing_details[index_of_current_item].frame_time)) / 2.0 + + if len(timing_details) == 0: + new_timing = create_timings_row_at_frame_number(project_uuid, 0) + else: + new_timing = create_timings_row_at_frame_number(project_uuid, index_of_current_item, frame_time=key_frame_time) + + clip_duration = calculate_desired_duration_of_individual_clip(new_timing.uuid) + data_repo.update_specific_timing(new_timing.uuid, clip_duration=clip_duration) + + timing_details = data_repo.get_timing_list_from_project(project_uuid) + if selected_image: + save_uploaded_image(selected_image, project_uuid, timing_details[index_of_current_item].uuid, "source") + save_uploaded_image(selected_image, project_uuid, timing_details[index_of_current_item].uuid, "styled") + + if inherit_styling_settings == "Yes": + index = which_stage_for_starting_image or index_of_current_item + clone_styling_settings(index - 1, timing_details[index_of_current_item].uuid) + + data_repo.update_specific_timing(timing_details[index_of_current_item].uuid, \ + animation_style=project_settings.default_animation_style) + + if 
len(timing_details) == 1: + st.session_state['current_frame_index'] = 1 + st.session_state['current_frame_uuid'] = timing_details[0].uuid + else: + st.session_state['current_frame_index'] = min(len(timing_details), st.session_state['current_frame_index']) + st.session_state['current_frame_uuid'] = timing_details[st.session_state['current_frame_index'] - 1].uuid + + st.session_state['page'] = "Styling" + st.session_state['section_index'] = 0 + st.experimental_rerun() + def clone_styling_settings(source_frame_number, target_frame_uuid): data_repo = DataRepo() target_timing = data_repo.get_timing_from_uuid(target_frame_uuid) diff --git a/ui_components/widgets/animation_style_element.py b/ui_components/widgets/animation_style_element.py index 751adbf3..3c53705b 100644 --- a/ui_components/widgets/animation_style_element.py +++ b/ui_components/widgets/animation_style_element.py @@ -1,10 +1,7 @@ -import json import streamlit as st -import uuid from typing import List from utils.data_repo.data_repo import DataRepo - def animation_style_element(current_frame_uuid, project_settings): motion_modules = ["mm-v15-v2", "AD_Stabilized_Motion","TemporalDiff"] data_repo = DataRepo() @@ -32,7 +29,7 @@ def animation_style_element(current_frame_uuid, project_settings): normalise_speed = st.checkbox("Normalise Speed", value=True, key="normalise_speed") if st.button("Generate Animation Clip", key="generate_animation_clip"): - for i in range(0, how_many_variants=0): + for _ in range(how_many_variants): st.write("Generating animation clip...") time.sleep(2) st.write("Lol, jk, this isn't done yet") @@ -81,7 +78,7 @@ def animation_style_element(current_frame_uuid, project_settings): how_many_variants = st.number_input("How many variants?", min_value=1, max_value=100, value=1, step=1, key="how_many_variants") if st.button("Generate Animation Clip", key="generate_animation_clip"): - for i in range(0, how_many_variants=0): + for _ in range(how_many_variants): st.write("Generating animation 
clip...") time.sleep(2) st.write("Lol, jk, this isn't done yet") From e3994face7e3bcc432a1d1e92c8afae6dba3902b Mon Sep 17 00:00:00 2001 From: piyushK52 Date: Sat, 16 Sep 2023 13:53:13 +0530 Subject: [PATCH 011/164] interpolated clip list added --- backend/db_repo.py | 41 ++++++++++++++----- .../0008_interpolated_clip_list_added.py | 22 ++++++++++ backend/models.py | 7 +++- backend/serializers/dao.py | 7 ++-- backend/serializers/dto.py | 12 +++++- .../components/frame_styling_page.py | 3 +- ui_components/methods/common_methods.py | 10 ++--- ui_components/methods/file_methods.py | 3 +- ui_components/methods/video_methods.py | 23 ++++++----- ui_components/models.py | 33 +++++++-------- .../widgets/frame_clip_generation_elements.py | 4 +- utils/data_repo/data_repo.py | 6 ++- 12 files changed, 116 insertions(+), 55 deletions(-) create mode 100644 backend/migrations/0008_interpolated_clip_list_added.py diff --git a/backend/db_repo.py b/backend/db_repo.py index 475de8fb..f8b82e11 100644 --- a/backend/db_repo.py +++ b/backend/db_repo.py @@ -865,13 +865,13 @@ def create_timing(self, **kwargs): attributes._data['source_image_id'] = source_image.id - if 'interpolated_clip_id' in attributes.data: - if attributes.data['interpolated_clip_id'] != None: - interpolated_clip: InternalFileObject = InternalFileObject.objects.filter(uuid=attributes.data['interpolated_clip_id'], is_disabled=False).first() + if 'interpolated_clip_list' in attributes.data and attributes.data['interpolated_clip_list'] != None: + for clip_uuid in attributes.data['interpolated_clip_list']: + interpolated_clip: InternalFileObject = InternalFileObject.objects.filter(uuid=clip_uuid, is_disabled=False).first() if not interpolated_clip: return InternalResponse({}, 'invalid interpolated clip uuid', False) - attributes._data['interpolated_clip_id'] = interpolated_clip.id + attributes._data['interpolated_clip_list'] = list(set(attributes._data['interpolated_clip_list'])) if 'timed_clip_id' in attributes.data: @@ 
-941,6 +941,21 @@ def remove_existing_timing(self, project_uuid): return InternalResponse({}, 'timing removed successfully', True) + def add_interpolated_clip(self, uuid, **kwargs): + timing = Timing.objects.filter(uuid=uuid, is_disabled=False).first() + if not timing: + return InternalResponse({}, 'invalid timing uuid', False) + + if 'interpolated_clip_id' in kwargs and kwargs['interpolated_clip_id'] != None: + interpolated_clip: InternalFileObject = InternalFileObject.objects.filter(uuid=kwargs['interpolated_clip_id'], is_disabled=False).first() + if not interpolated_clip: + return InternalResponse({}, 'invalid interpolated clip uuid', False) + + timing.add_interpolated_clip_list([interpolated_clip.uuid.hex]) + timing.save() + + return InternalResponse({}, 'success', True) + # TODO: add dao in this method def update_specific_timing(self, uuid, **kwargs): # DBRepo._count += 1 @@ -976,13 +991,15 @@ def update_specific_timing(self, uuid, **kwargs): kwargs['source_image_id'] = source_image.id - if 'interpolated_clip_id' in kwargs: - if kwargs['interpolated_clip_id'] != None: - interpolated_clip: InternalFileObject = InternalFileObject.objects.filter(uuid=kwargs['interpolated_clip_id'], is_disabled=False).first() + if 'interpolated_clip_list' in kwargs and kwargs['interpolated_clip_list'] != None: + cur_list = [] + for clip_uuid in kwargs['interpolated_clip_list']: + interpolated_clip: InternalFileObject = InternalFileObject.objects.filter(uuid=clip_uuid, is_disabled=False).first() if not interpolated_clip: return InternalResponse({}, 'invalid interpolated clip uuid', False) - kwargs['interpolated_clip_id'] = interpolated_clip.id + cur_list.append(interpolated_clip.uuid) + kwargs['interpolated_clip_list'] = list(set(kwargs['interpolated_clip_list'])) if 'timed_clip_id' in kwargs: @@ -1411,8 +1428,8 @@ def create_backup(self, project_uuid, backup_name): if timing.source_image: file_uuid_list.add(timing.source_image.uuid) - if timing.interpolated_clip: - 
file_uuid_list.add(timing.interpolated_clip.uuid) + if timing.interpolated_clip_list: + file_uuid_list.extend(json.loads(timing.interpolated_clip_list)) if timing.timed_clip: file_uuid_list.add(timing.timed_clip.uuid) @@ -1453,6 +1470,7 @@ def create_backup(self, project_uuid, backup_name): timing['source_image_uuid'] = str(id_file_dict[timing['source_image_id']].uuid) if timing['source_image_id'] else None del timing['source_image_id'] + # TODO: fix this code using interpolated_clip_list timing['interpolated_clip_uuid'] = str(id_file_dict[timing['interpolated_clip_id']].uuid) if timing['interpolated_clip_id'] else None del timing['interpolated_clip_id'] @@ -1552,11 +1570,12 @@ def restore_backup(self, backup_uuid: str): if len(matching_timing_list): backup_timing = matching_timing_list[0] + # TODO: fix this code using interpolated_clip_list self.update_specific_timing( timing.uuid, model_uuid=backup_timing['model_uuid'], source_image_uuid=backup_timing['source_image_uuid'], - interpolated_clip=backup_timing['interpolated_clip_uuid'], + interpolated_clip_list=backup_timing['interpolated_clip_list'], timed_clip=backup_timing['timed_clip_uuid'], mask=backup_timing['mask_uuid'], canny_image=backup_timing['canny_image_uuid'], diff --git a/backend/migrations/0008_interpolated_clip_list_added.py b/backend/migrations/0008_interpolated_clip_list_added.py new file mode 100644 index 00000000..cbf8ad59 --- /dev/null +++ b/backend/migrations/0008_interpolated_clip_list_added.py @@ -0,0 +1,22 @@ +# Generated by Django 4.2.1 on 2023-09-16 03:09 + +from django.db import migrations, models + + +class Migration(migrations.Migration): + + dependencies = [ + ('backend', '0007_log_mapped_to_file'), + ] + + operations = [ + migrations.RemoveField( + model_name='timing', + name='interpolated_clip', + ), + migrations.AddField( + model_name='timing', + name='interpolated_clip_list', + field=models.TextField(default=None, null=True), + ), + ] diff --git a/backend/models.py 
b/backend/models.py index fce84993..b319f01e 100644 --- a/backend/models.py +++ b/backend/models.py @@ -140,7 +140,7 @@ class Timing(BaseModel): project = models.ForeignKey(Project, on_delete=models.CASCADE, null=True) model = models.ForeignKey(AIModel, on_delete=models.DO_NOTHING, null=True) source_image = models.ForeignKey(InternalFileObject, related_name="source_image", on_delete=models.DO_NOTHING, null=True) - interpolated_clip = models.ForeignKey(InternalFileObject, related_name="interpolated_clip", on_delete=models.DO_NOTHING, null=True) + interpolated_clip_list = models.TextField(default=None, null=True) timed_clip = models.ForeignKey(InternalFileObject, related_name="timed_clip", on_delete=models.DO_NOTHING, null=True) mask = models.ForeignKey(InternalFileObject, related_name="mask", on_delete=models.DO_NOTHING, null=True) canny_image = models.ForeignKey(InternalFileObject, related_name="canny_image", on_delete=models.DO_NOTHING, null=True) @@ -236,6 +236,11 @@ def save(self, *args, **kwargs): super().save(*args, **kwargs) + def add_interpolated_clip_list(self, clip_uuid_list): + cur_list = json.loads(self.interpolated_clip_list) if self.interpolated_clip_list else [] + cur_list.extend(clip_uuid_list) + cur_list = list(set(cur_list)) + self.interpolated_clip_list = json.dumps(cur_list) @property def alternative_images_list(self): diff --git a/backend/serializers/dao.py b/backend/serializers/dao.py index fcf1619c..ccd42c44 100644 --- a/backend/serializers/dao.py +++ b/backend/serializers/dao.py @@ -15,8 +15,9 @@ class CreateFileDao(serializers.Serializer): type = serializers.ChoiceField(choices=InternalFileType.value_list()) local_path = serializers.CharField(max_length=512, required=False) hosted_url = serializers.CharField(max_length=512, required=False) - tag = serializers.CharField(max_length=100, required=False) + tag = serializers.CharField(max_length=100, allow_blank=True, required=False) project_id = serializers.CharField(max_length=100, 
required=False) + inference_log_id = serializers.CharField(max_length=100, allow_null=True, required=False) def validate(self, data): local_path = data.get('local_path') @@ -60,7 +61,7 @@ class UpdateAIModelDao(serializers.Serializer): class CreateInferenceLogDao(serializers.Serializer): project_id = serializers.CharField(max_length=100, required=False) - model_id = serializers.CharField(max_length=100, required=False) + model_id = serializers.CharField(max_length=100, allow_null=True, required=False) input_params = serializers.CharField(required=False) output_details = serializers.CharField(required=False) total_inference_time = serializers.CharField(required=False) @@ -76,7 +77,7 @@ class CreateTimingDao(serializers.Serializer): project_id = serializers.CharField(max_length=100) model_id = serializers.CharField(max_length=100, required=False) source_image_id = serializers.CharField(max_length=100, required=False) - interpolated_clip_id = serializers.CharField(max_length=100, required=False) + interpolated_clip_list = serializers.CharField(max_length=None, required=False) timed_clip_id = serializers.CharField(max_length=100, required=False) mask_id = serializers.CharField(max_length=100, required=False) canny_image_id = serializers.CharField(max_length=100, required=False) diff --git a/backend/serializers/dto.py b/backend/serializers/dto.py index be7a4f1d..df78cc09 100644 --- a/backend/serializers/dto.py +++ b/backend/serializers/dto.py @@ -1,3 +1,4 @@ +import json from rest_framework import serializers from backend.models import AIModel, AppSetting, BackupTiming, InferenceLog, InternalFileObject, Project, Setting, Timing, User @@ -73,7 +74,7 @@ class TimingDto(serializers.ModelSerializer): project = ProjectDto() model = AIModelDto() source_image = InternalFileDto() - interpolated_clip = InternalFileDto() + interpolated_clip_list = serializers.SerializerMethodField() timed_clip = InternalFileDto() mask = InternalFileDto() canny_image = InternalFileDto() @@ -87,7 
+88,7 @@ class Meta: "project", "model", "source_image", - "interpolated_clip", + "interpolated_clip_list", "timed_clip", "mask", "canny_image", @@ -116,6 +117,13 @@ class Meta: "transformation_stage" ) + def get_interpolated_clip_list(self, obj): + res = [] + id_list = json.loads(obj.interpolated_clip_list) if obj.interpolated_clip_list else [] + file_list = InternalFileObject.objects.filter(uuid__in=id_list, is_disabled=False).all() + return [InternalFileDto(file).data for file in file_list] + + class AppSettingDto(serializers.ModelSerializer): user = UserDto() diff --git a/ui_components/components/frame_styling_page.py b/ui_components/components/frame_styling_page.py index 53a3bf70..6bd0f741 100644 --- a/ui_components/components/frame_styling_page.py +++ b/ui_components/components/frame_styling_page.py @@ -46,11 +46,10 @@ def frame_styling_page(mainheader2, project_uuid: str): st.session_state['transformation_stage'] = project_settings.default_stage st.session_state['show_comparison'] = "Don't show" - ''' if "current_frame_uuid" not in st.session_state: timing = data_repo.get_timing_list_from_project(project_uuid)[0] st.session_state['current_frame_uuid'] = timing.uuid - ''' + if 'frame_styling_view_type' not in st.session_state: st.session_state['frame_styling_view_type'] = "Individual View" diff --git a/ui_components/methods/common_methods.py b/ui_components/methods/common_methods.py index 2c400ad7..b36e7222 100644 --- a/ui_components/methods/common_methods.py +++ b/ui_components/methods/common_methods.py @@ -864,9 +864,9 @@ def promote_image_variant(timing_uuid, variant_to_promote_frame_number: str): prev_timing = data_repo.get_prev_timing(timing_uuid) if prev_timing: data_repo.update_specific_timing( - prev_timing.uuid, interpolated_clip_id=None) + prev_timing.uuid, interpolated_clip_list=None) data_repo.update_specific_timing( - timing_uuid, interpolated_clip_id=None) + timing_uuid, interpolated_clip_list=None) timing_details: 
List[InternalFrameTimingObject] = data_repo.get_timing_list_from_project( timing.project.uuid) @@ -875,7 +875,7 @@ def promote_image_variant(timing_uuid, variant_to_promote_frame_number: str): # DOUBT: setting last interpolated_video to empty? if frame_idx < len(timing_details): data_repo.update_specific_timing( - timing.uuid, interpolated_clip_id=None) + timing.uuid, interpolated_clip_list=None) if frame_idx > 1: data_repo.update_specific_timing( @@ -1358,12 +1358,12 @@ def create_timings_row_at_frame_number(project_uuid, index_of_frame, frame_time= if prev_timing: prev_clip_duration = calculate_desired_duration_of_individual_clip(prev_timing.uuid) data_repo.update_specific_timing( - prev_timing.uuid, interpolated_clip_id=None, clip_duration=prev_clip_duration) + prev_timing.uuid, interpolated_clip_list=None, clip_duration=prev_clip_duration) next_timing: InternalAIModelObject = data_repo.get_next_timing(timing.uuid) if next_timing: data_repo.update_specific_timing( - next_timing.uuid, interpolated_clip_id=None) + next_timing.uuid, interpolated_clip_list=None) return timing diff --git a/ui_components/methods/file_methods.py b/ui_components/methods/file_methods.py index 8cbf69d0..044175bc 100644 --- a/ui_components/methods/file_methods.py +++ b/ui_components/methods/file_methods.py @@ -136,7 +136,7 @@ def generate_temp_file_from_uploaded_file(uploaded_file): return temp_file -def convert_bytes_to_file(file_location_to_save, mime_type, file_bytes, project_uuid, filename=None, tag=""): +def convert_bytes_to_file(file_location_to_save, mime_type, file_bytes, project_uuid, inference_log_id=None, filename=None, tag=""): data_repo = DataRepo() hosted_url = save_or_host_file_bytes(file_bytes, file_location_to_save, "." + mime_type.split("/")[1]) @@ -144,6 +144,7 @@ def convert_bytes_to_file(file_location_to_save, mime_type, file_bytes, project_ "name": str(uuid.uuid4()) + "." 
+ mime_type.split("/")[1] if not filename else filename, "type": InternalFileType.IMAGE.value, "project_id": project_uuid, + "inference_log_id": inference_log_id, "tag": tag } diff --git a/ui_components/methods/video_methods.py b/ui_components/methods/video_methods.py index 3b625621..6c451077 100644 --- a/ui_components/methods/video_methods.py +++ b/ui_components/methods/video_methods.py @@ -21,7 +21,8 @@ # returns the timed_clip, which is the interpolated video with correct length -def create_or_get_single_preview_video(timing_uuid): +# interpolated_clip_uuid signals which clip to promote to timed clip +def create_or_get_single_preview_video(timing_uuid, interpolated_clip_uuid=None): from ui_components.methods.file_methods import generate_temp_file, save_or_host_file_bytes from ui_components.methods.common_methods import get_audio_bytes_for_slice @@ -32,7 +33,7 @@ def create_or_get_single_preview_video(timing_uuid): project_details: InternalSettingObject = data_repo.get_project_setting( timing.project.uuid) - if not timing.interpolated_clip: + if not len(timing.interpolated_clip_list): timing.interpolation_steps = 3 next_timing = data_repo.get_next_timing(timing.uuid) img_list = [timing.source_image.location, next_timing.source_image.location] @@ -46,17 +47,19 @@ def create_or_get_single_preview_video(timing_uuid): } video_fie = convert_bytes_to_file(**file_data) - data_repo.update_specific_timing( - timing_uuid, interpolated_clip_id=video_fie.uuid) + data_repo.add_interpolated_clip(timing_uuid, interpolated_clip_id=video_fie.uuid) if not timing.timed_clip: timing = data_repo.get_timing_from_uuid(timing_uuid) + interpolated_clip = data_repo.get_file_from_uuid(interpolated_clip_uuid) if interpolated_clip_uuid \ + else timing.interpolated_clip_list[0] + temp_video_file = None - if timing.interpolated_clip.hosted_url: - temp_video_file = generate_temp_file(timing.interpolated_clip.hosted_url, '.mp4') + if interpolated_clip.hosted_url: + temp_video_file = 
generate_temp_file(interpolated_clip.hosted_url, '.mp4') - file_path = temp_video_file.name if temp_video_file else timing.interpolated_clip.local_path + file_path = temp_video_file.name if temp_video_file else interpolated_clip.local_path clip = VideoFileClip(file_path) number_text = TextClip(str(timing.aux_frame_index), @@ -76,8 +79,8 @@ def create_or_get_single_preview_video(timing_uuid): with open(file_path, 'rb') as f: video_bytes = f.read() - hosted_url = save_or_host_file_bytes(video_bytes, timing.interpolated_clip.local_path) - data_repo.update_file(timing.interpolated_clip.uuid, hosted_url=hosted_url) + hosted_url = save_or_host_file_bytes(video_bytes, interpolated_clip.local_path) + data_repo.update_file(interpolated_clip.uuid, hosted_url=hosted_url) os.remove(temp_video_file.name) @@ -87,7 +90,7 @@ def create_or_get_single_preview_video(timing_uuid): data_repo.update_specific_timing(timing_uuid, clip_duration=clip_duration) timing: InternalFrameTimingObject = data_repo.get_timing_from_uuid(timing_uuid) - output_video = update_speed_of_video_clip(timing.interpolated_clip, timing_uuid) + output_video = update_speed_of_video_clip(interpolated_clip, timing_uuid) data_repo.update_specific_timing(timing_uuid, timed_clip_id=output_video.uuid) # adding audio if the audio file is present diff --git a/ui_components/models.py b/ui_components/models.py index 7afd48dd..379e4429 100644 --- a/ui_components/models.py +++ b/ui_components/models.py @@ -7,14 +7,15 @@ class InternalFileObject: - def __init__(self, uuid, name, type, local_path, hosted_url, created_on, tag=""): - self.uuid = uuid - self.name = name - self.type = type - self.local_path = local_path - self.hosted_url = hosted_url - self.tag = tag - self.created_on = created_on + def __init__(self, **kwargs): + self.uuid = kwargs['uuid'] if key_present('uuid', kwargs) else None + self.name = kwargs['name'] if key_present('name', kwargs) else None + self.type = kwargs['type'] if key_present('type', kwargs) else 
None + self.local_path = kwargs['local_path'] if key_present('local_path', kwargs) else None + self.hosted_url = kwargs['hosted_url'] if key_present('hosted_url', kwargs) else None + self.tag = kwargs['tag'] if key_present('tag', kwargs) else None + self.created_on = kwargs['created_on'] if key_present('created_on', kwargs) else None + self.inference_log = InferenceLogObject(kwargs['inference_log']) if key_present('inference_log', kwargs) else None @property def location(self): @@ -86,8 +87,8 @@ def __init__(self, **kwargs): **kwargs["model"]) if 'model' in kwargs and kwargs["model"] else None self.source_image = InternalFileObject( **kwargs["source_image"]) if 'source_image' in kwargs and kwargs["source_image"] else None - self.interpolated_clip = InternalFileObject( - **kwargs["interpolated_clip"]) if 'interpolated_clip' in kwargs and kwargs["interpolated_clip"] else None + self.interpolated_clip_list = [InternalFileObject(**file) for file in kwargs["interpolated_clip_list"]] \ + if 'interpolated_clip_list' in kwargs and kwargs["interpolated_clip_list"] else [] self.timed_clip = InternalFileObject( **kwargs["timed_clip"]) if 'timed_clip' in kwargs and kwargs["timed_clip"] else None self.mask = InternalFileObject( @@ -280,14 +281,14 @@ def to_json(self): class InferenceLogObject: def __init__(self, **kwargs): - self.uuid = kwargs['uuid'] if 'uuid' in kwargs else None + self.uuid = kwargs['uuid'] if key_present('uuid', kwargs) else None self.project = InternalProjectObject( - **kwargs["project"]) if 'project' in kwargs else None + **kwargs["project"]) if key_present('project', kwargs) else None self.model = InternalAIModelObject( - **kwargs["model"]) if 'model' in kwargs else None - self.input_params = kwargs['input_params'] if 'input_params' in kwargs else None - self.output_details = kwargs['output_details'] if 'output_details' in kwargs else None - self.total_inference_time = kwargs['total_inference_time'] if 'total_inference_time' in kwargs else None + 
**kwargs["model"]) if key_present('model', kwargs) else None + self.input_params = kwargs['input_params'] if key_present('input_params', kwargs) else None + self.output_details = kwargs['output_details'] if key_present('output_details', kwargs) else None + self.total_inference_time = kwargs['total_inference_time'] if key_present('total_inference_time', kwargs) else None def key_present(key, dict): diff --git a/ui_components/widgets/frame_clip_generation_elements.py b/ui_components/widgets/frame_clip_generation_elements.py index d989a1a8..b6a50010 100644 --- a/ui_components/widgets/frame_clip_generation_elements.py +++ b/ui_components/widgets/frame_clip_generation_elements.py @@ -38,8 +38,8 @@ def generate_individual_clip(timing_uuid, quality): inference_log_id=log.uuid ) - data_repo.add_interpolated_clip(timing_uuid, interpolated_clip_id=video.uuid, clip_settings=settings) - output_video = update_speed_of_video_clip(timing.interpolated_clip, timing_uuid) + data_repo.add_interpolated_clip(timing_uuid, interpolated_clip_id=video.uuid) + output_video = update_speed_of_video_clip(video, timing_uuid) data_repo.update_specific_timing(timing_uuid, timed_clip_id=output_video.uuid) return output_video diff --git a/utils/data_repo/data_repo.py b/utils/data_repo/data_repo.py index d5425edb..6c77e96d 100644 --- a/utils/data_repo/data_repo.py +++ b/utils/data_repo/data_repo.py @@ -191,7 +191,8 @@ def get_all_inference_log_list(self, project_id=None): return [InferenceLogObject(**log) for log in log_list] if log_list else None def create_inference_log(self, **kwargs): - log = self.db_repo.create_inference_log(**kwargs).data['data'] + res = self.db_repo.create_inference_log(**kwargs) + log = res.data['data'] if res else None return InferenceLogObject(**log) if log else None def delete_inference_log_from_uuid(self, uuid): @@ -249,7 +250,8 @@ def update_specific_timing(self, uuid, **kwargs): return res.status def add_interpolated_clip(self, timing_uuid, **kwargs): - pass + res = 
self.db_repo.add_interpolated_clip(timing_uuid, **kwargs) + return res.status def delete_timing_from_uuid(self, uuid): res = self.db_repo.delete_timing_from_uuid(uuid) From 233894ba45e5b76f1d163ed9b702db1f9e1579e2 Mon Sep 17 00:00:00 2001 From: piyushK52 Date: Sat, 16 Sep 2023 23:21:26 +0530 Subject: [PATCH 012/164] wip: new ui integration --- .../components/frame_styling_page.py | 18 +++---- ui_components/constants.py | 4 ++ ui_components/methods/common_methods.py | 34 +++++++++++- ui_components/methods/video_methods.py | 3 +- ui_components/models.py | 11 ++++ ui_components/setup.py | 3 +- .../widgets/animation_style_element.py | 4 +- ...iants.py => variant_comparison_element.py} | 53 ++++++++++--------- utils/media_processor/video.py | 3 +- 9 files changed, 93 insertions(+), 40 deletions(-) rename ui_components/widgets/{compare_to_other_variants.py => variant_comparison_element.py} (52%) diff --git a/ui_components/components/frame_styling_page.py b/ui_components/components/frame_styling_page.py index dd33a02d..03525f7c 100644 --- a/ui_components/components/frame_styling_page.py +++ b/ui_components/components/frame_styling_page.py @@ -16,14 +16,14 @@ from ui_components.widgets.image_carousal import display_image from ui_components.widgets.prompt_finder import prompt_finder_element from ui_components.widgets.styling_element import styling_element -from ui_components.widgets.compare_to_other_variants import compare_to_other_variants +from ui_components.widgets.variant_comparison_element import variant_comparison_element from ui_components.widgets.animation_style_element import animation_style_element from streamlit_option_menu import option_menu from utils import st_memory import math -from ui_components.constants import WorkflowStageType +from ui_components.constants import CreativeProcessType, WorkflowStageType from utils.constants import ImageStage from utils.data_repo.data_repo import DataRepo @@ -76,14 +76,14 @@ def frame_styling_page(mainheader2, project_uuid: 
str): with st.sidebar: frame_selector_widget() - if st.session_state['page'] == "Motion": + if st.session_state['page'] == CreativeProcessType.MOTION.value: idx = st.session_state['current_frame_index'] - 1 st.session_state['show_comparison'] = st_memory.radio("Show:", options=["Other Variants", "Preview Video in Context"], horizontal=True, project_settings=project_settings, key="show_comparison_radio_motion") if st.session_state['show_comparison'] == "Other Variants": - compare_to_other_variants(timing_details, project_uuid, data_repo,stage="Motion") + variant_comparison_element(st.session_state['current_frame_uuid']) elif st.session_state['show_comparison'] == "Preview Video in Context": current_preview_video_element(st.session_state['current_frame_uuid']) @@ -94,10 +94,10 @@ def frame_styling_page(mainheader2, project_uuid: str): with st.expander("🎬 Choose Animation Style & Create Variants", expanded=True): - animation_style_element(st.session_state['current_frame_uuid'], project_settings) + animation_style_element(st.session_state['current_frame_uuid'], project_uuid) - elif st.session_state['page'] == "Styling": + elif st.session_state['page'] == CreativeProcessType.STYLING.value: # carousal_of_images_element(project_uuid, stage=WorkflowStageType.STYLED.value) comparison_values = [ "Other Variants", "Source Frame", "Previous & Next Frame", "None"] @@ -106,7 +106,7 @@ def frame_styling_page(mainheader2, project_uuid: str): if st.session_state['show_comparison'] == "Other Variants": - compare_to_other_variants(timing_details, project_uuid, data_repo,stage="Styling") + variant_comparison_element(st.session_state['current_frame_uuid'], stage=CreativeProcessType.STYLING.value) elif st.session_state['show_comparison'] == "Source Frame": if timing_details[st.session_state['current_frame_index']- 1].primary_image: @@ -309,7 +309,7 @@ def frame_styling_page(mainheader2, project_uuid: str): len(timing_details)) - if st.session_state['page'] == "Styling": + if 
st.session_state['page'] == CreativeProcessType.STYLING.value: with st.sidebar: styling_element(st.session_state['current_frame_uuid'], view_type="List") @@ -369,7 +369,7 @@ def frame_styling_page(mainheader2, project_uuid: str): st.markdown("***") # Update the current page in session state - elif st.session_state['page'] == "Motion": + elif st.session_state['page'] == CreativeProcessType.MOTION.value: num_timing_details = len(timing_details) shift1, shift2 = st.columns([2, 1.2]) diff --git a/ui_components/constants.py b/ui_components/constants.py index 242882ee..dc148052 100644 --- a/ui_components/constants.py +++ b/ui_components/constants.py @@ -11,6 +11,10 @@ class VideoQuality(ExtendedEnum): PREVIEW = "Preview" LOW = "Low" +class CreativeProcessType(ExtendedEnum): + STYLING = "Styling" + MOTION = "Motion" + # TODO: make proper paths for every file CROPPED_IMG_LOCAL_PATH = "videos/temp/cropped.png" diff --git a/ui_components/methods/common_methods.py b/ui_components/methods/common_methods.py index 18d91449..633d9e4e 100644 --- a/ui_components/methods/common_methods.py +++ b/ui_components/methods/common_methods.py @@ -13,10 +13,11 @@ import uuid from io import BytesIO import numpy as np +import urllib3 from shared.constants import SERVER, InternalFileType, ServerType from pydub import AudioSegment from backend.models import InternalFileObject -from ui_components.constants import CROPPED_IMG_LOCAL_PATH, MASK_IMG_LOCAL_PATH, SECOND_MASK_FILE, SECOND_MASK_FILE_PATH, TEMP_MASK_FILE, WorkflowStageType +from ui_components.constants import CROPPED_IMG_LOCAL_PATH, MASK_IMG_LOCAL_PATH, SECOND_MASK_FILE, SECOND_MASK_FILE_PATH, TEMP_MASK_FILE, CreativeProcessType, WorkflowStageType from ui_components.methods.file_methods import add_temp_file_to_project, generate_pil_image, save_or_host_file, save_or_host_file_bytes from ui_components.methods.ml_methods import create_depth_mask_image, inpainting, remove_background from ui_components.methods.video_methods import 
calculate_desired_duration_of_individual_clip @@ -30,6 +31,8 @@ from typing import Union from streamlit_image_comparison import image_comparison +from utils.media_processor.video import VideoProcessor + def add_key_frame(selected_image, inherit_styling_settings, how_long_after): data_repo = DataRepo() @@ -80,7 +83,7 @@ def add_key_frame(selected_image, inherit_styling_settings, how_long_after): st.session_state['current_frame_index'] = min(len(timing_details), st.session_state['current_frame_index']) st.session_state['current_frame_uuid'] = timing_details[st.session_state['current_frame_index'] - 1].uuid - st.session_state['page'] = "Styling" + st.session_state['page'] = CreativeProcessType.STYLING.value st.session_state['section_index'] = 0 st.experimental_rerun() @@ -939,6 +942,33 @@ def promote_image_variant(timing_uuid, variant_to_promote_frame_number: str): if frame_idx < len(timing_details): data_repo.update_specific_timing(timing.uuid, timed_clip_id=None) +def promote_video_variant(timing_uuid, variant_to_promote_frame_number: str): + data_repo = DataRepo() + timing = data_repo.get_timing_from_uuid(timing_uuid) + + variant_to_promote = timing.interpolated_clip_list[variant_to_promote_frame_number] + + if variant_to_promote.location.startswith(('http://', 'https://')): + temp_video_path, _ = urllib3.request.urlretrieve(variant_to_promote.location) + video = VideoFileClip(temp_video_path) + else: + video = VideoFileClip(variant_to_promote.location) + + if video.duration != timing.clip_duration: + video_bytes = VideoProcessor.update_video_speed( + variant_to_promote.location, + timing.animation_style, + timing.clip_duration + ) + + hosted_url = save_or_host_file_bytes(video_bytes, variant_to_promote.local_path) + if hosted_url: + data_repo.update_file(video.uuid, hosted_url=hosted_url) + + data_repo.update_specific_timing(timing.uuid, timed_clip_id=variant_to_promote.uuid) + + + def extract_canny_lines(image_path_or_url, project_uuid, low_threshold=50, 
high_threshold=150) -> InternalFileObject: data_repo = DataRepo() diff --git a/ui_components/methods/video_methods.py b/ui_components/methods/video_methods.py index 6c451077..375ceabe 100644 --- a/ui_components/methods/video_methods.py +++ b/ui_components/methods/video_methods.py @@ -80,7 +80,8 @@ def create_or_get_single_preview_video(timing_uuid, interpolated_clip_uuid=None) video_bytes = f.read() hosted_url = save_or_host_file_bytes(video_bytes, interpolated_clip.local_path) - data_repo.update_file(interpolated_clip.uuid, hosted_url=hosted_url) + if hosted_url: + data_repo.update_file(interpolated_clip.uuid, hosted_url=hosted_url) os.remove(temp_video_file.name) diff --git a/ui_components/models.py b/ui_components/models.py index 379e4429..4179e33b 100644 --- a/ui_components/models.py +++ b/ui_components/models.py @@ -158,6 +158,17 @@ def primary_variant_index(self): return -1 + @property + def primary_interpolated_video_index(self): + if not (self.interpolated_clip_list and len(self.interpolated_clip_list)) or not self.timed_clip: + return -1 + + for idx, img in enumerate(self.interpolated_clip_list): + if img.uuid == self.timed_clip.uuid: + return idx + + return -1 + @property def animation_style(self): key = f"{self.uuid}_animation_style" diff --git a/ui_components/setup.py b/ui_components/setup.py index b37889da..3d81da1a 100644 --- a/ui_components/setup.py +++ b/ui_components/setup.py @@ -11,6 +11,7 @@ from ui_components.components.project_settings_page import project_settings_page from ui_components.components.video_rendering_page import video_rendering_page from streamlit_option_menu import option_menu +from ui_components.constants import CreativeProcessType from ui_components.models import InternalAppSettingObject from utils.common_utils import create_working_assets, get_current_user_uuid, reset_project_state @@ -130,7 +131,7 @@ def setup_app_ui(): with st.sidebar: - pages = ["Styling", "Motion"] + pages = CreativeProcessType.value_list() if 'page' not 
in st.session_state: st.session_state["page"] = pages[0] diff --git a/ui_components/widgets/animation_style_element.py b/ui_components/widgets/animation_style_element.py index 3c53705b..f0878ecd 100644 --- a/ui_components/widgets/animation_style_element.py +++ b/ui_components/widgets/animation_style_element.py @@ -1,10 +1,12 @@ +import time import streamlit as st from typing import List from utils.data_repo.data_repo import DataRepo -def animation_style_element(current_frame_uuid, project_settings): +def animation_style_element(current_frame_uuid, project_uuid): motion_modules = ["mm-v15-v2", "AD_Stabilized_Motion","TemporalDiff"] data_repo = DataRepo() + project_settings = data_repo.get_project_setting(project_uuid) current_animation_style = data_repo.get_timing_from_uuid(current_frame_uuid).animation_style if current_animation_style == "Interpolation": diff --git a/ui_components/widgets/compare_to_other_variants.py b/ui_components/widgets/variant_comparison_element.py similarity index 52% rename from ui_components/widgets/compare_to_other_variants.py rename to ui_components/widgets/variant_comparison_element.py index cf36b0ac..a29a28d6 100644 --- a/ui_components/widgets/compare_to_other_variants.py +++ b/ui_components/widgets/variant_comparison_element.py @@ -1,68 +1,69 @@ +import time import json import streamlit as st import uuid from typing import List +from ui_components.constants import CreativeProcessType +from ui_components.methods.common_methods import promote_image_variant, promote_video_variant from utils.data_repo.data_repo import DataRepo -def compare_to_other_variants(timing_details, project_uuid, data_repo, stage="Motion"): +def variant_comparison_element(timing_uuid, stage=CreativeProcessType.MOTION.value): + data_repo = DataRepo() - main_video = 'https://www.youtube.com/watch?v=dQw4w9WgXcQ' - alternative_videos = 
['https://www.youtube.com/watch?v=kib6uXQsxBA','https://www.youtube.com/watch?v=ehWD5kG4xws','https://www.youtube.com/watch?v=zkTf0LmDqKI'] - primary_video_variant_index = 0 - - timing = data_repo.get_timing_from_uuid( - st.session_state['current_frame_uuid']) + timing = data_repo.get_timing_from_uuid(timing_uuid) variants = timing.alternative_images_list mainimages1, mainimages2 = st.columns([1, 1]) aboveimage1, aboveimage2, aboveimage3 = st.columns([1, 0.25, 0.75]) - which_variant = None + which_variant = 1 with aboveimage1: - st.info(f"Current variant = {timing_details[st.session_state['current_frame_index'] - 1].primary_variant_index + 1}") + st.info(f"Current variant = {timing.primary_variant_index + 1}") with aboveimage2: show_more_than_10_variants = st.checkbox("Show >10 variants", key="show_more_than_10_variants") with aboveimage3: - number_of_variants = len(alternative_videos) if stage == "Motion" else len(variants) + number_of_variants = len(timing.interpolated_clip_list) if stage == CreativeProcessType.MOTION.value else len(variants) if show_more_than_10_variants is True: - current_variant = primary_video_variant_index if stage == "Motion" else int( - timing_details[st.session_state['current_frame_index'] - 1].primary_variant_index) + current_variant = timing.primary_interpolated_video_index if stage == CreativeProcessType.MOTION.value else int( + timing.primary_variant_index) which_variant = st.radio(f'Main variant = {current_variant + 1}', range(1, number_of_variants + 1), index=number_of_variants-1, horizontal=True, key=f"Main variant for {st.session_state['current_frame_index']}") else: last_ten_variants = range( max(1, number_of_variants - 10), number_of_variants + 1) - current_variant = primary_video_variant_index if stage == "Motion" else int( - timing_details[st.session_state['current_frame_index'] - 1].primary_variant_index) + current_variant = timing.primary_interpolated_video_index if stage == CreativeProcessType.MOTION.value else int( + 
timing.primary_variant_index) which_variant = st.radio(f'Main variant = {current_variant + 1}', last_ten_variants, index=len( last_ten_variants)-1, horizontal=True, key=f"Main variant for {st.session_state['current_frame_index']}") with mainimages1: - project_settings = data_repo.get_project_setting(project_uuid) st.success("**Main variant**") - if stage == "Motion": - st.video(main_video, format='mp4', start_time=0) + if stage == CreativeProcessType.MOTION.value: + st.video(timing.timed_clip.location, format='mp4', start_time=0) if timing.timed_clip else st.error("No video present") else: - if len(timing_details[st.session_state['current_frame_index'] - 1].alternative_images_list): - st.image(timing_details[st.session_state['current_frame_index'] - 1].primary_image_location, - use_column_width=True) + if len(timing.alternative_images_list): + st.image(timing.primary_image_location, use_column_width=True) else: st.error("No variants found for this frame") with mainimages2: - if stage == "Motion": + if stage == CreativeProcessType.MOTION.value: + if not (timing.interpolated_clip_list and len(timing.interpolated_clip_list)): + st.error("No variant for this frame") + if which_variant - 1 == current_variant: st.success("**Main variant**") else: st.info(f"**Variant #{which_variant}**") - st.video(alternative_videos[which_variant- 1], format='mp4', start_time=0) + st.video(timing.interpolated_clip_list[which_variant - 1].location, format='mp4', start_time=0) if \ + (timing.interpolated_clip_list and len(timing.interpolated_clip_list)) else st.error("No video present") else: - if len(timing_details[st.session_state['current_frame_index'] - 1].alternative_images_list): + if len(timing.alternative_images_list): if which_variant - 1 == current_variant: st.success("**Main variant**") else: @@ -73,7 +74,9 @@ def compare_to_other_variants(timing_details, project_uuid, data_repo, stage="Mo if which_variant- 1 != current_variant: if st.button(f"Promote Variant #{which_variant}", 
key=f"Promote Variant #{which_variant} for {st.session_state['current_frame_index']}", help="Promote this variant to the primary image"): - promote_image_variant( - st.session_state['current_frame_uuid'], which_variant - 1) + if stage == CreativeProcessType.MOTION.value: + promote_video_variant(timing.uuid, which_variant - 1) + else: + promote_image_variant(timing.uuid, which_variant - 1) time.sleep(0.5) st.experimental_rerun() \ No newline at end of file diff --git a/utils/media_processor/video.py b/utils/media_processor/video.py index baa5411b..254dc79c 100644 --- a/utils/media_processor/video.py +++ b/utils/media_processor/video.py @@ -30,7 +30,8 @@ def update_video_speed(video_location, animation_style, desired_duration): output_clip.write_videofile(filename=temp_output_file.name, codec="libx265") - elif animation_style == AnimationStyleType.INTERPOLATION.value: + # modifying speed for any other animation method + else: clip = VideoFileClip(video_location) input_video_duration = clip.duration desired_speed_change = float( From b841c37a31e04bab56eb528aa80c9aa781fde7cc Mon Sep 17 00:00:00 2001 From: piyushK52 Date: Tue, 19 Sep 2023 22:34:51 +0530 Subject: [PATCH 013/164] wip: multi api call for replicate added --- shared/constants.py | 4 ++ ui_components/methods/video_methods.py | 59 ++++++++++++++++++- ui_components/models.py | 10 +++- .../widgets/animation_style_element.py | 58 ++++++++++-------- .../widgets/frame_clip_generation_elements.py | 41 ++----------- utils/media_processor/interpolator.py | 48 +++++++++++---- utils/media_processor/video.py | 19 ++++-- utils/ml_processor/motion_module.py | 31 ++++++++++ utils/ml_processor/replicate/replicate.py | 28 ++++++++- utils/ml_processor/replicate/utils.py | 10 ++++ 10 files changed, 224 insertions(+), 84 deletions(-) create mode 100644 utils/ml_processor/motion_module.py diff --git a/shared/constants.py b/shared/constants.py index 21db07b7..98711ce8 100644 --- a/shared/constants.py +++ b/shared/constants.py @@ 
-65,6 +65,10 @@ class AnimationStyleType(ExtendedEnum): DIRECT_MORPHING = "Direct Morphing" IMAGE_TO_VIDEO = "Image to Video" +class AnimationToolType(ExtendedEnum): + ANIMATEDIFF = 'Animatediff' + G_FILM = "Google FiLM" + ##################### global constants ##################### SERVER = os.getenv('SERVER', ServerType.PRODUCTION.value) diff --git a/ui_components/methods/video_methods.py b/ui_components/methods/video_methods.py index 375ceabe..457db77e 100644 --- a/ui_components/methods/video_methods.py +++ b/ui_components/methods/video_methods.py @@ -10,7 +10,7 @@ from moviepy.video.compositing.CompositeVideoClip import CompositeVideoClip from backend.models import InternalFileObject -from shared.constants import InternalFileTag +from shared.constants import AnimationToolType, InternalFileTag from shared.file_upload.s3 import is_s3_image_url from ui_components.constants import VideoQuality from ui_components.methods.file_methods import convert_bytes_to_file @@ -103,6 +103,51 @@ def create_or_get_single_preview_video(timing_uuid, interpolated_clip_uuid=None) timing_uuid) return timing.timed_clip +# this includes all the animation styles [direct morphing, interpolation, image to video] +def create_single_interpolated_clip(timing_uuid, quality, settings={}, variant_count=1): + data_repo = DataRepo() + timing: InternalFrameTimingObject = data_repo.get_timing_from_uuid(timing_uuid) + next_timing: InternalFrameTimingObject = data_repo.get_next_timing(timing_uuid) + + if quality == 'full': + interpolation_steps = VideoInterpolator.calculate_dynamic_interpolations_steps(timing.clip_duration) + elif quality == 'preview': + interpolation_steps = 3 + + timing.interpolated_steps = interpolation_steps + img_list = [timing.source_image.location, next_timing.source_image.location] + settings.update(interpolation_steps=timing.interpolation_steps) + + # res is an array of tuples (video_bytes, log) + res = VideoInterpolator.create_interpolated_clip( + img_list, + 
timing.animation_style, + settings, + variant_count + ) + + output_video_list = [] + for (video_bytes, log) in res: + if 'normalise_speed' in settings and settings['normalise_speed']: + video_bytes = VideoProcessor.update_video_bytes_speed(video_bytes, timing.animation_style, timing.clip_duration) + + video_location = "videos/" + str(timing.project.uuid) + "/assets/videos/0_raw/" + str(uuid.uuid4()) + ".mp4" + video = convert_bytes_to_file( + file_location_to_save=video_location, + mime_type="video/mp4", + file_bytes=video_bytes, + project_uuid=timing.project.uuid, + inference_log_id=log.uuid + ) + + data_repo.add_interpolated_clip(timing_uuid, interpolated_clip_id=video.uuid) + output_video = update_speed_of_video_clip(video, timing_uuid) + data_repo.update_specific_timing(timing_uuid, timed_clip_id=output_video.uuid) + + output_video_list.append(output_video) + + return output_video_list + # preview_clips have frame numbers on them. Preview clip is generated from index-2 to index+2 frames def create_full_preview_video(timing_uuid, speed=1) -> InternalFileObject: @@ -285,7 +330,7 @@ def add_audio_to_video_slice(video_file, audio_bytes): os.rename("output_with_audio.mp4", video_location) - +# final video rendering of all the frames involved def render_video(final_video_name, project_uuid, quality, file_tag=InternalFileTag.GENERATED_VIDEO.value): from ui_components.methods.common_methods import update_clip_duration_of_all_timing_frames from ui_components.methods.file_methods import convert_bytes_to_file, generate_temp_file @@ -320,11 +365,19 @@ def render_video(final_video_name, project_uuid, quality, file_tag=InternalFileT if not timing.interpolated_clip: next_timing = data_repo.get_next_timing(current_timing.uuid) - video_bytes, log = VideoInterpolator.create_interpolated_clip( + settings = { + "animation_tool": current_timing.animation_tool, + "interpolation_steps": current_timing.interpolation_steps + } + + res = VideoInterpolator.create_interpolated_clip( 
img_location_list=[current_timing.source_image.location, next_timing.source_image.location], + settings=settings, interpolation_steps=current_timing.interpolation_steps ) + video_bytes, log = res[0] + file_location = "videos/" + current_timing.project.name + "/assets/videos/0_raw/" + str(uuid.uuid4()) + ".mp4" video_file = convert_bytes_to_file( file_location_to_save=file_location, diff --git a/ui_components/models.py b/ui_components/models.py index 4179e33b..6639ea6d 100644 --- a/ui_components/models.py +++ b/ui_components/models.py @@ -1,7 +1,7 @@ import datetime import streamlit as st import json -from shared.constants import AnimationStyleType +from shared.constants import AnimationStyleType, AnimationToolType from ui_components.constants import TEMP_MASK_FILE @@ -169,6 +169,14 @@ def primary_interpolated_video_index(self): return -1 + @property + def animation_tool(self): + key = f"{self.uuid}_animation_tool" + if not (key in st.session_state and st.session_state[key]): + st.session_state[key] = AnimationToolType.G_FILM.value + + return st.session_state[key] + @property def animation_style(self): key = f"{self.uuid}_animation_style" diff --git a/ui_components/widgets/animation_style_element.py b/ui_components/widgets/animation_style_element.py index f0878ecd..0e7c1afa 100644 --- a/ui_components/widgets/animation_style_element.py +++ b/ui_components/widgets/animation_style_element.py @@ -1,20 +1,27 @@ import time import streamlit as st from typing import List +from shared.constants import AnimationToolType +from ui_components.methods.video_methods import create_single_interpolated_clip from utils.data_repo.data_repo import DataRepo +from utils.ml_processor.motion_module import AnimateDiffCheckpoint -def animation_style_element(current_frame_uuid, project_uuid): - motion_modules = ["mm-v15-v2", "AD_Stabilized_Motion","TemporalDiff"] +def animation_style_element(timing_uuid, project_uuid): + motion_modules = AnimateDiffCheckpoint.get_name_list() data_repo = 
DataRepo() project_settings = data_repo.get_project_setting(project_uuid) - current_animation_style = data_repo.get_timing_from_uuid(current_frame_uuid).animation_style + current_animation_style = data_repo.get_timing_from_uuid(timing_uuid).animation_style + variant_count = 1 if current_animation_style == "Interpolation": - animation_tool = st.radio("Animation Tool:", options=['Animatediff', 'Google FiLM'], key="animation_tool", horizontal=True) + animation_tool = st.radio("Animation Tool:", options=AnimationToolType.value_list(), key="animation_tool", horizontal=True) video_resolution = st.radio("Video Resolution:", options=["Preview Resolution", "Full Resolution"], key="video_resolution", horizontal=True) - if animation_tool == "Animatediff": - which_motion_module = st.selectbox("Which motion module would you like to use?", options=motion_modules, key="which_motion_module") + settings = { + "animation_tool": animation_tool + } + if animation_tool == AnimationToolType.ANIMATEDIFF.value: + motion_module = st.selectbox("Which motion module would you like to use?", options=motion_modules, key="motion_module") prompt_column_1, prompt_column_2 = st.columns([1, 1]) with prompt_column_1: @@ -26,17 +33,16 @@ def animation_style_element(current_frame_uuid, project_uuid): animate_col_1, animate_col_2 = st.columns([1, 3]) with animate_col_1: - how_many_variants = st.number_input("How many variants?", min_value=1, max_value=100, value=1, step=1, key="how_many_variants") + variant_count = st.number_input("How many variants?", min_value=1, max_value=100, value=1, step=1, key="variant_count") normalise_speed = st.checkbox("Normalise Speed", value=True, key="normalise_speed") - - if st.button("Generate Animation Clip", key="generate_animation_clip"): - for _ in range(how_many_variants): - st.write("Generating animation clip...") - time.sleep(2) - st.write("Lol, jk, this isn't done yet") - time.sleep(2) - st.experimental_rerun() + + settings.update( + 
motion_module=AnimateDiffCheckpoint.get_model_from_name(motion_module), + starting_prompt=starting_prompt, + ending_prompt=ending_prompt, + normalise_speed=normalise_speed + ) elif current_animation_style == "Image to Video": st.info("For image to video, you can select one or more prompts, and how many frames you want to generate for each prompt - it'll attempt to travel from one prompt to the next.") @@ -77,14 +83,16 @@ def animation_style_element(current_frame_uuid, project_uuid): animate_col_1, animate_col_2 = st.columns([1, 3]) with animate_col_1: - how_many_variants = st.number_input("How many variants?", min_value=1, max_value=100, value=1, step=1, key="how_many_variants") + variant_count = st.number_input("How many variants?", min_value=1, max_value=100, value=1, step=1, key="variant_count") - if st.button("Generate Animation Clip", key="generate_animation_clip"): - for _ in range(how_many_variants): - st.write("Generating animation clip...") - time.sleep(2) - st.write("Lol, jk, this isn't done yet") - time.sleep(2) - st.experimental_rerun() - else: - st.error("No animation style selected") \ No newline at end of file + if st.button("Generate Animation Clip", key="generate_animation_clip"): + vid_quality = "full" if video_resolution == "Full Resolution" else "preview" + st.write("Generating animation clip...") + create_single_interpolated_clip( + timing_uuid, + vid_quality, + settings, + variant_count + ) + st.experimental_rerun() + \ No newline at end of file diff --git a/ui_components/widgets/frame_clip_generation_elements.py b/ui_components/widgets/frame_clip_generation_elements.py index b6a50010..d3214119 100644 --- a/ui_components/widgets/frame_clip_generation_elements.py +++ b/ui_components/widgets/frame_clip_generation_elements.py @@ -2,7 +2,7 @@ import streamlit as st from shared.constants import AnimationStyleType from ui_components.methods.file_methods import convert_bytes_to_file -from ui_components.methods.video_methods import 
create_full_preview_video, update_speed_of_video_clip +from ui_components.methods.video_methods import create_full_preview_video, create_single_interpolated_clip, update_speed_of_video_clip from ui_components.models import InternalFrameTimingObject from utils.data_repo.data_repo import DataRepo from utils.media_processor.interpolator import VideoInterpolator @@ -10,39 +10,6 @@ # get audio_bytes of correct duration for a given frame def current_individual_clip_element(timing_uuid): - def generate_individual_clip(timing_uuid, quality): - data_repo = DataRepo() - timing: InternalFrameTimingObject = data_repo.get_timing_from_uuid(timing_uuid) - next_timing: InternalFrameTimingObject = data_repo.get_next_timing(timing_uuid) - - if quality == 'full': - interpolation_steps = VideoInterpolator.calculate_dynamic_interpolations_steps(timing.clip_duration) - elif quality == 'preview': - interpolation_steps = 3 - - timing.interpolated_steps = interpolation_steps - img_list = [timing.source_image.location, next_timing.source_image.location] - settings = {"interpolation_steps": timing.interpolation_steps} - video_bytes, log = VideoInterpolator.create_interpolated_clip( - img_list, - timing.animation_style, - settings - ) - - video_location = "videos/" + timing.project.name + "/assets/videos/0_raw/" + str(uuid.uuid4()) + ".mp4" - video = convert_bytes_to_file( - file_location_to_save=video_location, - mime_type="video/mp4", - file_bytes=video_bytes, - project_uuid=timing.project.uuid, - inference_log_id=log.uuid - ) - - data_repo.add_interpolated_clip(timing_uuid, interpolated_clip_id=video.uuid) - output_video = update_speed_of_video_clip(video, timing_uuid) - data_repo.update_specific_timing(timing_uuid, timed_clip_id=output_video.uuid) - return output_video - data_repo = DataRepo() timing: InternalFrameTimingObject = data_repo.get_timing_from_uuid(timing_uuid) idx = timing.aux_frame_index @@ -55,7 +22,7 @@ def generate_individual_clip(timing_uuid, quality): if 
VideoInterpolator.calculate_dynamic_interpolations_steps(timing.clip_duration) > timing.interpolation_steps: st.error("Low Resolution") if st.button("Generate Full Resolution Clip", key=f"generate_full_resolution_video_{idx}"): - generate_individual_clip(timing.uuid, 'full') + create_single_interpolated_clip(timing.uuid, 'full') st.experimental_rerun() else: st.success("Full Resolution") @@ -81,11 +48,11 @@ def generate_individual_clip(timing_uuid, quality): with gen1: if st.button("Generate Low-Resolution Clip", key=f"generate_preview_video_{idx}"): - generate_individual_clip(timing.uuid, 'preview') + create_single_interpolated_clip(timing.uuid, 'preview') st.experimental_rerun() with gen2: if st.button("Generate Full Resolution Clip", key=f"generate_full_resolution_video_{idx}"): - generate_individual_clip(timing.uuid, 'full') + create_single_interpolated_clip(timing.uuid, 'full') st.experimental_rerun() diff --git a/utils/media_processor/interpolator.py b/utils/media_processor/interpolator.py index bdb4b5e6..6d67d86a 100644 --- a/utils/media_processor/interpolator.py +++ b/utils/media_processor/interpolator.py @@ -1,9 +1,11 @@ +import asyncio import os +from asgiref.sync import async_to_sync import cv2 import streamlit as st import requests as r import numpy as np -from shared.constants import AnimationStyleType +from shared.constants import AnimationStyleType, AnimationToolType from ui_components.methods.file_methods import generate_temp_file from ui_components.models import InferenceLogObject @@ -33,7 +35,7 @@ def calculate_dynamic_interpolations_steps(clip_duration): return interpolation_steps @staticmethod - def create_interpolated_clip(img_location_list, animation_style, settings): + def create_interpolated_clip(img_location_list, animation_style, settings, variant_count=1): data_repo = DataRepo() if not animation_style: project_setting = data_repo.get_project_setting(st.session_state["project_uuid"]) @@ -42,7 +44,8 @@ def 
create_interpolated_clip(img_location_list, animation_style, settings): if animation_style == AnimationStyleType.INTERPOLATION.value: return VideoInterpolator.video_through_frame_interpolation( img_location_list, - settings + settings, + variant_count ) elif animation_style == AnimationStyleType.DIRECT_MORPHING.value: @@ -54,7 +57,7 @@ def create_interpolated_clip(img_location_list, animation_style, settings): # returns a video bytes generated through interpolating frames between the given list of frames @staticmethod - def video_through_frame_interpolation(img_location_list, settings): + def video_through_frame_interpolation(img_location_list, settings, variant_count): # TODO: extend this for more than two images img1 = img_location_list[0] img2 = img_location_list[1] @@ -66,17 +69,29 @@ def video_through_frame_interpolation(img_location_list, settings): img2 = open(img2, "rb") ml_client = get_ml_client() - output, log = ml_client.predict_model_output(REPLICATE_MODEL.google_frame_interpolation, frame1=img1, frame2=img2, - times_to_interpolate=settings['interpolation_steps']) + animation_tool = settings['animation_tool'] if 'animation_tool' in settings else AnimationToolType.G_FILM.value + + # if animation_tool == AnimationToolType.G_FILM.value: + if True: + res = ml_client.predict_model_output_async(REPLICATE_MODEL.google_frame_interpolation, frame1=img1, frame2=img2, + times_to_interpolate=settings['interpolation_steps'], variant_count=3) + # else: + # # TODO: integrate the AD interpolation API here + # output, log = ml_client.predict_model_output(REPLICATE_MODEL.google_frame_interpolation, frame1=img1, frame2=img2, + # times_to_interpolate=settings['interpolation_steps']) - temp_output_file = generate_temp_file(output, '.mp4') - video_bytes = None - with open(temp_output_file.name, 'rb') as f: - video_bytes = f.read() + final_res = [] + for (output, log) in res: + temp_output_file = generate_temp_file(output, '.mp4') + video_bytes = None + with 
open(temp_output_file.name, 'rb') as f: + video_bytes = f.read() - os.remove(temp_output_file.name) + os.remove(temp_output_file.name) + final_res.append((video_bytes, log)) - return video_bytes, log + return final_res + @staticmethod def video_through_direct_morphing(img_location_list, settings): @@ -112,5 +127,12 @@ def load_image(image_path_or_url): video_bytes.append(frame_bytes.tobytes()) video_data = b''.join(video_bytes) - return video_data, InferenceLogObject({}) # returning None for inference log + return [(video_data, InferenceLogObject({}))] # returning None for inference log +@async_to_sync +async def google_interpolate_async(img1, img2, settings, variant_count=1): + ml_client = get_ml_client() + res = await asyncio.gather(*[ml_client.predict_model_output_async(REPLICATE_MODEL.google_frame_interpolation, frame1=img1, frame2=img2, + times_to_interpolate=settings['interpolation_steps']) for _ in range(variant_count)]) + + return res \ No newline at end of file diff --git a/utils/media_processor/video.py b/utils/media_processor/video.py index 254dc79c..5724afd0 100644 --- a/utils/media_processor/video.py +++ b/utils/media_processor/video.py @@ -1,3 +1,4 @@ +from io import BytesIO import os import tempfile from moviepy.editor import concatenate_videoclips, TextClip, VideoFileClip, vfx @@ -7,11 +8,22 @@ class VideoProcessor: @staticmethod def update_video_speed(video_location, animation_style, desired_duration): + clip = VideoFileClip(video_location) + + return VideoProcessor.update_clip_speed(clip, animation_style, desired_duration) + + @staticmethod + def update_video_bytes_speed(video_bytes, animation_style, desired_duration): + video_io = BytesIO(video_bytes) + clip = VideoFileClip(video_io) + + return VideoProcessor.update_clip_speed(clip, animation_style, desired_duration) + + @staticmethod + def update_clip_speed(clip: VideoFileClip, animation_style, desired_duration): temp_output_file = tempfile.NamedTemporaryFile(delete=False, suffix=".mp4", 
mode='wb') - if animation_style == AnimationStyleType.DIRECT_MORPHING.value: - # Load the video clip - clip = VideoFileClip(video_location) + if animation_style == AnimationStyleType.DIRECT_MORPHING.value: clip = clip.set_fps(120) # Calculate the number of frames to keep @@ -32,7 +44,6 @@ def update_video_speed(video_location, animation_style, desired_duration): # modifying speed for any other animation method else: - clip = VideoFileClip(video_location) input_video_duration = clip.duration desired_speed_change = float( input_video_duration) / float(desired_duration) diff --git a/utils/ml_processor/motion_module.py b/utils/ml_processor/motion_module.py new file mode 100644 index 00000000..48c1455e --- /dev/null +++ b/utils/ml_processor/motion_module.py @@ -0,0 +1,31 @@ +from dataclasses import dataclass + + +@dataclass +class MotionModuleCheckpoint: + name: str + +# make sure to have unique names (streamlit limitation) +class AnimateDiffCheckpoint: + mm_v15_v2 = MotionModuleCheckpoint(name="mm-v15-v2") + ad_stabilized_motion = MotionModuleCheckpoint(name="AD_Stabilized_Motion") + temporal_diff = MotionModuleCheckpoint(name="TemporalDiff") + + @staticmethod + def get_name_list(): + checkpoint_names = [getattr(AnimateDiffCheckpoint, attr).name for attr in dir(AnimateDiffCheckpoint) if not \ + callable(getattr(AnimateDiffCheckpoint, attr)) and not attr.startswith("__") \ + and isinstance(getattr(AnimateDiffCheckpoint, attr), MotionModuleCheckpoint)] + return checkpoint_names + + @staticmethod + def get_model_from_name(name): + checkpoint_list = [getattr(AnimateDiffCheckpoint, attr) for attr in dir(AnimateDiffCheckpoint) if not \ + callable(getattr(AnimateDiffCheckpoint, attr)) and not attr.startswith("__") \ + and isinstance(getattr(AnimateDiffCheckpoint, attr), MotionModuleCheckpoint)] + + for ckpt in checkpoint_list: + if ckpt.name == name: + return ckpt + + return None \ No newline at end of file diff --git a/utils/ml_processor/replicate/replicate.py 
b/utils/ml_processor/replicate/replicate.py index f5756536..50597e83 100644 --- a/utils/ml_processor/replicate/replicate.py +++ b/utils/ml_processor/replicate/replicate.py @@ -1,3 +1,4 @@ +import asyncio import io import time from shared.constants import REPLICATE_USER @@ -15,7 +16,7 @@ from utils.ml_processor.replicate.constants import REPLICATE_MODEL, ReplicateModel from repository.data_logger import log_model_inference import utils.local_storage.local_storage as local_storage -from utils.ml_processor.replicate.utils import check_user_credits +from utils.ml_processor.replicate.utils import check_user_credits, check_user_credits_async class ReplicateProcessor(MachineLearningProcessor): @@ -61,6 +62,31 @@ def predict_model_output(self, model: ReplicateModel, **kwargs): self._update_usage_credits(end_time - start_time) return output, log + + @check_user_credits + def predict_model_output_async(self, model: ReplicateModel, **kwargs): + res = asyncio.run(self._multi_async_prediction(model, **kwargs)) + + output_list = [] + for (output, time_taken) in res: + log = log_model_inference(model, time_taken, **kwargs) + self._update_usage_credits(time_taken) + output_list.append((output, log)) + + return output_list + + async def _multi_async_prediction(self, model: ReplicateModel, **kwargs): + variant_count = kwargs['variant_count'] if ('variant_count' in kwargs and kwargs['variant_count']) else 1 + res = await asyncio.gather(*[self._async_model_prediction(model, **kwargs) for _ in range(variant_count)]) + return res + + async def _async_model_prediction(self, model: ReplicateModel, **kwargs): + model_version = self.get_model(model) + start_time = time.time() + output = await asyncio.to_thread(model_version.predict, **kwargs) + end_time = time.time() + time_taken = end_time - start_time + return output, time_taken @check_user_credits def inpainting(self, video_name, input_image, prompt, negative_prompt): diff --git a/utils/ml_processor/replicate/utils.py 
b/utils/ml_processor/replicate/utils.py index de8f14e0..23d84225 100644 --- a/utils/ml_processor/replicate/utils.py +++ b/utils/ml_processor/replicate/utils.py @@ -9,4 +9,14 @@ def wrapper(self, *args, **kwargs): else: raise RuntimeError("Insufficient credits. Please recharge") + return wrapper + +def check_user_credits_async(method): + async def wrapper(self, *args, **kwargs): + if user_credits_available(): + res = await method(self, *args, **kwargs) + return res + else: + raise RuntimeError("Insufficient credits. Please recharge") + return wrapper \ No newline at end of file From 2a3c5f1faa0146ad725de96869422a3a956d98f5 Mon Sep 17 00:00:00 2001 From: piyushK52 Date: Tue, 19 Sep 2023 23:02:20 +0530 Subject: [PATCH 014/164] video interpolation fixes --- ui_components/models.py | 5 +++++ ui_components/widgets/animation_style_element.py | 4 +++- utils/media_processor/interpolator.py | 14 ++------------ 3 files changed, 10 insertions(+), 13 deletions(-) diff --git a/ui_components/models.py b/ui_components/models.py index 6639ea6d..8836fc07 100644 --- a/ui_components/models.py +++ b/ui_components/models.py @@ -177,6 +177,11 @@ def animation_tool(self): return st.session_state[key] + @animation_tool.setter + def animation_tool(self, val): + key = f"{self.uuid}_animation_tool" + st.session_state[key] = val + @property def animation_style(self): key = f"{self.uuid}_animation_style" diff --git a/ui_components/widgets/animation_style_element.py b/ui_components/widgets/animation_style_element.py index 0e7c1afa..558e28a3 100644 --- a/ui_components/widgets/animation_style_element.py +++ b/ui_components/widgets/animation_style_element.py @@ -10,7 +10,8 @@ def animation_style_element(timing_uuid, project_uuid): motion_modules = AnimateDiffCheckpoint.get_name_list() data_repo = DataRepo() project_settings = data_repo.get_project_setting(project_uuid) - current_animation_style = data_repo.get_timing_from_uuid(timing_uuid).animation_style + timing = 
data_repo.get_timing_from_uuid(timing_uuid) + current_animation_style = timing.animation_style variant_count = 1 if current_animation_style == "Interpolation": @@ -20,6 +21,7 @@ def animation_style_element(timing_uuid, project_uuid): settings = { "animation_tool": animation_tool } + timing.animation_tool = animation_tool if animation_tool == AnimationToolType.ANIMATEDIFF.value: motion_module = st.selectbox("Which motion module would you like to use?", options=motion_modules, key="motion_module") prompt_column_1, prompt_column_2 = st.columns([1, 1]) diff --git a/utils/media_processor/interpolator.py b/utils/media_processor/interpolator.py index 6d67d86a..7bbf0db8 100644 --- a/utils/media_processor/interpolator.py +++ b/utils/media_processor/interpolator.py @@ -1,6 +1,4 @@ -import asyncio import os -from asgiref.sync import async_to_sync import cv2 import streamlit as st import requests as r @@ -74,7 +72,7 @@ def video_through_frame_interpolation(img_location_list, settings, variant_count # if animation_tool == AnimationToolType.G_FILM.value: if True: res = ml_client.predict_model_output_async(REPLICATE_MODEL.google_frame_interpolation, frame1=img1, frame2=img2, - times_to_interpolate=settings['interpolation_steps'], variant_count=3) + times_to_interpolate=settings['interpolation_steps'], variant_count=variant_count) # else: # # TODO: integrate the AD interpolation API here # output, log = ml_client.predict_model_output(REPLICATE_MODEL.google_frame_interpolation, frame1=img1, frame2=img2, @@ -127,12 +125,4 @@ def load_image(image_path_or_url): video_bytes.append(frame_bytes.tobytes()) video_data = b''.join(video_bytes) - return [(video_data, InferenceLogObject({}))] # returning None for inference log - -@async_to_sync -async def google_interpolate_async(img1, img2, settings, variant_count=1): - ml_client = get_ml_client() - res = await asyncio.gather(*[ml_client.predict_model_output_async(REPLICATE_MODEL.google_frame_interpolation, frame1=img1, frame2=img2, - 
times_to_interpolate=settings['interpolation_steps']) for _ in range(variant_count)]) - - return res \ No newline at end of file + return [(video_data, InferenceLogObject({}))] # returning None for inference log \ No newline at end of file From b93db8f67f253afd8e06ffd026b5e7135286af33 Mon Sep 17 00:00:00 2001 From: POM Date: Tue, 19 Sep 2023 20:22:26 +0200 Subject: [PATCH 015/164] Update readme.md --- readme.md | 42 ++---------------------------------------- 1 file changed, 2 insertions(+), 40 deletions(-) diff --git a/readme.md b/readme.md index 677bce05..668bb32b 100644 --- a/readme.md +++ b/readme.md @@ -1,41 +1,3 @@ -# Welcome to Banodoco - V.0.2 Alpha Version! +# Welcome to Banodoco -Banodoco is a simple but powerful open source animation tool build on top of Stable Diffusion, Google FILM, and various other online machine models. - -## 1) Test It Out - -You can test out a preview version of Banodoco here. - -While the buttons and queries won't work and some things won't display properly, it should give you a good idea of what to expect. - -## 2) Download The Repo - -If you're comfortable with Git, you can pull this repo as normal. If you're not and don't want to figure that out, you can click "Code" in the top right, then click Download Zip to download all the files. - -## 3) Open Terminal - -Open your terminal and navigate to the folder where you downloaded the repo. - -To do this quick, you can type `cd` and then drag the folder into the terminal and press enter. - -## 4) Install Dependencies - -To install the dependencies, you can run the following command in your open terminal window: - -`pip install -r requirements.txt` - -If you're a developer, you'll probably want to install these in a virtual environment. - -## 5) Run The App - -To run the app, you can run the following command in your terminal window: - -`streamlit run app.py` - -This should open a new tab in your browser with the app running. 
If it doesn't, you can copy and paste the link that is printed in your terminal window. - -> Note: if you encounter issues, I'd suggest that you paste the error messages you get in terminal into ChatGPT and follow its suggestions. It this doesn't work, message in Discord! - -## 6) Follow The Setup Guide - -Once you have the app running, you can follow the setup guide inside the app to get started! +Banodoco is a simple but powerful open source animation tool build on top of Stable Diffusion, Animatediff and various other models. Launching soon. From b3cee4e8789bf17accd9f3c2892fbafad3c5bdba Mon Sep 17 00:00:00 2001 From: peter942 Date: Mon, 25 Sep 2023 17:54:16 +0200 Subject: [PATCH 016/164] Fixing manual zoomer --- ui_components/widgets/cropping_element.py | 73 ++++++++++++++++++++++- 1 file changed, 70 insertions(+), 3 deletions(-) diff --git a/ui_components/widgets/cropping_element.py b/ui_components/widgets/cropping_element.py index 7b49b552..03fbff15 100644 --- a/ui_components/widgets/cropping_element.py +++ b/ui_components/widgets/cropping_element.py @@ -15,6 +15,69 @@ from utils.data_repo.data_repo import DataRepo +def precision_cropping_element(stage, project_uuid): + data_repo = DataRepo() + project_settings: InternalSettingObject = data_repo.get_project_setting( + project_uuid) + + + input_image = fetch_image_by_stage(project_uuid, stage) + + # TODO: CORRECT-CODE check if this code works + if not input_image: + st.error("Please select a source image before cropping") + return + else: + input_image = generate_pil_image(input_image.location) + + col1, col2 = st.columns(2) + + with col1: + + st.subheader("Precision Cropping:") + + if st.button("Reset Cropping"): + reset_zoom_element() + + + zoom_inputs() + st.caption("Input Image:") + st.image(input_image, caption="Input Image", width=300) + + with col2: + + st.caption("Output Image:") + output_image = apply_image_transformations( + input_image, st.session_state['zoom_level_input'], 
st.session_state['rotation_angle_input'], st.session_state['x_shift'], st.session_state['y_shift']) + st.image(output_image, use_column_width=True) + + if st.button("Save Image"): + save_zoomed_image(output_image, st.session_state['current_frame_uuid'], stage, promote=True) + st.success("Image saved successfully!") + time.sleep(1) + st.experimental_rerun() + + inpaint_in_black_space_element( + output_image, project_settings.project.uuid, stage) + + +from math import gcd +import os +import time +import uuid +import streamlit as st +from PIL import ImageOps, Image +from streamlit_cropper import st_cropper +from backend.models import InternalFileObject +from shared.constants import InternalFileType + +from ui_components.methods.common_methods import apply_image_transformations, fetch_image_by_stage, inpaint_in_black_space_element, reset_zoom_element, save_zoomed_image, zoom_inputs +from ui_components.constants import WorkflowStageType +from ui_components.methods.file_methods import generate_pil_image, save_or_host_file +from ui_components.models import InternalProjectObject, InternalSettingObject +from utils.data_repo.data_repo import DataRepo + + def precision_cropping_element(stage, project_uuid): data_repo = DataRepo() project_settings: InternalSettingObject = data_repo.get_project_setting( @@ -62,6 +125,7 @@ def precision_cropping_element(stage, project_uuid): def manual_cropping_element(stage, timing_uuid): + data_repo = DataRepo() timing = data_repo.get_timing_from_uuid(timing_uuid) project_uuid = timing.project.uuid @@ -78,13 +142,18 @@ def manual_cropping_element(stage, timing_uuid): if 'current_working_image_number' not in st.session_state: st.session_state['current_working_image_number'] = st.session_state['current_frame_index'] + if 'current_stage' not in st.session_state: + st.session_state['current_stage'] = stage + def get_working_image(): st.session_state['working_image'] = generate_pil_image(input_image) + st.session_state['working_image'] = 
st.session_state['working_image'].convert('RGB') st.session_state['working_image'] = ImageOps.expand( st.session_state['working_image'], border=200, fill="black") st.session_state['current_working_image_number'] = st.session_state['current_frame_index'] + st.session_state['current_stage'] = stage - if 'working_image' not in st.session_state or st.session_state['current_working_image_number'] != st.session_state['current_frame_index']: + if 'working_image' not in st.session_state or st.session_state['current_working_image_number'] != st.session_state['current_frame_index'] or st.session_state['current_stage'] != stage: get_working_image() options1, options2, option3, option4 = st.columns([3, 1, 1, 1]) @@ -110,9 +179,7 @@ def get_working_image(): get_working_image() st.session_state['degrees_rotated_to'] = 0 st.experimental_rerun() - - project_settings: InternalProjectObject = data_repo.get_project_setting( timing.project.uuid) From 9a1e01a5fe15b50e0e166693791ac1a04b769526 Mon Sep 17 00:00:00 2001 From: peter942 Date: Mon, 25 Sep 2023 17:54:48 +0200 Subject: [PATCH 017/164] Fixing manual zoomer --- ui_components/widgets/cropping_element.py | 63 ----------------------- 1 file changed, 63 deletions(-) diff --git a/ui_components/widgets/cropping_element.py b/ui_components/widgets/cropping_element.py index 03fbff15..e085a8da 100644 --- a/ui_components/widgets/cropping_element.py +++ b/ui_components/widgets/cropping_element.py @@ -15,69 +15,6 @@ from utils.data_repo.data_repo import DataRepo -def precision_cropping_element(stage, project_uuid): - data_repo = DataRepo() - project_settings: InternalSettingObject = data_repo.get_project_setting( - project_uuid) - - - input_image = fetch_image_by_stage(project_uuid, stage) - - # TODO: CORRECT-CODE check if this code works - if not input_image: - st.error("Please select a source image before cropping") - return - else: - input_image = generate_pil_image(input_image.location) - - col1, col2 = st.columns(2) - - with col1: - - 
st.subheader("Precision Cropping:") - - if st.button("Reset Cropping"): - reset_zoom_element() - - - zoom_inputs() - st.caption("Input Image:") - st.image(input_image, caption="Input Image", width=300) - - with col2: - - st.caption("Output Image:") - output_image = apply_image_transformations( - input_image, st.session_state['zoom_level_input'], st.session_state['rotation_angle_input'], st.session_state['x_shift'], st.session_state['y_shift']) - st.image(output_image, use_column_width=True) - - if st.button("Save Image"): - save_zoomed_image(output_image, st.session_state['current_frame_uuid'], stage, promote=True) - st.success("Image saved successfully!") - time.sleep(1) - st.experimental_rerun() - - inpaint_in_black_space_element( - output_image, project_settings.project.uuid, stage) - - -from math import gcd -import os -import time -import uuid -import streamlit as st -from PIL import ImageOps, Image -from streamlit_cropper import st_cropper -from backend.models import InternalFileObject -from shared.constants import InternalFileType - -from ui_components.methods.common_methods import apply_image_transformations, fetch_image_by_stage, inpaint_in_black_space_element, reset_zoom_element, save_zoomed_image, zoom_inputs -from ui_components.constants import WorkflowStageType -from ui_components.methods.file_methods import generate_pil_image, save_or_host_file -from ui_components.models import InternalProjectObject, InternalSettingObject -from utils.data_repo.data_repo import DataRepo - - def precision_cropping_element(stage, project_uuid): data_repo = DataRepo() project_settings: InternalSettingObject = data_repo.get_project_setting( From 22bd9c50647d03865e215b0a3e42e33a7f1668ad Mon Sep 17 00:00:00 2001 From: peter942 Date: Wed, 27 Sep 2023 17:10:16 +0200 Subject: [PATCH 018/164] Refactoring timing --- .../components/frame_styling_page.py | 135 ++++-------------- ui_components/methods/common_methods.py | 7 +- .../widgets/add_key_frame_element.py | 86 +++++++++++ 
.../widgets/frame_clip_generation_elements.py | 2 + ui_components/widgets/frame_selector.py | 2 +- ui_components/widgets/frame_time_selector.py | 110 ++++++++++---- 6 files changed, 205 insertions(+), 137 deletions(-) create mode 100644 ui_components/widgets/add_key_frame_element.py diff --git a/ui_components/components/frame_styling_page.py b/ui_components/components/frame_styling_page.py index dd33a02d..523194b7 100644 --- a/ui_components/components/frame_styling_page.py +++ b/ui_components/components/frame_styling_page.py @@ -11,10 +11,11 @@ from ui_components.methods.video_methods import create_or_get_single_preview_video from ui_components.widgets.cropping_element import manual_cropping_element, precision_cropping_element from ui_components.widgets.frame_clip_generation_elements import current_individual_clip_element, current_preview_video_element, update_animation_style_element -from ui_components.widgets.frame_time_selector import single_frame_time_selector, update_frame_time +from ui_components.widgets.frame_time_selector import single_frame_time_selector, update_frame_time, single_frame_time_duration_setter from ui_components.widgets.frame_selector import frame_selector_widget from ui_components.widgets.image_carousal import display_image from ui_components.widgets.prompt_finder import prompt_finder_element +from ui_components.widgets.add_key_frame_element import add_key_frame_element from ui_components.widgets.styling_element import styling_element from ui_components.widgets.compare_to_other_variants import compare_to_other_variants from ui_components.widgets.animation_style_element import animation_style_element @@ -78,6 +79,9 @@ def frame_styling_page(mainheader2, project_uuid: str): if st.session_state['page'] == "Motion": + + + idx = st.session_state['current_frame_index'] - 1 st.session_state['show_comparison'] = st_memory.radio("Show:", options=["Other Variants", "Preview Video in Context"], horizontal=True, project_settings=project_settings, 
key="show_comparison_radio_motion") @@ -88,12 +92,14 @@ def frame_styling_page(mainheader2, project_uuid: str): elif st.session_state['show_comparison'] == "Preview Video in Context": current_preview_video_element(st.session_state['current_frame_uuid']) - update_animation_style_element(st.session_state['current_frame_uuid'], horizontal=False) + st.markdown("***") with st.expander("🎬 Choose Animation Style & Create Variants", expanded=True): + update_animation_style_element(st.session_state['current_frame_uuid'], horizontal=True) + animation_style_element(st.session_state['current_frame_uuid'], project_settings) @@ -294,10 +300,13 @@ def frame_styling_page(mainheader2, project_uuid: str): num_pages = math.ceil(len(timing_details) / items_per_page) + 1 st.markdown("---") - - st.session_state['current_page'] = st.radio("Select Page:", options=range( - 1, num_pages), horizontal=True, index=st.session_state['index_of_current_page'] - 1, key="page_selection_radio") + header_col_1, header_col_2, header_col_3 = st.columns([1, 5, 1]) + with header_col_1: + st.session_state['current_page'] = st.radio("Select Page:", options=range( + 1, num_pages), horizontal=True, index=st.session_state['index_of_current_page'] - 1, key="page_selection_radio") + with header_col_3: + shift_frames_setting = st.toggle("Shift Frames", help="If set to True, this will shift the frames after your adjustment forward or backwards.") if st.session_state['current_page'] != st.session_state['index_of_current_page']: st.session_state['index_of_current_page'] = st.session_state['current_page'] st.experimental_rerun() @@ -308,6 +317,7 @@ def frame_styling_page(mainheader2, project_uuid: str): end_index = min(start_index + items_per_page, len(timing_details)) + if st.session_state['page'] == "Styling": with st.sidebar: @@ -334,9 +344,8 @@ def frame_styling_page(mainheader2, project_uuid: str): with image3: time1, time2 = st.columns([1, 1]) with time1: - single_frame_time_selector(timing_details[i].uuid, 
'sidebar') - st.info( - f"Duration: {timing_details[i].clip_duration:.2f} secs") + single_frame_time_selector(timing_details[i].uuid, 'sidebar', shift_frames=shift_frames_setting) + single_frame_time_duration_setter(timing_details[i].uuid,'sidebar',shift_frames=shift_frames_setting) with time2: st.write("") @@ -370,12 +379,11 @@ def frame_styling_page(mainheader2, project_uuid: str): # Update the current page in session state elif st.session_state['page'] == "Motion": - num_timing_details = len(timing_details) - shift1, shift2 = st.columns([2, 1.2]) - with shift2: - shift_frames = st.checkbox( - "Shift Frames", help="This will shift the after your adjustment forward or backwards.") + + + + num_timing_details = len(timing_details) timing_details = data_repo.get_timing_list_from_project(project_uuid) @@ -406,36 +414,12 @@ def frame_styling_page(mainheader2, project_uuid: str): st.write("") st.markdown("

FIN

", unsafe_allow_html=True) - single_frame_time_selector(timing_details[idx].uuid, 'motion') - st.caption(f"Duration: {timing_details[idx].clip_duration:.2f} secs") - - # calculate minimum and maximum values for slider - if idx == 0: - min_frame_time = 0.0 # make sure the value is a float - else: - min_frame_time = timing_details[idx].frame_time - - if idx == num_timing_details - 1: - max_frame_time = timing_details[idx].frame_time + 10.0 - elif idx < num_timing_details - 1: - max_frame_time = timing_details[idx+1].frame_time - - # disable slider only if it's the first frame - slider_disabled = idx == 0 - frame_time = st.slider( - f"#{idx+1} Frame Time = {round(timing_details[idx].frame_time, 3)}", - min_value=min_frame_time, - max_value=max_frame_time, - value=timing_details[idx].frame_time, - step=0.01, - disabled=slider_disabled, - key=f"frame_time_slider_{idx}" - ) + single_frame_time_selector(timing_details[idx].uuid, 'motion', shift_frames=shift_frames_setting) + + single_frame_time_duration_setter(timing_details[idx].uuid,'motion',shift_frames=shift_frames_setting) + update_animation_style_element(timing_details[idx].uuid) - # update timing details - if timing_details[idx].frame_time != frame_time: - update_frame_time(timing_details[idx].uuid, frame_time) if timing_details[idx].aux_frame_index != len(timing_details) - 1: with timing2: @@ -446,78 +430,13 @@ def frame_styling_page(mainheader2, project_uuid: str): st.markdown("***") st.markdown("***") - with st.expander("➕ Add Key Frame", expanded=True): - add1, add2 = st.columns(2) - - selected_image_location = "" - with add1: - # removed "Frame From Video" for now - image1,image2 = st.columns(2) - with image1: - source_of_starting_image = st.radio("Where would you like to get the starting image from?", [ - "Previous frame", "Uploaded image"], key="source_of_starting_image") - - which_stage_for_starting_image = None - if source_of_starting_image == "Previous frame": - with image2: - 
which_stage_for_starting_image = st.radio("Which stage would you like to use?", [ - ImageStage.MAIN_VARIANT.value, ImageStage.SOURCE_IMAGE.value], key="which_stage_for_starting_image", horizontal=True) - which_number_for_starting_image = st.number_input("Which frame would you like to use?", min_value=1, max_value= - max(1, len(timing_details)), value=st.session_state['current_frame_index'], step=1, key="which_number_for_starting_image") - if which_stage_for_starting_image == ImageStage.SOURCE_IMAGE.value: - if timing_details[which_number_for_starting_image - 1].source_image != "": - selected_image_location = timing_details[which_number_for_starting_image - 1].source_image.location - else: - selected_image_location = "" - elif which_stage_for_starting_image == ImageStage.MAIN_VARIANT.value: - selected_image_location = timing_details[which_number_for_starting_image - 1].primary_image_location - elif source_of_starting_image == "Uploaded image": - with image2: - uploaded_image = st.file_uploader( - "Upload an image", type=["png", "jpg", "jpeg"]) - # FILE UPLOAD HANDLE-- - if uploaded_image is not None: - image = Image.open(uploaded_image) - file_location = f"videos/{project_uuid}/assets/frames/1_selected/{uploaded_image.name}" - selected_image_location = save_or_host_file(image, file_location) - selected_image_location = selected_image_location or file_location - else: - selected_image_location = "" - which_number_for_starting_image = st.session_state['current_frame_index'] - - - how_long_after = st.slider( - "How long after the current frame?", min_value=0.0, max_value=10.0, value=2.5, step=0.1) - - radio_text = "Inherit styling settings from the " + ("current frame?" 
if source_of_starting_image == "Uploaded image" else "selected frame") - inherit_styling_settings = st_memory.radio(radio_text, ["Yes", "No"], \ - key="inherit_styling_settings", horizontal=True, project_settings=project_settings) - - apply_zoom_effects = st_memory.radio("Apply zoom effects to inputted image?", [ - "No","Yes"], key="apply_zoom_effects", horizontal=True, project_settings=project_settings) - - if apply_zoom_effects == "Yes": - zoom_inputs(position='new', horizontal=True) - - selected_image = None - with add2: - if selected_image_location: - if apply_zoom_effects == "Yes": - image_preview = generate_pil_image(selected_image_location) - selected_image = apply_image_transformations(image_preview, st.session_state['zoom_level_input'], st.session_state['rotation_angle_input'], st.session_state['x_shift'], st.session_state['y_shift']) - - else: - selected_image = generate_pil_image(selected_image_location) - st.info("Starting Image:") - st.image(selected_image) - else: - st.error("No Starting Image Found") + selected_image, inherit_styling_settings, how_long_after, which_stage_for_starting_image = add_key_frame_element(timing_details, project_uuid) if st.button(f"Add key frame",type="primary",use_container_width=True): - add_key_frame(selected_image, inherit_styling_settings, how_long_after) + add_key_frame(selected_image, inherit_styling_settings, how_long_after, which_stage_for_starting_image) st.experimental_rerun() diff --git a/ui_components/methods/common_methods.py b/ui_components/methods/common_methods.py index 18d91449..ac677132 100644 --- a/ui_components/methods/common_methods.py +++ b/ui_components/methods/common_methods.py @@ -31,7 +31,7 @@ from streamlit_image_comparison import image_comparison -def add_key_frame(selected_image, inherit_styling_settings, how_long_after): +def add_key_frame(selected_image, inherit_styling_settings, how_long_after, which_stage_for_starting_image): data_repo = DataRepo() project_uuid = 
st.session_state['project_uuid'] timing_details = data_repo.get_timing_list_from_project(project_uuid) @@ -66,9 +66,8 @@ def add_key_frame(selected_image, inherit_styling_settings, how_long_after): save_uploaded_image(selected_image, project_uuid, timing_details[index_of_current_item].uuid, "source") save_uploaded_image(selected_image, project_uuid, timing_details[index_of_current_item].uuid, "styled") - if inherit_styling_settings == "Yes": - index = which_stage_for_starting_image or index_of_current_item - clone_styling_settings(index - 1, timing_details[index_of_current_item].uuid) + if inherit_styling_settings == "Yes": + clone_styling_settings(index_of_current_item - 1, timing_details[index_of_current_item].uuid) data_repo.update_specific_timing(timing_details[index_of_current_item].uuid, \ animation_style=project_settings.default_animation_style) diff --git a/ui_components/widgets/add_key_frame_element.py b/ui_components/widgets/add_key_frame_element.py new file mode 100644 index 00000000..7a7aafab --- /dev/null +++ b/ui_components/widgets/add_key_frame_element.py @@ -0,0 +1,86 @@ +import streamlit as st + +from utils import st_memory + +from utils.data_repo.data_repo import DataRepo + +from utils.constants import ImageStage +from ui_components.methods.file_methods import generate_pil_image,save_or_host_file +from ui_components.methods.common_methods import apply_image_transformations,zoom_inputs +from PIL import Image + + + +def add_key_frame_element(timing_details, project_uuid): + data_repo = DataRepo() + + timing_details = data_repo.get_timing_list_from_project(project_uuid) + project_settings = data_repo.get_project_setting(project_uuid) + + add1, add2 = st.columns(2) + + with add1: + + selected_image_location = "" + image1,image2 = st.columns(2) + with image1: + source_of_starting_image = st.radio("Where would you like to get the starting image from?", [ + "Previous frame", "Uploaded image"], key="source_of_starting_image") + + 
which_stage_for_starting_image = None + if source_of_starting_image == "Previous frame": + with image2: + which_stage_for_starting_image = st.radio("Which stage would you like to use?", [ + ImageStage.MAIN_VARIANT.value, ImageStage.SOURCE_IMAGE.value], key="which_stage_for_starting_image", horizontal=True) + which_number_for_starting_image = st.number_input("Which frame would you like to use?", min_value=1, max_value= + max(1, len(timing_details)), value=st.session_state['current_frame_index'], step=1, key="which_number_for_starting_image") + if which_stage_for_starting_image == ImageStage.SOURCE_IMAGE.value: + if timing_details[which_number_for_starting_image - 1].source_image != "": + selected_image_location = timing_details[which_number_for_starting_image - 1].source_image.location + else: + selected_image_location = "" + elif which_stage_for_starting_image == ImageStage.MAIN_VARIANT.value: + selected_image_location = timing_details[which_number_for_starting_image - 1].primary_image_location + elif source_of_starting_image == "Uploaded image": + with image2: + uploaded_image = st.file_uploader( + "Upload an image", type=["png", "jpg", "jpeg"]) + # FILE UPLOAD HANDLE-- + if uploaded_image is not None: + image = Image.open(uploaded_image) + file_location = f"videos/{project_uuid}/assets/frames/1_selected/{uploaded_image.name}" + selected_image_location = save_or_host_file(image, file_location) + selected_image_location = selected_image_location or file_location + else: + selected_image_location = "" + which_number_for_starting_image = st.session_state['current_frame_index'] + + + how_long_after = st.slider( + "How long after the current frame?", min_value=0.0, max_value=10.0, value=2.5, step=0.1) + + radio_text = "Inherit styling settings from the " + ("current frame?" 
if source_of_starting_image == "Uploaded image" else "selected frame") + inherit_styling_settings = st_memory.radio(radio_text, ["Yes", "No"], \ + key="inherit_styling_settings", horizontal=True, project_settings=project_settings) + + apply_zoom_effects = st_memory.radio("Apply zoom effects to inputted image?", [ + "No","Yes"], key="apply_zoom_effects", horizontal=True, project_settings=project_settings) + + if apply_zoom_effects == "Yes": + zoom_inputs(position='new', horizontal=True) + + selected_image = None + with add2: + if selected_image_location: + if apply_zoom_effects == "Yes": + image_preview = generate_pil_image(selected_image_location) + selected_image = apply_image_transformations(image_preview, st.session_state['zoom_level_input'], st.session_state['rotation_angle_input'], st.session_state['x_shift'], st.session_state['y_shift']) + + else: + selected_image = generate_pil_image(selected_image_location) + st.info("Starting Image:") + st.image(selected_image) + else: + st.error("No Starting Image Found") + + return selected_image, inherit_styling_settings, how_long_after, which_stage_for_starting_image \ No newline at end of file diff --git a/ui_components/widgets/frame_clip_generation_elements.py b/ui_components/widgets/frame_clip_generation_elements.py index b6a50010..029e3665 100644 --- a/ui_components/widgets/frame_clip_generation_elements.py +++ b/ui_components/widgets/frame_clip_generation_elements.py @@ -139,6 +139,8 @@ def current_preview_video_element(timing_uuid): ''') with preview_video_2: + + st.info("This allows you to preview the video with the surrounding clips attached.") if st.button("Generate New Preview Video", key=f"generate_preview_{idx}"): preview_video = create_full_preview_video( diff --git a/ui_components/widgets/frame_selector.py b/ui_components/widgets/frame_selector.py index 53572b45..1d2d37be 100644 --- a/ui_components/widgets/frame_selector.py +++ b/ui_components/widgets/frame_selector.py @@ -32,7 +32,7 @@ def 
frame_selector_widget(): st.experimental_rerun() with time2: - single_frame_time_selector(st.session_state['current_frame_uuid'], 'navbar') + single_frame_time_selector(st.session_state['current_frame_uuid'], 'navbar', shift_frames=False) diff --git a/ui_components/widgets/frame_time_selector.py b/ui_components/widgets/frame_time_selector.py index 9e55af58..90e3514e 100644 --- a/ui_components/widgets/frame_time_selector.py +++ b/ui_components/widgets/frame_time_selector.py @@ -4,40 +4,102 @@ from utils.data_repo.data_repo import DataRepo import streamlit as st +def shift_subsequent_frames(timing, time_delta): + data_repo = DataRepo() + timing_details: List[InternalFrameTimingObject] = data_repo.get_timing_list_from_project( + timing.project.uuid) -def single_frame_time_selector(timing_uuid, src): + if time_delta > 0: + for a in range(timing.aux_frame_index + 1, len(timing_details)): + frame = timing_details[a] + # shift them by the difference between the new frame time and the old frame time + new_frame_time = frame.frame_time + time_delta + data_repo.update_specific_timing(frame.uuid, frame_time=new_frame_time, timed_clip_id=None) + +def update_frame_duration(timing_uuid, frame_duration, next_timing, shift_frames): data_repo = DataRepo() + timing: InternalFrameTimingObject = data_repo.get_timing_from_uuid(timing_uuid) + + if next_timing: + # Calculate time_delta before updating next_timing.frame_time + time_delta = frame_duration - (next_timing.frame_time - timing.frame_time) - timing: InternalFrameTimingObject = data_repo.get_timing_from_uuid( - timing_uuid) + next_timing.frame_time = timing.frame_time + frame_duration + data_repo.update_specific_timing(next_timing.uuid, frame_time=next_timing.frame_time, timed_clip_id=None) + + if shift_frames: + shift_subsequent_frames(timing, time_delta) - # src is required to create unique widget key - frame_time = st.number_input("Frame time (secs):", min_value=0.0, max_value=100.0, - value=timing.frame_time, step=0.1, 
key=f"frame_time_{timing.aux_frame_index}_{src}") - if frame_time != timing.frame_time: - update_frame_time(timing_uuid, frame_time) + # updating clip_duration + update_clip_duration_of_all_timing_frames(timing.project.uuid) -def update_frame_time(timing_uuid, frame_time): + st.experimental_rerun() + +def update_frame_time(timing_uuid, frame_time, shift_frames): data_repo = DataRepo() timing: InternalFrameTimingObject = data_repo.get_timing_from_uuid(timing_uuid) - timing_details: List[InternalFrameTimingObject] = data_repo.get_timing_list_from_project( - timing.project.uuid) - + data_repo.update_specific_timing(timing_uuid, frame_time=frame_time, timed_clip_id=None) - # if the frame time of this frame is more than the frame time of the next frame, - # then we need to update the next frame's frame time, and all the frames after that - # - shift them by the difference between the new frame time and the old frame time - next_timing = data_repo.get_next_timing(timing_uuid) - time_delta = (frame_time + timing.clip_duration) - next_timing.frame_time - if next_timing and time_delta > 0: - for a in range(timing.aux_frame_index, len(timing_details)): - frame = timing_details[a] - # shift them by the difference between the new frame time and the old frame time - new_frame_time = frame.frame_time + time_delta - data_repo.update_specific_timing(frame.uuid, frame_time=new_frame_time, timed_clip_id=None) + if shift_frames: + next_timing = data_repo.get_next_timing(timing_uuid) + if next_timing is not None: + time_delta = (frame_time + timing.clip_duration) - next_timing.frame_time + shift_subsequent_frames(timing, time_delta) # updating clip_duration update_clip_duration_of_all_timing_frames(timing.project.uuid) - st.experimental_rerun() \ No newline at end of file + st.experimental_rerun() + + +def single_frame_time_duration_setter(timing_uuid, src, shift_frames=True): + data_repo = DataRepo() + + timing: InternalFrameTimingObject = data_repo.get_timing_from_uuid(timing_uuid) 
+ next_timing = data_repo.get_next_timing(timing_uuid) + + # Calculate clip_duration + if next_timing: + clip_duration = next_timing.frame_time - timing.frame_time + else: + clip_duration = 0.0 # or some default value + + max_value = 100.0 if shift_frames else clip_duration + + disable_duration_input = False if next_timing else True + help_text = None if shift_frames else "This will not shift subsequent frames - to do this, go to the motion section and set Shift Frames = True" + frame_duration = st.number_input("Frame duration (secs):", min_value=0.0, max_value=max_value, + value=clip_duration, step=0.1, key=f"frame_duration_{timing.aux_frame_index}_{src}", + disabled=disable_duration_input, help=help_text) + + if frame_duration != clip_duration: + update_frame_duration(timing_uuid, frame_duration, next_timing, shift_frames) + + + +def single_frame_time_selector(timing_uuid, src, shift_frames=True): + data_repo = DataRepo() + + timing: InternalFrameTimingObject = data_repo.get_timing_from_uuid( + timing_uuid) + + # Get the previous timing object + timing_list: List[InternalFrameTimingObject] = data_repo.get_timing_list_from_project(timing.project.uuid) + prev_timing = None + if timing.aux_frame_index > 0: + prev_timing_uuid = timing_list[timing.aux_frame_index - 1].uuid + prev_timing = data_repo.get_timing_from_uuid(prev_timing_uuid) + + # If previous timing exists, use its frame time as min_value, else use 0.0 + min_value = prev_timing.frame_time if prev_timing else 0.0 + + disabled_time_change = True if timing.aux_frame_index == 0 else False + + next_timing = data_repo.get_next_timing(timing_uuid) + max_value = 100.0 if shift_frames else (next_timing.frame_time if next_timing else timing.frame_time) + help_text = None if shift_frames else "This will not shift subsequent frames - to do this, you do this in motion section - set Shift Frames = True" + frame_time = st.number_input("Frame time (secs):", min_value=min_value, max_value=max_value, + 
value=timing.frame_time, step=0.1, key=f"frame_time_{timing.aux_frame_index}_{src}",disabled=disabled_time_change, help=help_text) + if frame_time != timing.frame_time: + update_frame_time(timing_uuid, frame_time, shift_frames) From a7bc38d7538e00e46a246abed704b8faae9b2e9b Mon Sep 17 00:00:00 2001 From: peter942 Date: Wed, 27 Sep 2023 22:56:49 +0200 Subject: [PATCH 019/164] Creating timeline view --- app.py | 5 +- .../components/frame_styling_page.py | 234 +++---------- ui_components/methods/common_methods.py | 9 + .../widgets/add_key_frame_element.py | 4 +- ui_components/widgets/cropping_element.py | 20 ++ ui_components/widgets/frame_time_selector.py | 8 +- ui_components/widgets/inpainting_element.py | 324 ++++++++++++++++++ ui_components/widgets/list_view.py | 131 +++++++ ui_components/widgets/timeline_view.py | 65 ++++ utils/st_memory.py | 46 ++- 10 files changed, 648 insertions(+), 198 deletions(-) create mode 100644 ui_components/widgets/inpainting_element.py create mode 100644 ui_components/widgets/list_view.py create mode 100644 ui_components/widgets/timeline_view.py diff --git a/app.py b/app.py index 804da72a..d2a8c5a2 100644 --- a/app.py +++ b/app.py @@ -21,6 +21,7 @@ from utils.data_repo.data_repo import DataRepo + if OFFLINE_MODE: SENTRY_DSN = os.getenv('SENTRY_DSN', '') SENTRY_ENV = os.getenv('SENTRY_ENV', '') @@ -28,8 +29,8 @@ import boto3 ssm = boto3.client("ssm", region_name="ap-south-1") - SENTRY_ENV = ssm.get_parameter(Name='/banodoco-fe/sentry/environment')['Parameter']['Value'] - SENTRY_DSN = ssm.get_parameter(Name='/banodoco-fe/sentry/dsn')['Parameter']['Value'] + # SENTRY_ENV = ssm.get_parameter(Name='/banodoco-fe/sentry/environment')['Parameter']['Value'] + # SENTRY_DSN = ssm.get_parameter(Name='/banodoco-fe/sentry/dsn')['Parameter']['Value'] sentry_sdk.init( environment=SENTRY_ENV, diff --git a/ui_components/components/frame_styling_page.py b/ui_components/components/frame_styling_page.py index 523194b7..e55b7281 100644 --- 
a/ui_components/components/frame_styling_page.py +++ b/ui_components/components/frame_styling_page.py @@ -2,23 +2,22 @@ from streamlit_image_comparison import image_comparison import time from PIL import Image -from ui_components.methods.common_methods import delete_frame, drawing_mode, promote_image_variant, save_uploaded_image, \ - create_timings_row_at_frame_number, move_frame, calculate_desired_duration_of_individual_clip, \ - calculate_desired_duration_of_individual_clip, apply_image_transformations, \ - ai_frame_editing_element, clone_styling_settings, zoom_inputs,add_key_frame -from ui_components.methods.file_methods import generate_pil_image, save_or_host_file +from ui_components.methods.common_methods import delete_frame, drawing_mode, ai_frame_editing_element, clone_styling_settings,add_key_frame,jump_to_single_frame_view_button from ui_components.methods.ml_methods import trigger_restyling_process from ui_components.methods.video_methods import create_or_get_single_preview_video -from ui_components.widgets.cropping_element import manual_cropping_element, precision_cropping_element +from ui_components.widgets.cropping_element import manual_cropping_element, precision_cropping_element, cropping_selector_element from ui_components.widgets.frame_clip_generation_elements import current_individual_clip_element, current_preview_video_element, update_animation_style_element -from ui_components.widgets.frame_time_selector import single_frame_time_selector, update_frame_time, single_frame_time_duration_setter +from ui_components.widgets.frame_time_selector import single_frame_time_selector, single_frame_time_duration_setter from ui_components.widgets.frame_selector import frame_selector_widget from ui_components.widgets.image_carousal import display_image from ui_components.widgets.prompt_finder import prompt_finder_element from ui_components.widgets.add_key_frame_element import add_key_frame_element from ui_components.widgets.styling_element import 
styling_element +from ui_components.widgets.timeline_view import timeline_view from ui_components.widgets.compare_to_other_variants import compare_to_other_variants from ui_components.widgets.animation_style_element import animation_style_element +from ui_components.widgets.inpainting_element import inpainting_element +from ui_components.widgets.list_view import list_view_set_up, page_toggle, styling_list_view,motion_list_view from streamlit_option_menu import option_menu from utils import st_memory @@ -78,22 +77,15 @@ def frame_styling_page(mainheader2, project_uuid: str): frame_selector_widget() if st.session_state['page'] == "Motion": - - - - - idx = st.session_state['current_frame_index'] - 1 - - st.session_state['show_comparison'] = st_memory.radio("Show:", options=["Other Variants", "Preview Video in Context"], horizontal=True, project_settings=project_settings, key="show_comparison_radio_motion") + + st.session_state['show_comparison'] = st_memory.radio("Show:", options=["Other Variants", "Preview Video in Context"], horizontal=True, key="show_comparison_radio_motion") if st.session_state['show_comparison'] == "Other Variants": compare_to_other_variants(timing_details, project_uuid, data_repo,stage="Motion") elif st.session_state['show_comparison'] == "Preview Video in Context": current_preview_video_element(st.session_state['current_frame_uuid']) - - - + st.markdown("***") with st.expander("🎬 Choose Animation Style & Create Variants", expanded=True): @@ -105,11 +97,9 @@ def frame_styling_page(mainheader2, project_uuid: str): elif st.session_state['page'] == "Styling": # carousal_of_images_element(project_uuid, stage=WorkflowStageType.STYLED.value) - comparison_values = [ - "Other Variants", "Source Frame", "Previous & Next Frame", "None"] - - st.session_state['show_comparison'] = st_memory.radio("Show comparison to:", options=comparison_values, horizontal=True, project_settings=project_settings, key="show_comparison_radio") + comparison_values = ["Other 
Variants", "Source Frame", "Previous & Next Frame", "None"] + st.session_state['show_comparison'] = st_memory.radio("Show comparison to:", options=comparison_values, horizontal=True, key="show_comparison_radio") if st.session_state['show_comparison'] == "Other Variants": compare_to_other_variants(timing_details, project_uuid, data_repo,stage="Styling") @@ -249,194 +239,80 @@ def frame_styling_page(mainheader2, project_uuid: str): prompt_finder_element(project_uuid) elif st.session_state['styling_view'] == "Crop, Move & Rotate Image": - with st.expander("🤏 Crop, Move & Rotate Image", expanded=True): - - selector1, selector2, selector3 = st.columns([1, 1, 1]) - with selector1: - which_stage = st.radio("Which stage to work on?", ["Styled Key Frame", "Unedited Key Frame"], key="which_stage", horizontal=True) - with selector2: - how_to_crop = st_memory.radio("How to crop:", options=["Precision Cropping","Manual Cropping"], project_settings=project_settings, key="how_to_crop",horizontal=True) - - if which_stage == "Styled Key Frame": - stage_name = WorkflowStageType.STYLED.value - elif which_stage == "Unedited Key Frame": - stage_name = WorkflowStageType.SOURCE.value - - if how_to_crop == "Manual Cropping": - manual_cropping_element(stage_name, st.session_state['current_frame_uuid']) - elif how_to_crop == "Precision Cropping": - precision_cropping_element(stage_name, project_uuid) - + with st.expander("🤏 Crop, Move & Rotate Image", expanded=True): + cropping_selector_element(project_uuid) + elif st.session_state['styling_view'] == "Inpainting & BG Removal": with st.expander("🌌 Inpainting, Background Removal & More", expanded=True): - which_stage_to_inpaint = st.radio("Which stage to work on?", ["Styled Key Frame", "Unedited Key Frame"], horizontal=True, key="which_stage_inpainting") - if which_stage_to_inpaint == "Styled Key Frame": - inpainting_stage = WorkflowStageType.STYLED.value - elif which_stage_to_inpaint == "Unedited Key Frame": - inpainting_stage = 
WorkflowStageType.SOURCE.value - - ai_frame_editing_element(st.session_state['current_frame_uuid'], inpainting_stage) + inpainting_element(st.session_state['current_frame_uuid']) elif st.session_state['styling_view'] == "Draw On Image": with st.expander("📝 Draw On Image", expanded=True): - which_stage_to_draw_on = st.radio("Which stage to work on?", ["Styled Key Frame", "Unedited Key Frame"], horizontal=True, key="which_stage_drawing") + which_stage_to_draw_on = st_memory.radio("Which stage to work on?", ["Styled Key Frame", "Unedited Key Frame"], horizontal=True, key="which_stage_drawing") if which_stage_to_draw_on == "Styled Key Frame": drawing_mode(timing_details,project_settings,project_uuid, stage=WorkflowStageType.STYLED.value) elif which_stage_to_draw_on == "Unedited Key Frame": drawing_mode(timing_details,project_settings,project_uuid, stage=WorkflowStageType.SOURCE.value) + + with st.expander("➕ Add Key Frame", expanded=True): + + selected_image, inherit_styling_settings, how_long_after, which_stage_for_starting_image = add_key_frame_element(timing_details, project_uuid) + + if st.button(f"Add key frame",type="primary",use_container_width=True): + + add_key_frame(selected_image, inherit_styling_settings, how_long_after, which_stage_for_starting_image) + st.experimental_rerun() + elif st.session_state['frame_styling_view_type'] == "List View": - if 'current_page' not in st.session_state: - st.session_state['current_page'] = 1 - - if not('index_of_current_page' in st.session_state and st.session_state['index_of_current_page']): - st.session_state['index_of_current_page'] = 1 - - items_per_page = 10 - num_pages = math.ceil(len(timing_details) / items_per_page) + 1 st.markdown("---") header_col_1, header_col_2, header_col_3 = st.columns([1, 5, 1]) - with header_col_1: - st.session_state['current_page'] = st.radio("Select Page:", options=range( - 1, num_pages), horizontal=True, index=st.session_state['index_of_current_page'] - 1, key="page_selection_radio") - 
with header_col_3: - shift_frames_setting = st.toggle("Shift Frames", help="If set to True, this will shift the frames after your adjustment forward or backwards.") - if st.session_state['current_page'] != st.session_state['index_of_current_page']: - st.session_state['index_of_current_page'] = st.session_state['current_page'] - st.experimental_rerun() - - st.markdown("---") - - start_index = (st.session_state['current_page'] - 1) * items_per_page - end_index = min(start_index + items_per_page, - len(timing_details)) - + with header_col_1: + st.session_state['list_view_type'] = st_memory.radio("View type:", options=["Timeline View","Detailed View"], key="list_view_type_slider") - - if st.session_state['page'] == "Styling": - with st.sidebar: - styling_element(st.session_state['current_frame_uuid'], view_type="List") - - timing_details = data_repo.get_timing_list_from_project(project_uuid) - - for i in range(start_index, end_index): - - - display_number = i + 1 - - st.subheader(f"Frame {display_number}") - image1, image2, image3 = st.columns([2, 3, 2]) - - with image1: - display_image( - timing_uuid=timing_details[i].uuid, stage=WorkflowStageType.SOURCE.value, clickable=False) - - with image2: - display_image( - timing_uuid=timing_details[i].uuid, stage=WorkflowStageType.STYLED.value, clickable=False) - - with image3: - time1, time2 = st.columns([1, 1]) - with time1: - single_frame_time_selector(timing_details[i].uuid, 'sidebar', shift_frames=shift_frames_setting) - single_frame_time_duration_setter(timing_details[i].uuid,'sidebar',shift_frames=shift_frames_setting) - - with time2: - st.write("") - - if st.button(f"Jump to single frame view for #{display_number}"): - st.session_state['prev_frame_index'] = display_number - st.session_state['current_frame_uuid'] = timing_details[st.session_state['current_frame_index'] - 1].uuid - st.session_state['frame_styling_view_type'] = "Individual View" - st.session_state['change_view_type'] = True - st.experimental_rerun() - - 
st.markdown("---") - btn1, btn2, btn3 = st.columns([2, 1, 1]) - with btn1: - if st.button("Delete this keyframe", key=f'{i}'): - delete_frame(timing_details[i].uuid) - st.experimental_rerun() - with btn2: - if st.button("⬆️", key=f"Promote {display_number}"): - move_frame("Up", timing_details[i].uuid) - st.experimental_rerun() - with btn3: - if st.button("⬇️", key=f"Demote {display_number}"): - move_frame("Down", timing_details[i].uuid) - st.experimental_rerun() + with header_col_3: + shift_frames_setting = st.toggle("Shift Frames", help="If set to True, it will shift the frames after your adjustment forward by the amount of time you move.") - st.markdown("***") + if st.session_state['list_view_type'] == "Detailed View": + + with header_col_2: + num_pages, items_per_page = list_view_set_up(timing_details, project_uuid) + start_index, end_index = page_toggle(num_pages, items_per_page,project_uuid) - # Display radio buttons for pagination at the bottom st.markdown("***") + + if st.session_state['page'] == "Styling": - # Update the current page in session state - elif st.session_state['page'] == "Motion": - - - + with st.sidebar: + styling_element(st.session_state['current_frame_uuid'], view_type="List") + + styling_list_view(start_index, end_index, shift_frames_setting, project_uuid) - num_timing_details = len(timing_details) - - timing_details = data_repo.get_timing_list_from_project(project_uuid) - - for idx in range(start_index, end_index): - st.header(f"Frame {idx+1}") - timing1, timing2, timing3 = st.columns([1, 1, 1]) - - with timing1: - frame1, frame2,frame3 = st.columns([2,1,2]) - with frame1: - if timing_details[idx].primary_image_location: - st.image( - timing_details[idx].primary_image_location) - with frame2: - st.write("") - st.write("") - st.write("") - st.write("") - st.write("") - st.info(" ➜") - with frame3: - if idx+1 < num_timing_details and timing_details[idx+1].primary_image_location: - st.image(timing_details[idx+1].primary_image_location) - 
elif idx+1 == num_timing_details: - st.write("") - st.write("") - st.write("") - st.write("") - st.markdown("

FIN

", unsafe_allow_html=True) + st.markdown("***") - single_frame_time_selector(timing_details[idx].uuid, 'motion', shift_frames=shift_frames_setting) + # Update the current page in session state + elif st.session_state['page'] == "Motion": + + motion_list_view(start_index, end_index, shift_frames_setting, project_uuid) - single_frame_time_duration_setter(timing_details[idx].uuid,'motion',shift_frames=shift_frames_setting) + elif st.session_state['list_view_type'] == "Timeline View": - update_animation_style_element(timing_details[idx].uuid) + with header_col_2: + items_per_row = st.slider("How many frames per row?", min_value=1, max_value=10, value=5, step=1, key="items_per_row") + with header_col_3: + expand_all = st_memory.toggle("Expand All", key="expand_all") + if st.session_state['page'] == "Styling": + timeline_view(shift_frames_setting, project_uuid, items_per_row,expand_all,"Styling") + elif st.session_state['page'] == "Motion": + timeline_view(shift_frames_setting, project_uuid, items_per_row,expand_all,"Motion") - if timing_details[idx].aux_frame_index != len(timing_details) - 1: - with timing2: - current_individual_clip_element(timing_details[idx].uuid) - with timing3: - current_preview_video_element(timing_details[idx].uuid) - st.markdown("***") - - st.markdown("***") - with st.expander("➕ Add Key Frame", expanded=True): - - selected_image, inherit_styling_settings, how_long_after, which_stage_for_starting_image = add_key_frame_element(timing_details, project_uuid) - - if st.button(f"Add key frame",type="primary",use_container_width=True): - - add_key_frame(selected_image, inherit_styling_settings, how_long_after, which_stage_for_starting_image) - st.experimental_rerun() diff --git a/ui_components/methods/common_methods.py b/ui_components/methods/common_methods.py index ac677132..8a066aa4 100644 --- a/ui_components/methods/common_methods.py +++ b/ui_components/methods/common_methods.py @@ -31,6 +31,15 @@ from streamlit_image_comparison import 
image_comparison +def jump_to_single_frame_view_button(display_number, timing_details): + if st.button(f"Jump to #{display_number}"): + st.session_state['prev_frame_index'] = display_number + st.session_state['current_frame_uuid'] = timing_details[st.session_state['current_frame_index'] - 1].uuid + st.session_state['frame_styling_view_type'] = "Individual View" + st.session_state['change_view_type'] = True + st.experimental_rerun() + + def add_key_frame(selected_image, inherit_styling_settings, how_long_after, which_stage_for_starting_image): data_repo = DataRepo() project_uuid = st.session_state['project_uuid'] diff --git a/ui_components/widgets/add_key_frame_element.py b/ui_components/widgets/add_key_frame_element.py index 7a7aafab..885fbd67 100644 --- a/ui_components/widgets/add_key_frame_element.py +++ b/ui_components/widgets/add_key_frame_element.py @@ -61,10 +61,10 @@ def add_key_frame_element(timing_details, project_uuid): radio_text = "Inherit styling settings from the " + ("current frame?" 
if source_of_starting_image == "Uploaded image" else "selected frame") inherit_styling_settings = st_memory.radio(radio_text, ["Yes", "No"], \ - key="inherit_styling_settings", horizontal=True, project_settings=project_settings) + key="inherit_styling_settings", horizontal=True) apply_zoom_effects = st_memory.radio("Apply zoom effects to inputted image?", [ - "No","Yes"], key="apply_zoom_effects", horizontal=True, project_settings=project_settings) + "No","Yes"], key="apply_zoom_effects", horizontal=True) if apply_zoom_effects == "Yes": zoom_inputs(position='new', horizontal=True) diff --git a/ui_components/widgets/cropping_element.py b/ui_components/widgets/cropping_element.py index e085a8da..18522406 100644 --- a/ui_components/widgets/cropping_element.py +++ b/ui_components/widgets/cropping_element.py @@ -14,6 +14,25 @@ from ui_components.models import InternalProjectObject, InternalSettingObject from utils.data_repo.data_repo import DataRepo +from utils import st_memory + + +def cropping_selector_element(project_uuid): + selector1, selector2, selector3 = st.columns([1, 1, 1]) + with selector1: + which_stage = st_memory.radio("Which stage to work on?", ["Styled Key Frame", "Unedited Key Frame"], key="which_stage", horizontal=True) + with selector2: + how_to_crop = st_memory.radio("How to crop:", options=["Precision Cropping","Manual Cropping"], key="how_to_crop",horizontal=True) + + if which_stage == "Styled Key Frame": + stage_name = WorkflowStageType.STYLED.value + elif which_stage == "Unedited Key Frame": + stage_name = WorkflowStageType.SOURCE.value + + if how_to_crop == "Manual Cropping": + manual_cropping_element(stage_name, st.session_state['current_frame_uuid']) + elif how_to_crop == "Precision Cropping": + precision_cropping_element(stage_name, project_uuid) def precision_cropping_element(stage, project_uuid): data_repo = DataRepo() @@ -92,6 +111,7 @@ def get_working_image(): if 'working_image' not in st.session_state or 
st.session_state['current_working_image_number'] != st.session_state['current_frame_index'] or st.session_state['current_stage'] != stage: get_working_image() + st.experimental_rerun() options1, options2, option3, option4 = st.columns([3, 1, 1, 1]) with options1: diff --git a/ui_components/widgets/frame_time_selector.py b/ui_components/widgets/frame_time_selector.py index 90e3514e..84b87fa2 100644 --- a/ui_components/widgets/frame_time_selector.py +++ b/ui_components/widgets/frame_time_selector.py @@ -68,8 +68,8 @@ def single_frame_time_duration_setter(timing_uuid, src, shift_frames=True): max_value = 100.0 if shift_frames else clip_duration disable_duration_input = False if next_timing else True - help_text = None if shift_frames else "This will not shift subsequent frames - to do this, go to the motion section and set Shift Frames = True" - frame_duration = st.number_input("Frame duration (secs):", min_value=0.0, max_value=max_value, + help_text = None if shift_frames else "This will not shift subsequent frames - to do this, go to the Bulk View and set Shift Frames = True" + frame_duration = st.number_input("Duration:", min_value=0.0, max_value=max_value, value=clip_duration, step=0.1, key=f"frame_duration_{timing.aux_frame_index}_{src}", disabled=disable_duration_input, help=help_text) @@ -98,8 +98,8 @@ def single_frame_time_selector(timing_uuid, src, shift_frames=True): next_timing = data_repo.get_next_timing(timing_uuid) max_value = 100.0 if shift_frames else (next_timing.frame_time if next_timing else timing.frame_time) - help_text = None if shift_frames else "This will not shift subsequent frames - to do this, you do this in motion section - set Shift Frames = True" - frame_time = st.number_input("Frame time (secs):", min_value=min_value, max_value=max_value, + help_text = None if shift_frames else "This will not shift subsequent frames - to do this, go to the Bulk View and set Shift Frames = True" + frame_time = st.number_input("Time:", min_value=min_value, 
max_value=max_value, value=timing.frame_time, step=0.1, key=f"frame_time_{timing.aux_frame_index}_{src}",disabled=disabled_time_change, help=help_text) if frame_time != timing.frame_time: update_frame_time(timing_uuid, frame_time, shift_frames) diff --git a/ui_components/widgets/inpainting_element.py b/ui_components/widgets/inpainting_element.py new file mode 100644 index 00000000..b2ed299a --- /dev/null +++ b/ui_components/widgets/inpainting_element.py @@ -0,0 +1,324 @@ + +import os +import time +from io import BytesIO +from typing import List +import requests as r +from PIL import Image +import streamlit as st +from streamlit_drawable_canvas import st_canvas +from ui_components.constants import WorkflowStageType +from utils.data_repo.data_repo import DataRepo + +from utils import st_memory +from utils.data_repo.data_repo import DataRepo +from utils import st_memory +from ui_components.methods.common_methods import execute_image_edit +from ui_components.models import InternalFrameTimingObject, InternalSettingObject +from streamlit_image_comparison import image_comparison + + +def inpainting_element(timing_uuid): + + which_stage_to_inpaint = st_memory.radio("Which stage to work on?", ["Styled Key Frame", "Unedited Key Frame"], horizontal=True, key="which_stage_inpainting") + + if which_stage_to_inpaint == "Styled Key Frame": + stage = WorkflowStageType.STYLED.value + elif which_stage_to_inpaint == "Unedited Key Frame": + stage = WorkflowStageType.SOURCE.value + data_repo = DataRepo() + timing = data_repo.get_timing_from_uuid(timing_uuid) + timing_details: List[InternalFrameTimingObject] = data_repo.get_timing_list_from_project( + timing.project.uuid) + project_settings: InternalSettingObject = data_repo.get_project_setting( + timing.project.uuid) + + if len(timing_details) == 0: + st.info("You need to add key frames first in the Key Frame Selection section.") + else: + main_col_1, main_col_2 = st.columns([1, 2]) + + with main_col_1: + st.write("") + + # initiative 
value + if "current_frame_uuid" not in st.session_state: + st.session_state['current_frame_uuid'] = timing_details[0].uuid + + + if "edited_image" not in st.session_state: + st.session_state.edited_image = "" + + if stage == WorkflowStageType.STYLED.value and len(timing.alternative_images_list) == 0: + st.info("You need to add a style first in the Style Selection section.") + else: + if stage == WorkflowStageType.SOURCE.value: + editing_image = timing.source_image.location + elif stage == WorkflowStageType.STYLED.value: + variants = timing.alternative_images_list + editing_image = timing.primary_image_location + + width = int(project_settings.width) + height = int(project_settings.height) + + if editing_image == "": + st.error( + f"You don't have a {stage} image yet so you can't edit it.") + else: + with main_col_1: + if 'index_of_type_of_mask_selection' not in st.session_state: + st.session_state['index_of_type_of_mask_selection'] = 0 + mask_selection_options = ["Manual Background Selection", "Automated Background Selection", + "Automated Layer Selection", "Re-Use Previous Mask", "Invert Previous Mask"] + type_of_mask_selection = st.radio("How would you like to select what to edit?", mask_selection_options, + horizontal=True, index=st.session_state['index_of_type_of_mask_selection']) + if st.session_state['index_of_type_of_mask_selection'] != mask_selection_options.index(type_of_mask_selection): + st.session_state['index_of_type_of_mask_selection'] = mask_selection_options.index( + type_of_mask_selection) + st.experimental_rerun() + + if "which_layer" not in st.session_state: + st.session_state['which_layer'] = "Background" + st.session_state['which_layer_index'] = 0 + + if type_of_mask_selection == "Automated Layer Selection": + layers = ["Background", "Middleground", "Foreground"] + st.session_state['which_layer'] = st.multiselect( + "Which layers would you like to replace?", layers) + + if type_of_mask_selection == "Manual Background Selection": + if 
st.session_state['edited_image'] == "": + with main_col_1: + if editing_image.startswith("http"): + canvas_image = r.get(editing_image) + canvas_image = Image.open( + BytesIO(canvas_image.content)) + else: + canvas_image = Image.open(editing_image) + if 'drawing_input' not in st.session_state: + st.session_state['drawing_input'] = 'Magic shapes 🪄' + col1, col2 = st.columns([6, 3]) + + with col1: + st.session_state['drawing_input'] = st.radio( + "Drawing tool:", + ("Make shapes 🪄", "Move shapes 🏋🏾‍♂️", "Make squares □", "Draw lines ✏️"), horizontal=True, + ) + + if st.session_state['drawing_input'] == "Move shapes 🏋🏾‍♂️": + drawing_mode = "transform" + st.info( + "To delete something, just move it outside of the image! 🥴") + elif st.session_state['drawing_input'] == "Make shapes 🪄": + drawing_mode = "polygon" + st.info("To end a shape, right click!") + elif st.session_state['drawing_input'] == "Draw lines ✏️": + drawing_mode = "freedraw" + st.info("To draw, draw! ") + elif st.session_state['drawing_input'] == "Make squares □": + drawing_mode = "rect" + + with col2: + if drawing_mode == "freedraw": + stroke_width = st.slider( + "Stroke width: ", 1, 25, 12) + else: + stroke_width = 3 + + with main_col_2: + + realtime_update = True + + canvas_result = st_canvas( + fill_color="rgba(0, 0, 0)", + stroke_width=stroke_width, + stroke_color="rgba(0, 0, 0)", + background_color="rgb(255, 255, 255)", + background_image=canvas_image, + update_streamlit=realtime_update, + height=height, + width=width, + drawing_mode=drawing_mode, + display_toolbar=True, + key="full_app", + ) + + if 'image_created' not in st.session_state: + st.session_state['image_created'] = 'no' + + if canvas_result.image_data is not None: + img_data = canvas_result.image_data + im = Image.fromarray( + img_data.astype("uint8"), mode="RGBA") + create_or_update_mask( + st.session_state['current_frame_uuid'], im) + else: + image_file = data_repo.get_file_from_uuid(st.session_state['edited_image']) + 
image_comparison( + img1=editing_image, + img2=image_file.location, starting_position=5, label1="Original", label2="Edited") + if st.button("Reset Canvas"): + st.session_state['edited_image'] = "" + st.experimental_rerun() + + elif type_of_mask_selection == "Automated Background Selection" or type_of_mask_selection == "Automated Layer Selection" or type_of_mask_selection == "Re-Use Previous Mask" or type_of_mask_selection == "Invert Previous Mask": + with main_col_1: + if type_of_mask_selection in ["Re-Use Previous Mask", "Invert Previous Mask"]: + if not timing_details[st.session_state['current_frame_index'] - 1].mask: + st.info( + "You don't have a previous mask to re-use.") + else: + mask1, mask2 = st.columns([2, 1]) + with mask1: + if type_of_mask_selection == "Re-Use Previous Mask": + st.info( + "This will update the **black pixels** in the mask with the pixels from the image you are editing.") + elif type_of_mask_selection == "Invert Previous Mask": + st.info( + "This will update the **white pixels** in the mask with the pixels from the image you are editing.") + st.image( + timing_details[st.session_state['current_frame_index'] - 1].mask.location, use_column_width=True) + + with main_col_2: + if st.session_state['edited_image'] == "": + st.image(editing_image, use_column_width=True) + else: + image_file = data_repo.get_file_from_uuid(st.session_state['edited_image']) + image_comparison( + img1=editing_image, + img2=image_file.location, starting_position=5, label1="Original", label2="Edited") + if st.button("Reset Canvas"): + st.session_state['edited_image'] = "" + st.experimental_rerun() + + with main_col_1: + + if "type_of_mask_replacement" not in st.session_state: + st.session_state["type_of_mask_replacement"] = "Replace With Image" + st.session_state["index_of_type_of_mask_replacement"] = 0 + + types_of_mask_replacement = [ + "Inpainting", "Replace With Image"] + st.session_state["type_of_mask_replacement"] = st.radio( + "Select type of edit", 
types_of_mask_replacement, horizontal=True, index=st.session_state["index_of_type_of_mask_replacement"]) + + if st.session_state["index_of_type_of_mask_replacement"] != types_of_mask_replacement.index(st.session_state["type_of_mask_replacement"]): + st.session_state["index_of_type_of_mask_replacement"] = types_of_mask_replacement.index( + st.session_state["type_of_mask_replacement"]) + st.experimental_rerun() + + if st.session_state["type_of_mask_replacement"] == "Replace With Image": + prompt = "" + negative_prompt = "" + background_list = [f for f in os.listdir( + f'videos/{timing.project.uuid}/assets/resources/backgrounds') if f.endswith('.png')] + background_list = [f for f in os.listdir( + f'videos/{timing.project.uuid}/assets/resources/backgrounds') if f.endswith('.png')] + sources_of_images = ["Uploaded", "From Other Frame"] + if 'index_of_source_of_image' not in st.session_state: + st.session_state['index_of_source_of_image'] = 0 + source_of_image = st.radio("Select type of image", sources_of_images, + horizontal=True, index=st.session_state['index_of_source_of_image']) + + if st.session_state['index_of_source_of_image'] != sources_of_images.index(source_of_image): + st.session_state['index_of_source_of_image'] = sources_of_images.index( + source_of_image) + st.experimental_rerun() + + if source_of_image == "Uploaded": + btn1, btn2 = st.columns([1, 1]) + with btn1: + uploaded_files = st.file_uploader( + "Add more background images here", accept_multiple_files=True) + if st.button("Upload Backgrounds"): + for uploaded_file in uploaded_files: + with open(os.path.join(f"videos/{timing.project.uuid}/assets/resources/backgrounds", uploaded_file.name), "wb") as f: + f.write(uploaded_file.getbuffer()) + st.success( + "Your backgrounds are uploaded file - they should appear in the dropdown.") + background_list.append( + uploaded_file.name) + time.sleep(1.5) + st.experimental_rerun() + with btn2: + background_selection = st.selectbox( + "Range background", 
background_list) + background_image = f'videos/{timing.project.uuid}/assets/resources/backgrounds/{background_selection}' + if background_list != []: + st.image(f"{background_image}", + use_column_width=True) + elif source_of_image == "From Other Frame": + btn1, btn2 = st.columns([1, 1]) + with btn1: + which_stage_to_use = st.radio( + "Select stage to use:", WorkflowStageType.value_list()) + which_image_to_use = st.number_input( + "Select image to use:", min_value=0, max_value=len(timing_details)-1, value=0) + if which_stage_to_use == WorkflowStageType.SOURCE.value: + background_image = timing_details[which_image_to_use].source_image.location + + elif which_stage_to_use == WorkflowStageType.STYLED.value: + background_image = timing_details[which_image_to_use].primary_image_location + with btn2: + st.image(background_image, + use_column_width=True) + + elif st.session_state["type_of_mask_replacement"] == "Inpainting": + btn1, btn2 = st.columns([1, 1]) + with btn1: + prompt = st.text_area("Prompt:", help="Describe the whole image, but focus on the details you want changed!", + value=project_settings.default_prompt) + with btn2: + negative_prompt = st.text_area( + "Negative Prompt:", help="Enter any things you want to make the model avoid!", value=project_settings.default_negative_prompt) + + edit1, edit2 = st.columns(2) + + with edit1: + if st.button(f'Run Edit On Current Image'): + if st.session_state["type_of_mask_replacement"] == "Inpainting": + edited_image = execute_image_edit(type_of_mask_selection, st.session_state["type_of_mask_replacement"], + "", editing_image, prompt, negative_prompt, width, height, st.session_state['which_layer'], st.session_state['current_frame_uuid']) + st.session_state['edited_image'] = edited_image.uuid + elif st.session_state["type_of_mask_replacement"] == "Replace With Image": + edited_image = execute_image_edit(type_of_mask_selection, st.session_state["type_of_mask_replacement"], + background_image, editing_image, "", "", width, 
height, st.session_state['which_layer'], st.session_state['current_frame_uuid']) + st.session_state['edited_image'] = edited_image.uuid + st.experimental_rerun() + + with edit2: + if st.session_state['edited_image'] != "": + if st.button("Promote Last Edit", type="primary"): + if stage == WorkflowStageType.SOURCE.value: + data_repo.update_specific_timing( + st.session_state['current_frame_uuid'], source_image_id=st.session_state['edited_image']) + elif stage == WorkflowStageType.STYLED.value: + number_of_image_variants = add_image_variant( + st.session_state['edited_image'], st.session_state['current_frame_uuid']) + promote_image_variant( + st.session_state['current_frame_uuid'], number_of_image_variants - 1) + st.session_state['edited_image'] = "" + st.experimental_rerun() + else: + if st.button("Run Edit & Promote"): + if st.session_state["type_of_mask_replacement"] == "Inpainting": + edited_image = execute_image_edit(type_of_mask_selection, st.session_state["type_of_mask_replacement"], + "", editing_image, prompt, negative_prompt, width, height, st.session_state['which_layer'], st.session_state['current_frame_uuid']) + st.session_state['edited_image'] = edited_image.uuid + elif st.session_state["type_of_mask_replacement"] == "Replace With Image": + edited_image = execute_image_edit(type_of_mask_selection, st.session_state["type_of_mask_replacement"], + background_image, editing_image, "", "", width, height, st.session_state['which_layer'], st.session_state['current_frame_uuid']) + st.session_state['edited_image'] = edited_image.uuid + + if stage == WorkflowStageType.SOURCE.value: + data_repo.update_specific_timing( + st.session_state['current_frame_uuid'], source_image_id=st.session_state['edited_image']) + elif stage == WorkflowStageType.STYLED.value: + number_of_image_variants = add_image_variant( + edited_image.uuid, st.session_state['current_frame_uuid']) + promote_image_variant( + st.session_state['current_frame_uuid'], number_of_image_variants - 1) + + 
st.session_state['edited_image'] = "" + st.success("Image promoted!") + st.experimental_rerun() \ No newline at end of file diff --git a/ui_components/widgets/list_view.py b/ui_components/widgets/list_view.py new file mode 100644 index 00000000..66a1fc91 --- /dev/null +++ b/ui_components/widgets/list_view.py @@ -0,0 +1,131 @@ +import streamlit as st +from ui_components.constants import WorkflowStageType +from utils.data_repo.data_repo import DataRepo +from ui_components.widgets.frame_time_selector import single_frame_time_selector, single_frame_time_duration_setter +from ui_components.widgets.image_carousal import display_image +from ui_components.methods.common_methods import delete_frame, move_frame,jump_to_single_frame_view_button +import math +from utils.data_repo.data_repo import DataRepo +from ui_components.methods.common_methods import delete_frame +from ui_components.widgets.frame_clip_generation_elements import current_individual_clip_element, current_preview_video_element, update_animation_style_element +from ui_components.widgets.frame_time_selector import single_frame_time_selector, single_frame_time_duration_setter +from ui_components.widgets.image_carousal import display_image +from ui_components.widgets.frame_clip_generation_elements import current_individual_clip_element, current_preview_video_element +from utils.data_repo.data_repo import DataRepo + +def list_view_set_up(timing_details,project_uuid): + data_repo = DataRepo() + + timing_details = data_repo.get_timing_list_from_project(project_uuid) + if 'current_page' not in st.session_state: + st.session_state['current_page'] = 1 + + if not('index_of_current_page' in st.session_state and st.session_state['index_of_current_page']): + st.session_state['index_of_current_page'] = 1 + + items_per_page = 10 + num_pages = math.ceil(len(timing_details) / items_per_page) + 1 + + return num_pages, items_per_page + +def page_toggle(num_pages, items_per_page, project_uuid): + data_repo = DataRepo() + 
timing_details = data_repo.get_timing_list_from_project(project_uuid) + + st.session_state['current_page'] = st.radio("Select Page:", options=range( + 1, num_pages), horizontal=True, index=st.session_state['index_of_current_page'] - 1, key="page_selection_radio") + if st.session_state['current_page'] != st.session_state['index_of_current_page']: + st.session_state['index_of_current_page'] = st.session_state['current_page'] + st.experimental_rerun() + + start_index = (st.session_state['current_page'] - 1) * items_per_page + end_index = min(start_index + items_per_page,len(timing_details)) + + return start_index, end_index + +def styling_list_view(start_index, end_index, shift_frames_setting, project_uuid): + data_repo = DataRepo() + timing_details = data_repo.get_timing_list_from_project(project_uuid) + for i in range(start_index, end_index): + display_number = i + 1 + st.subheader(f"Frame {display_number}") + image1, image2, image3 = st.columns([2, 3, 2]) + + with image1: + display_image(timing_uuid=timing_details[i].uuid, stage=WorkflowStageType.SOURCE.value, clickable=False) + + with image2: + display_image(timing_uuid=timing_details[i].uuid, stage=WorkflowStageType.STYLED.value, clickable=False) + + with image3: + time1, time2 = st.columns([1, 1]) + with time1: + single_frame_time_selector(timing_details[i].uuid, 'sidebar', shift_frames=shift_frames_setting) + single_frame_time_duration_setter(timing_details[i].uuid,'sidebar',shift_frames=shift_frames_setting) + + with time2: + st.write("") + + + jump_to_single_frame_view_button(display_number,timing_details) + + st.markdown("---") + btn1, btn2, btn3 = st.columns([2, 1, 1]) + with btn1: + if st.button("Delete this keyframe", key=f'{i}'): + delete_frame(timing_details[i].uuid) + st.experimental_rerun() + with btn2: + if st.button("⬆️", key=f"Promote {display_number}"): + move_frame("Up", timing_details[i].uuid) + st.experimental_rerun() + with btn3: + if st.button("⬇️", key=f"Demote {display_number}"): + 
move_frame("Down", timing_details[i].uuid) + st.experimental_rerun() + + st.markdown("***") + +def motion_list_view(start_index, end_index, shift_frames_setting, project_uuid): + data_repo = DataRepo() + timing_details = data_repo.get_timing_list_from_project(project_uuid) + num_timing_details = len(timing_details) + timing_details = data_repo.get_timing_list_from_project(project_uuid) + + for idx in range(start_index, end_index): + st.header(f"Frame {idx+1}") + timing1, timing2, timing3 = st.columns([1, 1, 1]) + + with timing1: + frame1, frame2, frame3 = st.columns([2, 1, 2]) + with frame1: + if timing_details[idx].primary_image_location: + st.image(timing_details[idx].primary_image_location) + with frame2: + st.write("") + st.write("") + st.write("") + st.write("") + st.write("") + st.info(" ➜") + with frame3: + if idx+1 < num_timing_details and timing_details[idx+1].primary_image_location: + st.image(timing_details[idx+1].primary_image_location) + elif idx+1 == num_timing_details: + st.write("") + st.write("") + st.write("") + st.write("") + st.markdown("

FIN

", unsafe_allow_html=True) + + single_frame_time_selector(timing_details[idx].uuid, 'motion', shift_frames=shift_frames_setting) + single_frame_time_duration_setter(timing_details[idx].uuid, 'motion', shift_frames=shift_frames_setting) + update_animation_style_element(timing_details[idx].uuid) + + if timing_details[idx].aux_frame_index != len(timing_details) - 1: + with timing2: + current_individual_clip_element(timing_details[idx].uuid) + with timing3: + current_preview_video_element(timing_details[idx].uuid) + + st.markdown("***") \ No newline at end of file diff --git a/ui_components/widgets/timeline_view.py b/ui_components/widgets/timeline_view.py new file mode 100644 index 00000000..5ac519b0 --- /dev/null +++ b/ui_components/widgets/timeline_view.py @@ -0,0 +1,65 @@ +import streamlit as st +from ui_components.methods.common_methods import delete_frame, jump_to_single_frame_view_button, move_frame +from ui_components.widgets.frame_time_selector import single_frame_time_selector, single_frame_time_duration_setter +from ui_components.widgets.image_carousal import display_image +from utils.data_repo.data_repo import DataRepo +from ui_components.widgets.frame_clip_generation_elements import update_animation_style_element +from ui_components.constants import WorkflowStageType + +def timeline_view_buttons(i, j, timing_details, items_per_row): + if items_per_row > 6: + jump_to_single_frame_view_button(i + j + 1, timing_details) + st.markdown("***") + btn1, btn2, btn3 = st.columns([1, 1, 1]) + with btn1: + st.button("⬅️", key=f"move_frame_back_{i + j + 1}", help="Move frame back") + with btn2: + st.button("➡️", key=f"move_frame_forward_{i + j + 1}", help="Move frame forward") + with btn3: + st.button("🗑️", key=f"delete_frame_{i + j + 1}", help="Delete frame") + else: + btn1, btn2, btn3, btn4 = st.columns([1.7, 1, 1, 1]) + with btn1: + jump_to_single_frame_view_button(i + j + 1, timing_details) + with btn2: + st.button("⬅️", key=f"move_frame_back_{i + j + 1}", 
help="Move frame back") + with btn3: + st.button("➡️", key=f"move_frame_forward_{i + j + 1}", help="Move frame forward") + with btn4: + st.button("🗑️", key=f"delete_frame_{i + j + 1}", help="Delete frame") + + +def timeline_view(shift_frames_setting, project_uuid, items_per_row, expand_all, stage='Styling'): + data_repo = DataRepo() + timing = data_repo.get_timing_list_from_project(project_uuid)[0] + timing_details = data_repo.get_timing_list_from_project(project_uuid) + for i in range(0, len(timing_details), items_per_row): # Step of items_per_row for grid + grid = st.columns(items_per_row) # Create items_per_row columns for grid + for j in range(items_per_row): + if i + j < len(timing_details): # Check if index is within range + with grid[j]: + display_number = i + j + 1 + if stage == 'Styling': + display_image(timing_uuid=timing_details[i + j].uuid, stage=WorkflowStageType.STYLED.value, clickable=False) + elif stage == 'Motion': + if timing.timed_clip: + st.video(timing.timed_clip.location) + else: + st.error("No video found for this frame.") + with st.expander(f'Frame {display_number}', expanded=expand_all): + single_frame_time_selector(timing_details[i + j].uuid, 'motion', shift_frames=shift_frames_setting) + single_frame_time_duration_setter(timing_details[i + j].uuid, 'motion', shift_frames=shift_frames_setting) + update_animation_style_element(timing_details[i + j].uuid) + timeline_view_buttons(i, j, timing_details, items_per_row) + # if move_frame_back button is clicked + if st.session_state[f"move_frame_back_{i + j + 1}"]: + move_frame("Up", timing_details[i + j].uuid) + st.experimental_rerun() + if st.session_state[f"move_frame_forward_{i + j + 1}"]: + move_frame("Down", timing_details[i + j].uuid) + st.experimental_rerun() + if st.session_state[f"delete_frame_{i + j + 1}"]: + delete_frame(timing_details[i + j].uuid) + st.experimental_rerun() + + st.markdown("***") \ No newline at end of file diff --git a/utils/st_memory.py b/utils/st_memory.py index 
f4e89188..d24ffb5f 100644 --- a/utils/st_memory.py +++ b/utils/st_memory.py @@ -6,22 +6,15 @@ logger = AppLogger() # TODO: custom elements must be stateless and completely separate from our code logic -def radio(label, options, index=0, key=None, help=None, on_change=None, disabled=False, horizontal=False, label_visibility="visible", default_value=0, project_settings=None): - if key not in st.session_state: - if not getattr(project_settings, key, default_value): - st.session_state[key] = default_value - else: - st.session_state[key] = options.index(getattr(project_settings, key, default_value)) +def radio(label, options, index=0, key=None, help=None, on_change=None, disabled=False, horizontal=False, label_visibility="visible", default_value=0): + + if key not in st.session_state: + st.session_state[key] = default_value - # logger.log(LoggingType.DEBUG, "radio session_state key: " + str(st.session_state[key]) + " type: " + str(type(st.session_state[key]))) selection = st.radio(label=label, options=options, index=st.session_state[key], horizontal=horizontal, label_visibility=label_visibility) if options.index(selection) != st.session_state[key]: st.session_state[key] = options.index(selection) - # if getattr(project_settings, key) != None: - # data_repo = DataRepo() - # data_repo.update_project_setting(project_settings.project.uuid, key=selection) - # pass st.experimental_rerun() return selection @@ -46,3 +39,34 @@ def number_input(label, min_value=None, max_value=None, value=None, step=None, f return selection +def select_slider(label, options=(), value=None, format_func=None, key=None, help=None, on_change=None, args=None, kwargs=None, *, disabled=False, label_visibility="visible", default_value=None, project_settings=None): + if key not in st.session_state: + if getattr(project_settings, key, default_value): + st.session_state[key] = getattr(project_settings, key, default_value) + else: + st.session_state[key] = default_value + + selection = 
st.select_slider(label, options, st.session_state[key], format_func, key, help, on_change, disabled, label_visibility) + + if selection != st.session_state[key]: + st.session_state[key] = selection + if getattr(project_settings, key, default_value): + data_repo = DataRepo() + data_repo.update_project_setting(project_settings.project.uuid, key=value) + st.experimental_rerun() + + return selection + + + +def toggle(label, key=None, help=None, on_change=None, disabled=False, label_visibility="visible", default_value=False): + if key not in st.session_state: + st.session_state[key] = default_value + + selection = st.toggle(label=label, value=st.session_state[key], help=help, on_change=on_change, disabled=disabled, label_visibility=label_visibility) + + if selection != st.session_state[key]: + st.session_state[key] = selection + st.experimental_rerun() + + return selection \ No newline at end of file From d933b2eb7083a3f503d4ef0b98d7cb0a5f884c5c Mon Sep 17 00:00:00 2001 From: peter942 Date: Thu, 28 Sep 2023 01:56:29 +0200 Subject: [PATCH 020/164] Lots --- .../components/frame_styling_page.py | 115 +--- ui_components/methods/common_methods.py | 588 +++--------------- ui_components/setup.py | 24 +- ui_components/widgets/drawing_element.py | 254 ++++++++ ui_components/widgets/inpainting_element.py | 2 +- utils/st_memory.py | 15 + 6 files changed, 381 insertions(+), 617 deletions(-) create mode 100644 ui_components/widgets/drawing_element.py diff --git a/ui_components/components/frame_styling_page.py b/ui_components/components/frame_styling_page.py index e55b7281..bd325fcd 100644 --- a/ui_components/components/frame_styling_page.py +++ b/ui_components/components/frame_styling_page.py @@ -1,13 +1,12 @@ import streamlit as st -from streamlit_image_comparison import image_comparison + import time from PIL import Image -from ui_components.methods.common_methods import delete_frame, drawing_mode, ai_frame_editing_element, 
clone_styling_settings,add_key_frame,jump_to_single_frame_view_button +from ui_components.methods.common_methods import add_key_frame,compare_to_previous_and_next_frame,compare_to_source_frame,style_cloning_element from ui_components.methods.ml_methods import trigger_restyling_process -from ui_components.methods.video_methods import create_or_get_single_preview_video -from ui_components.widgets.cropping_element import manual_cropping_element, precision_cropping_element, cropping_selector_element -from ui_components.widgets.frame_clip_generation_elements import current_individual_clip_element, current_preview_video_element, update_animation_style_element -from ui_components.widgets.frame_time_selector import single_frame_time_selector, single_frame_time_duration_setter + +from ui_components.widgets.cropping_element import cropping_selector_element +from ui_components.widgets.frame_clip_generation_elements import current_preview_video_element, update_animation_style_element from ui_components.widgets.frame_selector import frame_selector_widget from ui_components.widgets.image_carousal import display_image from ui_components.widgets.prompt_finder import prompt_finder_element @@ -17,6 +16,7 @@ from ui_components.widgets.compare_to_other_variants import compare_to_other_variants from ui_components.widgets.animation_style_element import animation_style_element from ui_components.widgets.inpainting_element import inpainting_element +from ui_components.widgets.drawing_element import drawing_element from ui_components.widgets.list_view import list_view_set_up, page_toggle, styling_list_view,motion_list_view from streamlit_option_menu import option_menu from utils import st_memory @@ -96,78 +96,29 @@ def frame_styling_page(mainheader2, project_uuid: str): elif st.session_state['page'] == "Styling": - # carousal_of_images_element(project_uuid, stage=WorkflowStageType.STYLED.value) - comparison_values = ["Other Variants", "Source Frame", "Previous & Next Frame", "None"] - - 
st.session_state['show_comparison'] = st_memory.radio("Show comparison to:", options=comparison_values, horizontal=True, key="show_comparison_radio") + # carousal_of_images_element(project_uuid, stage=WorkflowStageType.STYLED.value) + st.session_state['show_comparison'] = st_memory.radio("Show comparison to:", options=["Other Variants", "Source Frame", "Previous & Next Frame", "None"], horizontal=True, key="show_comparison_radio") if st.session_state['show_comparison'] == "Other Variants": compare_to_other_variants(timing_details, project_uuid, data_repo,stage="Styling") elif st.session_state['show_comparison'] == "Source Frame": - if timing_details[st.session_state['current_frame_index']- 1].primary_image: - img2 = timing_details[st.session_state['current_frame_index'] - 1].primary_image_location - else: - img2 = 'https://i.ibb.co/GHVfjP0/Image-Not-Yet-Created.png' - - img1 = timing_details[st.session_state['current_frame_index'] - 1].source_image.location if timing_details[st.session_state['current_frame_index'] - 1].source_image else 'https://i.ibb.co/GHVfjP0/Image-Not-Yet-Created.png' - - image_comparison(starting_position=50, - img1=img1, - img2=img2, make_responsive=False, label1=WorkflowStageType.SOURCE.value, label2=WorkflowStageType.STYLED.value) + compare_to_source_frame(timing_details) elif st.session_state['show_comparison'] == "Previous & Next Frame": - mainimages1, mainimages2, mainimages3 = st.columns([1, 1, 1]) - - with mainimages1: - if st.session_state['current_frame_index'] - 2 >= 0: - previous_image = data_repo.get_timing_from_frame_number(project_uuid, frame_number=st.session_state['current_frame_index'] - 2) - st.info(f"Previous image") - display_image( - timing_uuid=previous_image.uuid, stage=WorkflowStageType.STYLED.value, clickable=False) - - if st.button(f"Preview Interpolation From #{st.session_state['current_frame_index']-1} to #{st.session_state['current_frame_index']}", key=f"Preview Interpolation From 
#{st.session_state['current_frame_index']-1} to #{st.session_state['current_frame_index']}", use_container_width=True): - prev_frame_timing = data_repo.get_prev_timing(st.session_state['current_frame_uuid']) - create_or_get_single_preview_video(prev_frame_timing.uuid) - prev_frame_timing = data_repo.get_timing_from_uuid(prev_frame_timing.uuid) - st.video(prev_frame_timing.timed_clip.location) - - with mainimages2: - st.success(f"Current image") - display_image( - timing_uuid=st.session_state['current_frame_uuid'], stage=WorkflowStageType.STYLED.value, clickable=False) - - with mainimages3: - if st.session_state['current_frame_index'] + 1 <= len(timing_details): - next_image = data_repo.get_timing_from_frame_number(project_uuid, frame_number=st.session_state['current_frame_index']) - st.info(f"Next image") - display_image(timing_uuid=next_image.uuid, stage=WorkflowStageType.STYLED.value, clickable=False) - - if st.button(f"Preview Interpolation From #{st.session_state['current_frame_index']} to #{st.session_state['current_frame_index']+1}", key=f"Preview Interpolation From #{st.session_state['current_frame_index']} to #{st.session_state['current_frame_index']+1}", use_container_width=True): - create_or_get_single_preview_video( - st.session_state['current_frame_uuid']) - current_frame = data_repo.get_timing_from_uuid(st.session_state['current_frame_uuid']) - st.video(current_frame.timed_clip.location) + compare_to_previous_and_next_frame(project_uuid,timing_details) elif st.session_state['show_comparison'] == "None": - display_image( - timing_uuid=st.session_state['current_frame_uuid'], stage=WorkflowStageType.STYLED.value, clickable=False) - st.markdown("***") + display_image(timing_uuid=st.session_state['current_frame_uuid'], stage=WorkflowStageType.STYLED.value, clickable=False) - if 'styling_view_index' not in st.session_state: - st.session_state['styling_view_index'] = 0 - st.session_state['change_styling_view_type'] = False - + st.markdown("***") + 
styling_views = ["Generate Variants", "Crop, Move & Rotate Image", "Inpainting & BG Removal","Draw On Image"] - - st.session_state['styling_view'] = option_menu(None, styling_views, icons=['magic', 'crop', "paint-bucket", 'pencil'], menu_icon="cast", default_index=st.session_state['styling_view_index'], key="styling_view_selector", orientation="horizontal", styles={ - "nav-link": {"font-size": "15px", "margin": "0px", "--hover-color": "#eee"}, "nav-link-selected": {"background-color": "#66A9BE"}}) - - if st.session_state['styling_view_index'] != styling_views.index(st.session_state['styling_view']): - st.session_state['styling_view_index'] = styling_views.index(st.session_state['styling_view']) - + + st.session_state['styling_view'] = st_memory.menu('',styling_views, icons=['magic', 'crop', "paint-bucket", 'pencil'], menu_icon="cast", default_index=st.session_state.get('styling_view_index', 0), key="styling_view_selector", orientation="horizontal", styles={"nav-link": {"font-size": "15px", "margin": "0px", "--hover-color": "#eee"}, "nav-link-selected": {"background-color": "#66A9BE"}}) + if st.session_state['styling_view'] == "Generate Variants": with st.expander("🛠️ Generate Variants + Prompt Settings", expanded=True): @@ -181,8 +132,6 @@ def frame_styling_page(mainheader2, project_uuid: str): f"How many variants?", min_value=1, max_value=100, key=f"number_of_variants_{st.session_state['current_frame_index']}") with detail2: - st.write("") - st.write("") # TODO: add custom model validation such for sd img2img the value of strength can only be 1 if st.button(f"Generate variants", key=f"new_variations_{st.session_state['current_frame_index']}", help="This will generate new variants based on the settings to the left."): @@ -213,27 +162,7 @@ def frame_styling_page(mainheader2, project_uuid: str): st.markdown("***") - open_copier = st.checkbox( - "Copy styling settings from another frame") - if open_copier is True: - copy1, copy2 = st.columns([1, 1]) - with copy1: - 
which_frame_to_copy_from = st.number_input("Which frame would you like to copy styling settings from?", min_value=1, max_value=len( - timing_details), value=st.session_state['current_frame_index'], step=1) - if st.button("Copy styling settings from this frame"): - clone_styling_settings(which_frame_to_copy_from - 1, st.session_state['current_frame_uuid']) - st.experimental_rerun() - - with copy2: - display_image( - idx=which_frame_to_copy_from, stage=WorkflowStageType.STYLED.value, clickable=False, timing_details=timing_details) - st.caption("Prompt:") - st.caption( - timing_details[which_frame_to_copy_from].prompt) - if timing_details[which_frame_to_copy_from].model is not None: - st.caption("Model:") - st.caption( - timing_details[which_frame_to_copy_from].model.name) + style_cloning_element(timing_details) with st.expander("🔍 Prompt Finder"): prompt_finder_element(project_uuid) @@ -250,12 +179,10 @@ def frame_styling_page(mainheader2, project_uuid: str): elif st.session_state['styling_view'] == "Draw On Image": with st.expander("📝 Draw On Image", expanded=True): - - which_stage_to_draw_on = st_memory.radio("Which stage to work on?", ["Styled Key Frame", "Unedited Key Frame"], horizontal=True, key="which_stage_drawing") - if which_stage_to_draw_on == "Styled Key Frame": - drawing_mode(timing_details,project_settings,project_uuid, stage=WorkflowStageType.STYLED.value) - elif which_stage_to_draw_on == "Unedited Key Frame": - drawing_mode(timing_details,project_settings,project_uuid, stage=WorkflowStageType.SOURCE.value) + + drawing_element(timing_details,project_settings,project_uuid) + + with st.expander("➕ Add Key Frame", expanded=True): diff --git a/ui_components/methods/common_methods.py b/ui_components/methods/common_methods.py index 8a066aa4..91c7c0e1 100644 --- a/ui_components/methods/common_methods.py +++ b/ui_components/methods/common_methods.py @@ -1,7 +1,6 @@ import io from typing import List import streamlit as st -from streamlit_drawable_canvas import 
st_canvas import os from PIL import Image, ImageDraw, ImageOps, ImageFilter from moviepy.editor import * @@ -25,11 +24,84 @@ from utils.data_repo.data_repo import DataRepo from shared.constants import AnimationStyleType +from ui_components.widgets.image_carousal import display_image +from streamlit_image_comparison import image_comparison + from ui_components.models import InternalFileObject from typing import Union -from streamlit_image_comparison import image_comparison +def compare_to_source_frame(timing_details): + if timing_details[st.session_state['current_frame_index']- 1].primary_image: + img2 = timing_details[st.session_state['current_frame_index'] - 1].primary_image_location + else: + img2 = 'https://i.ibb.co/GHVfjP0/Image-Not-Yet-Created.png' + + img1 = timing_details[st.session_state['current_frame_index'] - 1].source_image.location if timing_details[st.session_state['current_frame_index'] - 1].source_image else 'https://i.ibb.co/GHVfjP0/Image-Not-Yet-Created.png' + + image_comparison(starting_position=50, + img1=img1, + img2=img2, make_responsive=False, label1=WorkflowStageType.SOURCE.value, label2=WorkflowStageType.STYLED.value) + + + +def compare_to_previous_and_next_frame(project_uuid, timing_details): + data_repo = DataRepo() + mainimages1, mainimages2, mainimages3 = st.columns([1, 1, 1]) + + with mainimages1: + if st.session_state['current_frame_index'] - 2 >= 0: + previous_image = data_repo.get_timing_from_frame_number(project_uuid, frame_number=st.session_state['current_frame_index'] - 2) + st.info(f"Previous image:") + display_image( + timing_uuid=previous_image.uuid, stage=WorkflowStageType.STYLED.value, clickable=False) + + if st.button(f"Preview Interpolation From #{st.session_state['current_frame_index']-1} to #{st.session_state['current_frame_index']}", key=f"Preview Interpolation From #{st.session_state['current_frame_index']-1} to #{st.session_state['current_frame_index']}", use_container_width=True): + prev_frame_timing = 
data_repo.get_prev_timing(st.session_state['current_frame_uuid']) + create_or_get_single_preview_video(prev_frame_timing.uuid) + prev_frame_timing = data_repo.get_timing_from_uuid(prev_frame_timing.uuid) + st.video(prev_frame_timing.timed_clip.location) + + with mainimages2: + st.success(f"Current image:") + display_image( + timing_uuid=st.session_state['current_frame_uuid'], stage=WorkflowStageType.STYLED.value, clickable=False) + + with mainimages3: + if st.session_state['current_frame_index'] + 1 <= len(timing_details): + next_image = data_repo.get_timing_from_frame_number(project_uuid, frame_number=st.session_state['current_frame_index']) + st.info(f"Next image") + display_image(timing_uuid=next_image.uuid, stage=WorkflowStageType.STYLED.value, clickable=False) + + if st.button(f"Preview Interpolation From #{st.session_state['current_frame_index']} to #{st.session_state['current_frame_index']+1}", key=f"Preview Interpolation From #{st.session_state['current_frame_index']} to #{st.session_state['current_frame_index']+1}", use_container_width=True): + create_or_get_single_preview_video( + st.session_state['current_frame_uuid']) + current_frame = data_repo.get_timing_from_uuid(st.session_state['current_frame_uuid']) + st.video(current_frame.timed_clip.location) + + + +def style_cloning_element(timing_details): + open_copier = st.checkbox("Copy styling settings from another frame") + if open_copier is True: + copy1, copy2 = st.columns([1, 1]) + with copy1: + which_frame_to_copy_from = st.number_input("Which frame would you like to copy styling settings from?", min_value=1, max_value=len( + timing_details), value=st.session_state['current_frame_index'], step=1) + if st.button("Copy styling settings from this frame"): + clone_styling_settings(which_frame_to_copy_from - 1, st.session_state['current_frame_uuid']) + st.experimental_rerun() + + with copy2: + display_image( + idx=which_frame_to_copy_from, stage=WorkflowStageType.STYLED.value, clickable=False, 
timing_details=timing_details) + st.caption("Prompt:") + st.caption( + timing_details[which_frame_to_copy_from].prompt) + if timing_details[which_frame_to_copy_from].model is not None: + st.caption("Model:") + st.caption( + timing_details[which_frame_to_copy_from].model.name) def jump_to_single_frame_view_button(display_number, timing_details): if st.button(f"Jump to #{display_number}"): @@ -394,303 +466,7 @@ def reset_zoom_element(): st.session_state['y_shift'] = 0 st.experimental_rerun() -def ai_frame_editing_element(timing_uuid, stage=WorkflowStageType.SOURCE.value): - data_repo = DataRepo() - timing = data_repo.get_timing_from_uuid(timing_uuid) - timing_details: List[InternalFrameTimingObject] = data_repo.get_timing_list_from_project( - timing.project.uuid) - project_settings: InternalSettingObject = data_repo.get_project_setting( - timing.project.uuid) - - if len(timing_details) == 0: - st.info("You need to add key frames first in the Key Frame Selection section.") - else: - main_col_1, main_col_2 = st.columns([1, 2]) - - with main_col_1: - st.write("") - - # initiative value - if "current_frame_uuid" not in st.session_state: - st.session_state['current_frame_uuid'] = timing_details[0].uuid - - - if "edited_image" not in st.session_state: - st.session_state.edited_image = "" - - if stage == WorkflowStageType.STYLED.value and len(timing.alternative_images_list) == 0: - st.info("You need to add a style first in the Style Selection section.") - else: - if stage == WorkflowStageType.SOURCE.value: - editing_image = timing.source_image.location - elif stage == WorkflowStageType.STYLED.value: - variants = timing.alternative_images_list - editing_image = timing.primary_image_location - - width = int(project_settings.width) - height = int(project_settings.height) - - if editing_image == "": - st.error( - f"You don't have a {stage} image yet so you can't edit it.") - else: - with main_col_1: - if 'index_of_type_of_mask_selection' not in st.session_state: - 
st.session_state['index_of_type_of_mask_selection'] = 0 - mask_selection_options = ["Manual Background Selection", "Automated Background Selection", - "Automated Layer Selection", "Re-Use Previous Mask", "Invert Previous Mask"] - type_of_mask_selection = st.radio("How would you like to select what to edit?", mask_selection_options, - horizontal=True, index=st.session_state['index_of_type_of_mask_selection']) - if st.session_state['index_of_type_of_mask_selection'] != mask_selection_options.index(type_of_mask_selection): - st.session_state['index_of_type_of_mask_selection'] = mask_selection_options.index( - type_of_mask_selection) - st.experimental_rerun() - - if "which_layer" not in st.session_state: - st.session_state['which_layer'] = "Background" - st.session_state['which_layer_index'] = 0 - - if type_of_mask_selection == "Automated Layer Selection": - layers = ["Background", "Middleground", "Foreground"] - st.session_state['which_layer'] = st.multiselect( - "Which layers would you like to replace?", layers) - - if type_of_mask_selection == "Manual Background Selection": - if st.session_state['edited_image'] == "": - with main_col_1: - if editing_image.startswith("http"): - canvas_image = r.get(editing_image) - canvas_image = Image.open( - BytesIO(canvas_image.content)) - else: - canvas_image = Image.open(editing_image) - if 'drawing_input' not in st.session_state: - st.session_state['drawing_input'] = 'Magic shapes 🪄' - col1, col2 = st.columns([6, 3]) - - with col1: - st.session_state['drawing_input'] = st.radio( - "Drawing tool:", - ("Make shapes 🪄", "Move shapes 🏋🏾‍♂️", "Make squares □", "Draw lines ✏️"), horizontal=True, - ) - - if st.session_state['drawing_input'] == "Move shapes 🏋🏾‍♂️": - drawing_mode = "transform" - st.info( - "To delete something, just move it outside of the image! 
🥴") - elif st.session_state['drawing_input'] == "Make shapes 🪄": - drawing_mode = "polygon" - st.info("To end a shape, right click!") - elif st.session_state['drawing_input'] == "Draw lines ✏️": - drawing_mode = "freedraw" - st.info("To draw, draw! ") - elif st.session_state['drawing_input'] == "Make squares □": - drawing_mode = "rect" - - with col2: - if drawing_mode == "freedraw": - stroke_width = st.slider( - "Stroke width: ", 1, 25, 12) - else: - stroke_width = 3 - - with main_col_2: - - realtime_update = True - - canvas_result = st_canvas( - fill_color="rgba(0, 0, 0)", - stroke_width=stroke_width, - stroke_color="rgba(0, 0, 0)", - background_color="rgb(255, 255, 255)", - background_image=canvas_image, - update_streamlit=realtime_update, - height=height, - width=width, - drawing_mode=drawing_mode, - display_toolbar=True, - key="full_app", - ) - - if 'image_created' not in st.session_state: - st.session_state['image_created'] = 'no' - - if canvas_result.image_data is not None: - img_data = canvas_result.image_data - im = Image.fromarray( - img_data.astype("uint8"), mode="RGBA") - create_or_update_mask( - st.session_state['current_frame_uuid'], im) - else: - image_file = data_repo.get_file_from_uuid(st.session_state['edited_image']) - image_comparison( - img1=editing_image, - img2=image_file.location, starting_position=5, label1="Original", label2="Edited") - if st.button("Reset Canvas"): - st.session_state['edited_image'] = "" - st.experimental_rerun() - - elif type_of_mask_selection == "Automated Background Selection" or type_of_mask_selection == "Automated Layer Selection" or type_of_mask_selection == "Re-Use Previous Mask" or type_of_mask_selection == "Invert Previous Mask": - with main_col_1: - if type_of_mask_selection in ["Re-Use Previous Mask", "Invert Previous Mask"]: - if not timing_details[st.session_state['current_frame_index'] - 1].mask: - st.info( - "You don't have a previous mask to re-use.") - else: - mask1, mask2 = st.columns([2, 1]) - with 
mask1: - if type_of_mask_selection == "Re-Use Previous Mask": - st.info( - "This will update the **black pixels** in the mask with the pixels from the image you are editing.") - elif type_of_mask_selection == "Invert Previous Mask": - st.info( - "This will update the **white pixels** in the mask with the pixels from the image you are editing.") - st.image( - timing_details[st.session_state['current_frame_index'] - 1].mask.location, use_column_width=True) - - with main_col_2: - if st.session_state['edited_image'] == "": - st.image(editing_image, use_column_width=True) - else: - image_file = data_repo.get_file_from_uuid(st.session_state['edited_image']) - image_comparison( - img1=editing_image, - img2=image_file.location, starting_position=5, label1="Original", label2="Edited") - if st.button("Reset Canvas"): - st.session_state['edited_image'] = "" - st.experimental_rerun() - - with main_col_1: - - if "type_of_mask_replacement" not in st.session_state: - st.session_state["type_of_mask_replacement"] = "Replace With Image" - st.session_state["index_of_type_of_mask_replacement"] = 0 - - types_of_mask_replacement = [ - "Inpainting", "Replace With Image"] - st.session_state["type_of_mask_replacement"] = st.radio( - "Select type of edit", types_of_mask_replacement, horizontal=True, index=st.session_state["index_of_type_of_mask_replacement"]) - - if st.session_state["index_of_type_of_mask_replacement"] != types_of_mask_replacement.index(st.session_state["type_of_mask_replacement"]): - st.session_state["index_of_type_of_mask_replacement"] = types_of_mask_replacement.index( - st.session_state["type_of_mask_replacement"]) - st.experimental_rerun() - if st.session_state["type_of_mask_replacement"] == "Replace With Image": - prompt = "" - negative_prompt = "" - background_list = [f for f in os.listdir( - f'videos/{timing.project.uuid}/assets/resources/backgrounds') if f.endswith('.png')] - background_list = [f for f in os.listdir( - 
f'videos/{timing.project.uuid}/assets/resources/backgrounds') if f.endswith('.png')] - sources_of_images = ["Uploaded", "From Other Frame"] - if 'index_of_source_of_image' not in st.session_state: - st.session_state['index_of_source_of_image'] = 0 - source_of_image = st.radio("Select type of image", sources_of_images, - horizontal=True, index=st.session_state['index_of_source_of_image']) - - if st.session_state['index_of_source_of_image'] != sources_of_images.index(source_of_image): - st.session_state['index_of_source_of_image'] = sources_of_images.index( - source_of_image) - st.experimental_rerun() - - if source_of_image == "Uploaded": - btn1, btn2 = st.columns([1, 1]) - with btn1: - uploaded_files = st.file_uploader( - "Add more background images here", accept_multiple_files=True) - if st.button("Upload Backgrounds"): - for uploaded_file in uploaded_files: - with open(os.path.join(f"videos/{timing.project.uuid}/assets/resources/backgrounds", uploaded_file.name), "wb") as f: - f.write(uploaded_file.getbuffer()) - st.success( - "Your backgrounds are uploaded file - they should appear in the dropdown.") - background_list.append( - uploaded_file.name) - time.sleep(1.5) - st.experimental_rerun() - with btn2: - background_selection = st.selectbox( - "Range background", background_list) - background_image = f'videos/{timing.project.uuid}/assets/resources/backgrounds/{background_selection}' - if background_list != []: - st.image(f"{background_image}", - use_column_width=True) - elif source_of_image == "From Other Frame": - btn1, btn2 = st.columns([1, 1]) - with btn1: - which_stage_to_use = st.radio( - "Select stage to use:", WorkflowStageType.value_list()) - which_image_to_use = st.number_input( - "Select image to use:", min_value=0, max_value=len(timing_details)-1, value=0) - if which_stage_to_use == WorkflowStageType.SOURCE.value: - background_image = timing_details[which_image_to_use].source_image.location - - elif which_stage_to_use == WorkflowStageType.STYLED.value: 
- background_image = timing_details[which_image_to_use].primary_image_location - with btn2: - st.image(background_image, - use_column_width=True) - - elif st.session_state["type_of_mask_replacement"] == "Inpainting": - btn1, btn2 = st.columns([1, 1]) - with btn1: - prompt = st.text_area("Prompt:", help="Describe the whole image, but focus on the details you want changed!", - value=project_settings.default_prompt) - with btn2: - negative_prompt = st.text_area( - "Negative Prompt:", help="Enter any things you want to make the model avoid!", value=project_settings.default_negative_prompt) - - edit1, edit2 = st.columns(2) - - with edit1: - if st.button(f'Run Edit On Current Image'): - if st.session_state["type_of_mask_replacement"] == "Inpainting": - edited_image = execute_image_edit(type_of_mask_selection, st.session_state["type_of_mask_replacement"], - "", editing_image, prompt, negative_prompt, width, height, st.session_state['which_layer'], st.session_state['current_frame_uuid']) - st.session_state['edited_image'] = edited_image.uuid - elif st.session_state["type_of_mask_replacement"] == "Replace With Image": - edited_image = execute_image_edit(type_of_mask_selection, st.session_state["type_of_mask_replacement"], - background_image, editing_image, "", "", width, height, st.session_state['which_layer'], st.session_state['current_frame_uuid']) - st.session_state['edited_image'] = edited_image.uuid - st.experimental_rerun() - - with edit2: - if st.session_state['edited_image'] != "": - if st.button("Promote Last Edit", type="primary"): - if stage == WorkflowStageType.SOURCE.value: - data_repo.update_specific_timing( - st.session_state['current_frame_uuid'], source_image_id=st.session_state['edited_image']) - elif stage == WorkflowStageType.STYLED.value: - number_of_image_variants = add_image_variant( - st.session_state['edited_image'], st.session_state['current_frame_uuid']) - promote_image_variant( - st.session_state['current_frame_uuid'], number_of_image_variants - 
1) - st.session_state['edited_image'] = "" - st.experimental_rerun() - else: - if st.button("Run Edit & Promote"): - if st.session_state["type_of_mask_replacement"] == "Inpainting": - edited_image = execute_image_edit(type_of_mask_selection, st.session_state["type_of_mask_replacement"], - "", editing_image, prompt, negative_prompt, width, height, st.session_state['which_layer'], st.session_state['current_frame_uuid']) - st.session_state['edited_image'] = edited_image.uuid - elif st.session_state["type_of_mask_replacement"] == "Replace With Image": - edited_image = execute_image_edit(type_of_mask_selection, st.session_state["type_of_mask_replacement"], - background_image, editing_image, "", "", width, height, st.session_state['which_layer'], st.session_state['current_frame_uuid']) - st.session_state['edited_image'] = edited_image.uuid - - if stage == WorkflowStageType.SOURCE.value: - data_repo.update_specific_timing( - st.session_state['current_frame_uuid'], source_image_id=st.session_state['edited_image']) - elif stage == WorkflowStageType.STYLED.value: - number_of_image_variants = add_image_variant( - edited_image.uuid, st.session_state['current_frame_uuid']) - promote_image_variant( - st.session_state['current_frame_uuid'], number_of_image_variants - 1) - - st.session_state['edited_image'] = "" - st.success("Image promoted!") - st.experimental_rerun() # cropped_img here is a PIL image object def inpaint_in_black_space_element(cropped_img, project_uuid, stage=WorkflowStageType.SOURCE.value): @@ -811,13 +587,18 @@ def move_frame(direction, timing_uuid): if direction == "Up": if timing.aux_frame_index == 0: + st.error("This is the first frame") + time.sleep(1) return data_repo.update_specific_timing(timing.uuid, aux_frame_index=timing.aux_frame_index - 1) elif direction == "Down": + timing_list = data_repo.get_timing_list_from_project(project_uuid=timing.project.uuid) if timing.aux_frame_index == len(timing_list) - 1: + st.error("This is the last frame") + 
time.sleep(1) return data_repo.update_specific_timing(timing.uuid, aux_frame_index=timing.aux_frame_index + 1) @@ -839,6 +620,11 @@ def delete_frame(timing_uuid): data_repo.update_specific_timing( next_timing.uuid, timed_clip_id=None) + # If the deleted frame is the first one, set the time of the next frame to 0.00 + if timing.aux_frame_index == 0 and next_timing: + data_repo.update_specific_timing( + next_timing.uuid, frame_time=0.00) + data_repo.delete_timing_from_uuid(timing.uuid) if timing.aux_frame_index == len(timing_details) - 1: @@ -1024,213 +810,7 @@ def create_or_update_mask(timing_uuid, image) -> InternalFileObject: return timing.mask.location # adds the image file in variant (alternative images) list -def drawing_mode(timing_details,project_settings,project_uuid,stage=WorkflowStageType.STYLED.value): - - data_repo = DataRepo() - - canvas1, canvas2 = st.columns([1, 1.5]) - timing = data_repo.get_timing_from_uuid( - st.session_state['current_frame_uuid']) - - with canvas1: - width = int(project_settings.width) - height = int(project_settings.height) - - if timing.source_image and timing.source_image.location != "": - if timing.source_image.location.startswith("http"): - canvas_image = r.get( - timing.source_image.location) - canvas_image = Image.open( - BytesIO(canvas_image.content)) - else: - canvas_image = Image.open( - timing.source_image.location) - else: - canvas_image = Image.new( - "RGB", (width, height), "white") - if 'drawing_input' not in st.session_state: - st.session_state['drawing_input'] = 'Magic shapes 🪄' - col1, col2 = st.columns([6, 5]) - - with col1: - st.session_state['drawing_input'] = st.radio( - "Drawing tool:", - ("Draw lines ✏️", "Erase Lines ❌", "Make shapes 🪄", "Move shapes 🏋🏾‍♂️", "Make Lines ║", "Make squares □"), horizontal=True, - ) - - if st.session_state['drawing_input'] == "Move shapes 🏋🏾‍♂️": - drawing_mode = "transform" - - elif st.session_state['drawing_input'] == "Make shapes 🪄": - drawing_mode = "polygon" - - elif 
st.session_state['drawing_input'] == "Draw lines ✏️": - drawing_mode = "freedraw" - - elif st.session_state['drawing_input'] == "Erase Lines ❌": - drawing_mode = "freedraw" - - elif st.session_state['drawing_input'] == "Make Lines ║": - drawing_mode = "line" - - elif st.session_state['drawing_input'] == "Make squares □": - drawing_mode = "rect" - - - with col2: - - stroke_width = st.slider( - "Stroke width: ", 1, 100, 2) - if st.session_state['drawing_input'] == "Erase Lines ❌": - stroke_colour = "#ffffff" - else: - stroke_colour = st.color_picker( - "Stroke color hex: ", value="#000000") - fill = st.checkbox("Fill shapes", value=False) - if fill == True: - fill_color = st.color_picker( - "Fill color hex: ") - else: - fill_color = "" - - - st.markdown("***") - - - threshold1, threshold2 = st.columns([1, 1]) - with threshold1: - low_threshold = st.number_input( - "Low Threshold", min_value=0, max_value=255, value=100, step=1) - with threshold2: - high_threshold = st.number_input( - "High Threshold", min_value=0, max_value=255, value=200, step=1) - - if 'canny_image' not in st.session_state: - st.session_state['canny_image'] = None - - if st.button("Extract Canny From image"): - if stage == WorkflowStageType.SOURCE.value: - image_path = timing_details[st.session_state['current_frame_index'] - 1].source_image.location - - elif stage == WorkflowStageType.STYLED.value: - image_path = timing_details[st.session_state['current_frame_index'] - 1].primary_image_location - - - canny_image = extract_canny_lines( - image_path, project_uuid, low_threshold, high_threshold) - - st.session_state['canny_image'] = canny_image.uuid - if st.session_state['canny_image']: - canny_image = data_repo.get_file_from_uuid(st.session_state['canny_image']) - - canny_action_1, canny_action_2 = st.columns([2, 1]) - with canny_action_1: - st.image(canny_image.location) - - if st.button(f"Make Into Guidance Image"): - data_repo.update_specific_timing(st.session_state['current_frame_uuid'], 
source_image_id=st.session_state['canny_image']) - st.session_state['reset_canvas'] = True - st.session_state['canny_image'] = None - st.experimental_rerun() - - with canvas2: - realtime_update = True - - if "reset_canvas" not in st.session_state: - st.session_state['reset_canvas'] = False - - if st.session_state['reset_canvas'] != True: - canvas_result = st_canvas( - fill_color=fill_color, - stroke_width=stroke_width, - stroke_color=stroke_colour, - background_color="rgb(255, 255, 255)", - background_image=canvas_image, - update_streamlit=realtime_update, - height=height, - width=width, - drawing_mode=drawing_mode, - display_toolbar=True, - key="full_app_draw", - ) - - if 'image_created' not in st.session_state: - st.session_state['image_created'] = 'no' - - if canvas_result.image_data is not None: - img_data = canvas_result.image_data - im = Image.fromarray( - img_data.astype("uint8"), mode="RGBA") - else: - st.session_state['reset_canvas'] = False - canvas_result = st_canvas() - time.sleep(0.1) - st.experimental_rerun() - if canvas_result is not None: - st.write("You can save the image below") - if canvas_result.json_data is not None and not canvas_result.json_data.get('objects'): - st.button("Save New Image", key="save_canvas", disabled=True, help="Draw something first") - else: - if st.button("Save New Image", key="save_canvas_active",type="primary"): - if canvas_result.image_data is not None: - # overlay the canvas image on top of the canny image and save the result - # if canny image is from a url, then we need to download it first - if timing.source_image and timing.source_image.location: - if timing.source_image.location.startswith("http"): - canny_image = r.get( - timing.source_image.location) - canny_image = Image.open( - BytesIO(canny_image.content)) - else: - canny_image = Image.open( - timing.source_image.location) - else: - canny_image = Image.new( - "RGB", (width, height), "white") - - canny_image = canny_image.convert("RGBA") - # canvas_image = 
canvas_image.convert("RGBA") - canvas_image = im - canvas_image = canvas_image.convert("RGBA") - - # converting the images to the same size and mode - if canny_image.size != canvas_image.size: - canny_image = canny_image.resize( - canvas_image.size) - - if canny_image.mode != canvas_image.mode: - canny_image = canny_image.convert( - canvas_image.mode) - - new_canny_image = Image.alpha_composite( - canny_image, canvas_image) - if new_canny_image.mode != "RGB": - new_canny_image = new_canny_image.convert( - "RGB") - - unique_file_name = str(uuid.uuid4()) + ".png" - file_location = f"videos/{timing.project.uuid}/assets/resources/masks/{unique_file_name}" - hosted_url = save_or_host_file(new_canny_image, file_location) - file_data = { - "name": str(uuid.uuid4()) + ".png", - "type": InternalFileType.IMAGE.value, - "project_id": project_uuid - } - - if hosted_url: - file_data.update({'hosted_url': hosted_url}) - else: - file_data.update({'local_path': file_location}) - - canny_image = data_repo.create_file( - **file_data) - data_repo.update_specific_timing( - st.session_state['current_frame_uuid'], source_image_id=canny_image.uuid) - st.success("New Canny Image Saved") - st.session_state['reset_canvas'] = True - time.sleep(1) - st.experimental_rerun() def add_image_variant(image_file_uuid: str, timing_uuid: str): data_repo = DataRepo() diff --git a/ui_components/setup.py b/ui_components/setup.py index b37889da..94a4d182 100644 --- a/ui_components/setup.py +++ b/ui_components/setup.py @@ -13,6 +13,7 @@ from streamlit_option_menu import option_menu from ui_components.models import InternalAppSettingObject from utils.common_utils import create_working_assets, get_current_user_uuid, reset_project_state +from utils import st_memory from utils.data_repo.data_repo import DataRepo @@ -33,34 +34,21 @@ def setup_app_ui(): sections = ["Open Project", "App Settings", "New Project"] - if "section" not in st.session_state: - st.session_state["section"] = sections[0] - 
st.session_state['change_section'] = False - - if st.session_state['change_section'] == True: - st.session_state['section_index'] = sections.index( - st.session_state["section"]) - else: - st.session_state['section_index'] = None - with h2: st.write("") - st.session_state["section"] = option_menu( + st.session_state["section"] = st_memory.menu( "", sections, icons=['cog', 'cog', 'cog'], menu_icon="ellipsis-v", - orientation="horizontal", + default_index=st.session_state.get('section_index', 0), key="app_settings", + orientation="horizontal", styles={ "nav-link": {"font-size": "12px", "margin": "0px", "--hover-color": "#eee"}, "nav-link-selected": {"background-color": "grey"} - }, - manual_select=st.session_state['section_index'] + } ) - - if st.session_state['change_section'] == True: - st.session_state['change_section'] = False project_list = data_repo.get_all_project_list( user_id=get_current_user_uuid()) @@ -118,7 +106,7 @@ def setup_app_ui(): with st.sidebar: main_view_types = ["Creative Process", "Tools & Settings", "Video Rendering"] - st.session_state['main_view_type'] = option_menu(None, main_view_types, icons=['search-heart', 'tools', "play-circle", 'stopwatch'], menu_icon="cast", default_index=0, key="main_view_type_name", orientation="horizontal", styles={ + st.session_state['main_view_type'] = st_memory.menu(None, main_view_types, icons=['search-heart', 'tools', "play-circle", 'stopwatch'], menu_icon="cast", default_index=0, key="main_view_type_name", orientation="horizontal", styles={ "nav-link": {"font-size": "15px", "margin": "0px", "--hover-color": "#eee"}, "nav-link-selected": {"background-color": "red"}}) mainheader1, mainheader2 = st.columns([3, 2]) diff --git a/ui_components/widgets/drawing_element.py b/ui_components/widgets/drawing_element.py new file mode 100644 index 00000000..92e891cf --- /dev/null +++ b/ui_components/widgets/drawing_element.py @@ -0,0 +1,254 @@ +from io import BytesIO +import uuid +import json +import time +import streamlit 
as st +from PIL import Image +from streamlit_drawable_canvas import st_canvas +from ui_components.constants import WorkflowStageType +from utils.data_repo.data_repo import DataRepo +from ui_components.methods.common_methods import extract_canny_lines +from shared.constants import InternalFileType +from ui_components.methods.file_methods import save_or_host_file + + +from utils import st_memory + + +def drawing_element(timing_details,project_settings,project_uuid,stage=WorkflowStageType.STYLED.value): + + which_stage_to_draw_on = st_memory.radio("Which stage to work on?", ["Styled Image", "Guidance Image"], horizontal=True, key="which_stage_drawing") + + if which_stage_to_draw_on == "Styled Image": + stage=WorkflowStageType.STYLED.value + elif which_stage_to_draw_on == "Guidance Image": + stage=WorkflowStageType.SOURCE.value + + if stage == WorkflowStageType.SOURCE.value: + image_path = timing_details[st.session_state['current_frame_index'] - 1].source_image.location + elif stage == WorkflowStageType.STYLED.value: + image_path = timing_details[st.session_state['current_frame_index'] - 1].primary_image_location + data_repo = DataRepo() + + canvas1, canvas2 = st.columns([1, 1.5]) + timing = data_repo.get_timing_from_uuid( + st.session_state['current_frame_uuid']) + + with canvas1: + width = int(project_settings.width) + height = int(project_settings.height) + + if timing.source_image and timing.source_image.location != "": + if timing.source_image.location.startswith("http"): + canvas_image = r.get( + timing.source_image.location) + canvas_image = Image.open( + BytesIO(canvas_image.content)) + else: + canvas_image = Image.open( + image_path) + else: + canvas_image = Image.new( + "RGB", (width, height), "white") + if 'drawing_input' not in st.session_state: + st.session_state['drawing_input'] = 'Magic shapes 🪄' + col1, col2 = st.columns([6, 5]) + + with col1: + st.session_state['drawing_input'] = st.radio( + "Drawing tool:", + ("Draw lines ✏️", "Erase Lines ❌", "Make 
shapes 🪄", "Move shapes 🏋🏾‍♂️", "Make Lines ║", "Make squares □"), horizontal=True, + ) + + if st.session_state['drawing_input'] == "Move shapes 🏋🏾‍♂️": + drawing_mode = "transform" + + elif st.session_state['drawing_input'] == "Make shapes 🪄": + drawing_mode = "polygon" + + elif st.session_state['drawing_input'] == "Draw lines ✏️": + drawing_mode = "freedraw" + + elif st.session_state['drawing_input'] == "Erase Lines ❌": + drawing_mode = "freedraw" + + elif st.session_state['drawing_input'] == "Make Lines ║": + drawing_mode = "line" + + elif st.session_state['drawing_input'] == "Make squares □": + drawing_mode = "rect" + + + with col2: + + stroke_width = st.slider( + "Stroke width: ", 1, 100, 2) + if st.session_state['drawing_input'] == "Erase Lines ❌": + stroke_colour = "#ffffff" + else: + stroke_colour = st.color_picker( + "Stroke color hex: ", value="#000000") + fill = st.checkbox("Fill shapes", value=False) + if fill == True: + fill_color = st.color_picker( + "Fill color hex: ") + else: + fill_color = "" + + + st.markdown("***") + + + threshold1, threshold2 = st.columns([1, 1]) + with threshold1: + low_threshold = st.number_input( + "Low Threshold", min_value=0, max_value=255, value=100, step=1) + with threshold2: + high_threshold = st.number_input( + "High Threshold", min_value=0, max_value=255, value=200, step=1) + + if 'canny_image' not in st.session_state: + st.session_state['canny_image'] = None + + if st.button("Extract Canny From image"): + if stage == WorkflowStageType.SOURCE.value: + image_path = timing_details[st.session_state['current_frame_index'] - 1].source_image.location + + elif stage == WorkflowStageType.STYLED.value: + image_path = timing_details[st.session_state['current_frame_index'] - 1].primary_image_location + + + canny_image = extract_canny_lines( + image_path, project_uuid, low_threshold, high_threshold) + + st.session_state['canny_image'] = canny_image.uuid + + if st.session_state['canny_image']: + canny_image = 
data_repo.get_file_from_uuid(st.session_state['canny_image']) + + canny_action_1, canny_action_2 = st.columns([2, 1]) + with canny_action_1: + st.image(canny_image.location) + + if st.button(f"Make Into Guidance Image"): + data_repo.update_specific_timing(st.session_state['current_frame_uuid'], source_image_id=st.session_state['canny_image']) + st.session_state['reset_canvas'] = True + st.session_state['canny_image'] = None + st.experimental_rerun() + + with canvas2: + realtime_update = True + + if "reset_canvas" not in st.session_state: + st.session_state['reset_canvas'] = False + + if st.session_state['reset_canvas'] != True: + canvas_result = st_canvas( + fill_color=fill_color, + stroke_width=stroke_width, + stroke_color=stroke_colour, + background_color="rgb(255, 255, 255)", + background_image=canvas_image, + update_streamlit=realtime_update, + height=height, + width=width, + drawing_mode=drawing_mode, + display_toolbar=True, + key="full_app_draw", + ) + + if 'image_created' not in st.session_state: + st.session_state['image_created'] = 'no' + + if canvas_result.image_data is not None: + img_data = canvas_result.image_data + im = Image.fromarray( + img_data.astype("uint8"), mode="RGBA") + else: + st.session_state['reset_canvas'] = False + canvas_result = st_canvas() + time.sleep(0.1) + st.experimental_rerun() + if canvas_result is not None: + if canvas_result.json_data is not None and not canvas_result.json_data.get('objects'): + st.button("Save New Image", key="save_canvas", disabled=True, help="Draw something first") + else: + if st.button("Save New Image", key="save_canvas_active",type="primary"): + if canvas_result.image_data is not None: + # overlay the canvas image on top of the canny image and save the result + # if canny image is from a url, then we need to download it first + if stage == WorkflowStageType.SOURCE.value: + if timing.source_image and timing.source_image.location: + if timing.source_image.location.startswith("http"): + canny_image = r.get( 
+ timing.source_image.location) + canny_image = Image.open( + BytesIO(canny_image.content)) + else: + canny_image = Image.open( + timing.source_image.location) + else: + canny_image = Image.new( + "RGB", (width, height), "white") + elif stage == WorkflowStageType.STYLED.value: + if timing.primary_image_location: + if timing.primary_image_location.startswith("http"): + canny_image = r.get( + timing.primary_image_location) + canny_image = Image.open( + BytesIO(canny_image.content)) + else: + canny_image = Image.open( + timing.primary_image_location) + else: + canny_image = Image.new( + "RGB", (width, height), "white") + + canny_image = canny_image.convert("RGBA") + # canvas_image = canvas_image.convert("RGBA") + canvas_image = im + canvas_image = canvas_image.convert("RGBA") + + # converting the images to the same size and mode + if canny_image.size != canvas_image.size: + canny_image = canny_image.resize( + canvas_image.size) + + if canny_image.mode != canvas_image.mode: + canny_image = canny_image.convert( + canvas_image.mode) + + new_canny_image = Image.alpha_composite( + canny_image, canvas_image) + if new_canny_image.mode != "RGB": + new_canny_image = new_canny_image.convert( + "RGB") + + unique_file_name = str(uuid.uuid4()) + ".png" + file_location = f"videos/{timing.project.uuid}/assets/resources/masks/{unique_file_name}" + hosted_url = save_or_host_file(new_canny_image, file_location) + file_data = { + "name": str(uuid.uuid4()) + ".png", + "type": InternalFileType.IMAGE.value, + "project_id": project_uuid + } + + if hosted_url: + file_data.update({'hosted_url': hosted_url}) + else: + file_data.update({'local_path': file_location}) + + canny_image = data_repo.create_file( + **file_data) + if stage == WorkflowStageType.SOURCE.value: + data_repo.update_specific_timing( + st.session_state['current_frame_uuid'], source_image_id=canny_image.uuid) + elif stage == WorkflowStageType.STYLED.value: + data_repo.update_specific_timing( + 
st.session_state['current_frame_uuid'], primary_image_id=canny_image.uuid) + + + st.success("New Canny Image Saved") + st.session_state['reset_canvas'] = True + time.sleep(1) + st.experimental_rerun() \ No newline at end of file diff --git a/ui_components/widgets/inpainting_element.py b/ui_components/widgets/inpainting_element.py index b2ed299a..900c3af5 100644 --- a/ui_components/widgets/inpainting_element.py +++ b/ui_components/widgets/inpainting_element.py @@ -13,7 +13,7 @@ from utils import st_memory from utils.data_repo.data_repo import DataRepo from utils import st_memory -from ui_components.methods.common_methods import execute_image_edit +from ui_components.methods.common_methods import execute_image_edit, create_or_update_mask from ui_components.models import InternalFrameTimingObject, InternalSettingObject from streamlit_image_comparison import image_comparison diff --git a/utils/st_memory.py b/utils/st_memory.py index d24ffb5f..ff1d9f92 100644 --- a/utils/st_memory.py +++ b/utils/st_memory.py @@ -2,6 +2,7 @@ from shared.logging.constants import LoggingType from shared.logging.logging import AppLogger from utils.data_repo.data_repo import DataRepo +from streamlit_option_menu import option_menu logger = AppLogger() @@ -69,4 +70,18 @@ def toggle(label, key=None, help=None, on_change=None, disabled=False, label_vis st.session_state[key] = selection st.experimental_rerun() + return selection + + +def menu(menu_title,options, icons=None, menu_icon=None, default_index=0, key=None, help=None, on_change=None, disabled=False, orientation="horizontal", default_value=0, styles=None): + + if key not in st.session_state: + st.session_state[key] = default_value + + selection = option_menu(menu_title,options=options, icons=icons, menu_icon=menu_icon, orientation=orientation, default_index=st.session_state[key], styles=styles) + + if options.index(selection) != st.session_state[key]: + st.session_state[key] = options.index(selection) + st.experimental_rerun() + return 
selection \ No newline at end of file From 69e3f793fee8d7a42ada684cf168eb9dbc945050 Mon Sep 17 00:00:00 2001 From: peter942 Date: Fri, 29 Sep 2023 03:51:12 +0200 Subject: [PATCH 021/164] Lots --- app.py | 5 +- shared/constants.py | 4 +- ui_components/components/app_settings_page.py | 4 +- .../components/custom_models_page.py | 4 +- .../components/frame_styling_page.py | 50 ++++---- ui_components/components/new_project_page.py | 4 +- .../components/project_settings_page.py | 4 +- .../components/video_rendering_page.py | 12 +- ui_components/methods/common_methods.py | 109 +++++++++++++++++- ui_components/setup.py | 8 +- ui_components/widgets/attach_audio_element.py | 2 +- ui_components/widgets/frame_selector.py | 4 +- ui_components/widgets/frame_time_selector.py | 15 ++- ui_components/widgets/list_view.py | 6 +- ui_components/widgets/timeline_view.py | 95 +++++++++------ utils/st_memory.py | 44 +++++-- utils/third_party_auth/google/google_auth.py | 2 +- 17 files changed, 254 insertions(+), 118 deletions(-) diff --git a/app.py b/app.py index d2a8c5a2..4f6b6ae0 100644 --- a/app.py +++ b/app.py @@ -44,7 +44,8 @@ def main(): auth_details = get_url_param(AUTH_TOKEN) if (not auth_details or auth_details == 'None')\ and SERVER != ServerType.DEVELOPMENT.value: - st.subheader("Login with google to proceed") + st.markdown("# :red[ba]:green[no]:orange[do]:blue[co]") + st.subheader("Login with Google to proceed") auth_url = get_google_auth_url() st.markdown(auth_url, unsafe_allow_html=True) @@ -66,7 +67,7 @@ def main(): delete_url_param(AUTH_TOKEN) st.error("please login again") else: - # initializing project constants + project_init() data_repo = DataRepo() diff --git a/shared/constants.py b/shared/constants.py index 21db07b7..6c5eedea 100644 --- a/shared/constants.py +++ b/shared/constants.py @@ -61,9 +61,9 @@ class InternalFileTag(ExtendedEnum): TEMP_IMAGE = 'temp' class AnimationStyleType(ExtendedEnum): - INTERPOLATION = "Interpolation" - DIRECT_MORPHING = "Direct Morphing" 
+ INTERPOLATION = "Interpolation" IMAGE_TO_VIDEO = "Image to Video" + DIRECT_MORPHING = "None" ##################### global constants ##################### diff --git a/ui_components/components/app_settings_page.py b/ui_components/components/app_settings_page.py index 2d93d3e9..3664b877 100644 --- a/ui_components/components/app_settings_page.py +++ b/ui_components/components/app_settings_page.py @@ -13,7 +13,7 @@ def app_settings_page(): app_secrets = data_repo.get_app_secrets_from_user_uuid() if SERVER == ServerType.DEVELOPMENT.value: - with st.expander("Replicate API Keys:"): + with st.expander("Replicate API Keys:", expanded=True): replicate_username = st.text_input("replicate_username", value = app_secrets["replicate_username"]) replicate_key = st.text_input("replicate_key", value = app_secrets["replicate_key"]) if st.button("Save Settings"): @@ -22,7 +22,7 @@ def app_settings_page(): st.experimental_rerun() if SERVER != ServerType.DEVELOPMENT.value: - with st.expander("Purchase Credits"): + with st.expander("Purchase Credits", expanded=True): user_credits = get_current_user(fresh_fetch=True)['total_credits'] st.write(f"Total Credits: {user_credits}") c1, c2 = st.columns([1,1]) diff --git a/ui_components/components/custom_models_page.py b/ui_components/components/custom_models_page.py index 769132cb..621532d8 100644 --- a/ui_components/components/custom_models_page.py +++ b/ui_components/components/custom_models_page.py @@ -12,7 +12,7 @@ def custom_models_page(project_uuid): data_repo = DataRepo() - with st.expander("Existing models"): + with st.expander("Existing models", expanded=True): st.subheader("Existing Models:") @@ -60,7 +60,7 @@ def custom_models_page(project_uuid): st.image(model.training_image_list[2].location) st.markdown("***") - with st.expander("Train a new model"): + with st.expander("Train a new model", expanded=True): st.subheader("Train a new model:") type_of_model = st.selectbox("Type of model:", [AIModelCategory.DREAMBOOTH.value, 
AIModelCategory.LORA.value], help="If you'd like to use other methods for model training, let us know - or implement it yourself :)") diff --git a/ui_components/components/frame_styling_page.py b/ui_components/components/frame_styling_page.py index bd325fcd..5a91c3b7 100644 --- a/ui_components/components/frame_styling_page.py +++ b/ui_components/components/frame_styling_page.py @@ -1,10 +1,8 @@ import streamlit as st -import time -from PIL import Image + from ui_components.methods.common_methods import add_key_frame,compare_to_previous_and_next_frame,compare_to_source_frame,style_cloning_element from ui_components.methods.ml_methods import trigger_restyling_process - from ui_components.widgets.cropping_element import cropping_selector_element from ui_components.widgets.frame_clip_generation_elements import current_preview_video_element, update_animation_style_element from ui_components.widgets.frame_selector import frame_selector_widget @@ -18,20 +16,18 @@ from ui_components.widgets.inpainting_element import inpainting_element from ui_components.widgets.drawing_element import drawing_element from ui_components.widgets.list_view import list_view_set_up, page_toggle, styling_list_view,motion_list_view -from streamlit_option_menu import option_menu from utils import st_memory import math from ui_components.constants import WorkflowStageType -from utils.constants import ImageStage from utils.data_repo.data_repo import DataRepo def frame_styling_page(mainheader2, project_uuid: str): - data_repo = DataRepo() - + data_repo = DataRepo() + timing_details = data_repo.get_timing_list_from_project(project_uuid) project_settings = data_repo.get_project_setting(project_uuid) @@ -47,12 +43,13 @@ def frame_styling_page(mainheader2, project_uuid: str): st.session_state['num_inference_steps'] = project_settings.default_num_inference_steps st.session_state['transformation_stage'] = project_settings.default_stage st.session_state['show_comparison'] = "Don't show" - - if 
"current_frame_uuid" not in st.session_state: + + if "current_frame_uuid" not in st.session_state: timing = data_repo.get_timing_list_from_project(project_uuid)[0] st.session_state['current_frame_uuid'] = timing.uuid + if 'frame_styling_view_type' not in st.session_state: st.session_state['frame_styling_view_type'] = "Individual View" st.session_state['frame_styling_view_type_index'] = 0 @@ -114,10 +111,8 @@ def frame_styling_page(mainheader2, project_uuid: str): display_image(timing_uuid=st.session_state['current_frame_uuid'], stage=WorkflowStageType.STYLED.value, clickable=False) st.markdown("***") - - styling_views = ["Generate Variants", "Crop, Move & Rotate Image", "Inpainting & BG Removal","Draw On Image"] - - st.session_state['styling_view'] = st_memory.menu('',styling_views, icons=['magic', 'crop', "paint-bucket", 'pencil'], menu_icon="cast", default_index=st.session_state.get('styling_view_index', 0), key="styling_view_selector", orientation="horizontal", styles={"nav-link": {"font-size": "15px", "margin": "0px", "--hover-color": "#eee"}, "nav-link-selected": {"background-color": "#66A9BE"}}) + + st.session_state['styling_view'] = st_memory.menu('',["Generate Variants", "Crop, Move & Rotate Image", "Inpainting & BG Removal","Draw On Image"], icons=['magic', 'crop', "paint-bucket", 'pencil'], menu_icon="cast", default_index=st.session_state.get('styling_view_index', 0), key="styling_view_selector", orientation="horizontal", styles={"nav-link": {"font-size": "15px", "margin": "0px", "--hover-color": "#eee"}, "nav-link-selected": {"background-color": "#66A9BE"}}) if st.session_state['styling_view'] == "Generate Variants": @@ -181,9 +176,7 @@ def frame_styling_page(mainheader2, project_uuid: str): with st.expander("📝 Draw On Image", expanded=True): drawing_element(timing_details,project_settings,project_uuid) - - - + with st.expander("➕ Add Key Frame", expanded=True): selected_image, inherit_styling_settings, how_long_after, which_stage_for_starting_image = 
add_key_frame_element(timing_details, project_uuid) @@ -192,25 +185,23 @@ def frame_styling_page(mainheader2, project_uuid: str): add_key_frame(selected_image, inherit_styling_settings, how_long_after, which_stage_for_starting_image) st.experimental_rerun() - - elif st.session_state['frame_styling_view_type'] == "List View": st.markdown("---") - header_col_1, header_col_2, header_col_3 = st.columns([1, 5, 1]) + header_col_1, header_col_2, header_col_3, header_col_4, header_col_5 = st.columns([1,0.5,5, 1, 1]) with header_col_1: st.session_state['list_view_type'] = st_memory.radio("View type:", options=["Timeline View","Detailed View"], key="list_view_type_slider") - with header_col_3: + with header_col_5: shift_frames_setting = st.toggle("Shift Frames", help="If set to True, it will shift the frames after your adjustment forward by the amount of time you move.") if st.session_state['list_view_type'] == "Detailed View": - with header_col_2: + with header_col_4: num_pages, items_per_page = list_view_set_up(timing_details, project_uuid) - start_index, end_index = page_toggle(num_pages, items_per_page,project_uuid) + start_index, end_index = page_toggle(num_pages, items_per_page,project_uuid, position='top') st.markdown("***") @@ -222,23 +213,24 @@ def frame_styling_page(mainheader2, project_uuid: str): styling_list_view(start_index, end_index, shift_frames_setting, project_uuid) st.markdown("***") - + # Update the current page in session state elif st.session_state['page'] == "Motion": motion_list_view(start_index, end_index, shift_frames_setting, project_uuid) + start_index, end_index = page_toggle(num_pages, items_per_page,project_uuid, position='bottom') + elif st.session_state['list_view_type'] == "Timeline View": - with header_col_2: - items_per_row = st.slider("How many frames per row?", min_value=1, max_value=10, value=5, step=1, key="items_per_row") - with header_col_3: - expand_all = st_memory.toggle("Expand All", key="expand_all") + with st.sidebar: + 
styling_element(st.session_state['current_frame_uuid'], view_type="List") + if st.session_state['page'] == "Styling": - timeline_view(shift_frames_setting, project_uuid, items_per_row,expand_all,"Styling") + timeline_view(shift_frames_setting, project_uuid,"Styling",header_col_3,header_col_4) elif st.session_state['page'] == "Motion": - timeline_view(shift_frames_setting, project_uuid, items_per_row,expand_all,"Motion") + timeline_view(shift_frames_setting, project_uuid,"Motion",header_col_3,header_col_4) diff --git a/ui_components/components/new_project_page.py b/ui_components/components/new_project_page.py index 5eeb8204..97297b39 100644 --- a/ui_components/components/new_project_page.py +++ b/ui_components/components/new_project_page.py @@ -109,8 +109,8 @@ def new_project_page(): st.session_state["project_uuid"] = new_project.uuid project_list = data_repo.get_all_project_list(user_id=get_current_user_uuid()) st.session_state["index_of_project_name"] = len(project_list) - 1 - st.session_state["section"] = "Open Project" - st.session_state['change_section'] = True + st.session_state["main_view_type"] = "Creative Process" + st.session_state['app_settings'] = 0 st.success("Project created successfully!") time.sleep(1) st.experimental_rerun() \ No newline at end of file diff --git a/ui_components/components/project_settings_page.py b/ui_components/components/project_settings_page.py index f8b0cdb6..d0a29673 100644 --- a/ui_components/components/project_settings_page.py +++ b/ui_components/components/project_settings_page.py @@ -15,7 +15,7 @@ def project_settings_page(project_uuid): # make a list of all the files in videos/{project_name}/assets/resources/music project_name = project.name - attach_audio_element(project_uuid, False) + attach_audio_element(project_uuid, True) # with st.expander("Version History"): # version_name = st.text_input( @@ -70,7 +70,7 @@ def project_settings_page(project_uuid): # st.success("backup deleted successfully!") # 
st.experimental_rerun() - with st.expander("Frame Size"): + with st.expander("Frame Size", expanded=True): st.write("Current Size = ", project_settings.width, "x", project_settings.height) width = st.selectbox("Select video width", options=[ diff --git a/ui_components/components/video_rendering_page.py b/ui_components/components/video_rendering_page.py index 9832cf1a..c37fa8b1 100644 --- a/ui_components/components/video_rendering_page.py +++ b/ui_components/components/video_rendering_page.py @@ -30,10 +30,8 @@ def video_rendering_page(mainheader2, project_uuid): final_video_name = st.text_input( "What would you like to name this video?", value=random_name) - attach_audio_element(project_uuid, False) + attach_audio_element(project_uuid, True) - delete_existing_timed_clips = st.checkbox( - "Delete all the existing timing clips.", value=False) quality1, quality2 = st.columns([1, 2]) @@ -43,15 +41,11 @@ def video_rendering_page(mainheader2, project_uuid): with quality2: if quality_of_video == "Preview": - st.info("THIS MEANS") + st.info("This means it'll generate videos at lower resolutions and frame rates.") else: - st.info("THIS MEANS") + st.info("This means it'll generate videos at higher resolutions and frame rates.") if st.button("Render New Video"): - if delete_existing_timed_clips == True: - for i in timing_details: - data_repo.update_specific_timing(timing_details[i].uuid, timed_clip_id=None) - timing_details = data_repo.get_timing_list_from_project(project_uuid) render_video(final_video_name, project_uuid, quality_of_video, InternalFileTag.COMPLETE_GENERATED_VIDEO.value) st.success("Video rendered!") diff --git a/ui_components/methods/common_methods.py b/ui_components/methods/common_methods.py index 91c7c0e1..f9bfa708 100644 --- a/ui_components/methods/common_methods.py +++ b/ui_components/methods/common_methods.py @@ -156,9 +156,9 @@ def add_key_frame(selected_image, inherit_styling_settings, how_long_after, whic if len(timing_details) == 1: 
st.session_state['current_frame_index'] = 1 st.session_state['current_frame_uuid'] = timing_details[0].uuid - else: - st.session_state['current_frame_index'] = min(len(timing_details), st.session_state['current_frame_index']) - st.session_state['current_frame_uuid'] = timing_details[st.session_state['current_frame_index'] - 1].uuid + else: + st.session_state['prev_frame_index'] = min(len(timing_details), st.session_state['current_frame_index']+1) + st.session_state['current_frame_uuid'] = timing_details[st.session_state['current_frame_index']].uuid st.session_state['page'] = "Styling" st.session_state['section_index'] = 0 @@ -580,6 +580,68 @@ def rotate_image(location, degree): return rotated_image +def change_frame_position(timing_uuid, new_position): + data_repo = DataRepo() + timing: InternalFrameTimingObject = data_repo.get_timing_from_uuid(timing_uuid) + + timing_list = data_repo.get_timing_list_from_project(project_uuid=timing.project.uuid) + + # Check if the new position is within the valid range + if new_position < 0 or new_position >= len(timing_list): + print(f"Invalid position: {new_position}") + st.error("Invalid position") + time.sleep(1) + return + + print(f"Updating timing {timing.uuid} to new position {new_position}") + data_repo.update_specific_timing(timing.uuid, aux_frame_index=new_position) + + # Shift the other frames + if new_position > timing.aux_frame_index: + for i in range(timing.aux_frame_index + 1, new_position + 1): + print(f"Shifting timing {timing_list[i].uuid} to position {i-1}") + data_repo.update_specific_timing(timing_list[i].uuid, aux_frame_index=i-1) + else: + for i in range(new_position, timing.aux_frame_index): + print(f"Shifting timing {timing_list[i].uuid} to position {i+1}") + data_repo.update_specific_timing(timing_list[i].uuid, aux_frame_index=i+1) + + # Update the clip duration of all timing frames + print("Updating timings in order") + update_timings_in_order(timing.project.uuid) + +def 
update_timings_in_order(project_uuid): + data_repo = DataRepo() + + timing_list: List[InternalFrameTimingObject] = data_repo.get_timing_list_from_project(project_uuid) + + # Iterate through the timing objects + for i, timing in enumerate(timing_list): + # Set the frame time to the index of the timing object + print(f"Updating timing {timing.uuid} frame time to {float(i)}") + data_repo.update_specific_timing(timing.uuid, frame_time=float(i)) + + +def change_frame_position_input(timing_uuid, src): + data_repo = DataRepo() + timing: InternalFrameTimingObject = data_repo.get_timing_from_uuid(timing_uuid) + timing_list = data_repo.get_timing_list_from_project(project_uuid=timing.project.uuid) + + min_value = 1 + max_value = len(timing_list) + + new_position = st.number_input("Move to new position:", min_value=min_value, max_value=max_value, + value=timing.aux_frame_index + 1, step=1, key=f"new_position_{timing.aux_frame_index}_{src}") + if st.button('Update Position',key=f"change_frame_position_{timing.aux_frame_index}_{src}"): + change_frame_position(timing_uuid, new_position - 1) + st.experimental_rerun() + # if new_position != timing.aux_frame_index: + # print(f"Changing frame position from {timing.aux_frame_index + 1} to {new_position}") + # change_frame_position(timing_uuid, new_position - 1) + + + + def move_frame(direction, timing_uuid): data_repo = DataRepo() timing: InternalFrameTimingObject = data_repo.get_timing_from_uuid( @@ -603,9 +665,46 @@ def move_frame(direction, timing_uuid): data_repo.update_specific_timing(timing.uuid, aux_frame_index=timing.aux_frame_index + 1) - # updating clip_duration update_clip_duration_of_all_timing_frames(timing.project.uuid) + +def move_frame_back_button(timing_uuid, orientation): + if orientation == "side-to-side": + arrow = "⬅️" + direction = "Up" + else: # up-down + arrow = "⬆️" + direction = "Up" + + if st.button(arrow, key=f"move_frame_back_{timing_uuid}", help="Move frame back"): + move_frame(direction, timing_uuid) + 
st.experimental_rerun() + + + + + + + + + +def move_frame_forward_button(timing_uuid, orientation): + direction = "Down" + if orientation == "side-to-side": + arrow = "➡️" + else: # up-down + direction = "Down" + + if st.button(arrow, key=f"move_frame_forward_{timing_uuid}", help="Move frame forward"): + move_frame(direction, timing_uuid) + st.experimental_rerun() + + +def delete_frame_button(timing_uuid): + if st.button("🗑️", key=f"delete_frame_{timing_uuid}", help="Delete frame"): + delete_frame(timing_uuid) + st.experimental_rerun() + def delete_frame(timing_uuid): data_repo = DataRepo() timing: InternalFrameTimingObject = data_repo.get_timing_from_uuid( @@ -979,6 +1078,8 @@ def update_clip_duration_of_all_timing_frames(project_uuid): duration_of_static_time = 0.0 + + data_repo.update_specific_timing( timing_item.uuid, clip_duration=total_duration_of_frame) diff --git a/ui_components/setup.py b/ui_components/setup.py index 94a4d182..f1fb6396 100644 --- a/ui_components/setup.py +++ b/ui_components/setup.py @@ -113,6 +113,8 @@ def setup_app_ui(): # with mainheader1: # st.header(st.session_state["page"]) + + if st.session_state["main_view_type"] == "Creative Process": @@ -129,7 +131,7 @@ def setup_app_ui(): st.session_state["manual_select"] = None st.session_state['page'] = option_menu(None, pages, icons=['palette', 'camera-reels', "hourglass", 'stopwatch'], menu_icon="cast", orientation="horizontal", key="secti2on_selector", styles={ - "nav-link": {"font-size": "15px", "margin": "0px", "--hover-color": "#eee"}, "nav-link-selected": {"background-color": "orange"}}, manual_select=st.session_state["manual_select"]) + "nav-link": {"font-size": "15px", "margin": "0px", "--hover-color": "#eee"}, "nav-link-selected": {"background-color": "green"}}, manual_select=st.session_state["manual_select"]) # TODO: CORRECT-CODE view_types = ["Individual View", "List View"] @@ -163,7 +165,7 @@ def on_change_view_type(key): orientation="horizontal", key="section-selecto1r", 
styles={"nav-link": {"font-size": "15px", "margin":"0px", "--hover-color": "#eee"}, - "nav-link-selected": {"background-color": "green"}}, + "nav-link-selected": {"background-color": "orange"}}, manual_select=st.session_state['frame_styling_view_type_index'], on_change=on_change_view_type ) @@ -184,7 +186,7 @@ def on_change_view_type(key): st.session_state["manual_select"] = None st.session_state['page'] = option_menu(None, tool_pages, icons=['pencil', 'palette', "hourglass", 'stopwatch'], menu_icon="cast", orientation="horizontal", key="secti2on_selector", styles={ - "nav-link": {"font-size": "15px", "margin": "0px", "--hover-color": "#eee"}, "nav-link-selected": {"background-color": "orange"}}, manual_select=st.session_state["manual_select"]) + "nav-link": {"font-size": "15px", "margin": "0px", "--hover-color": "#eee"}, "nav-link-selected": {"background-color": "green"}}, manual_select=st.session_state["manual_select"]) if st.session_state["page"] == "Custom Models": custom_models_page(st.session_state["project_uuid"]) diff --git a/ui_components/widgets/attach_audio_element.py b/ui_components/widgets/attach_audio_element.py index 080035ab..dc66ca91 100644 --- a/ui_components/widgets/attach_audio_element.py +++ b/ui_components/widgets/attach_audio_element.py @@ -10,7 +10,7 @@ def attach_audio_element(project_uuid, expanded): uuid=project_uuid) project_setting: InternalSettingObject = data_repo.get_project_setting(project_uuid) - with st.expander("Audio"): + with st.expander("Audio", expanded=expanded): uploaded_file = st.file_uploader("Attach audio", type=[ "mp3"], help="This will attach this audio when you render a video") if st.button("Upload and attach new audio"): diff --git a/ui_components/widgets/frame_selector.py b/ui_components/widgets/frame_selector.py index 1d2d37be..2e3c4d9d 100644 --- a/ui_components/widgets/frame_selector.py +++ b/ui_components/widgets/frame_selector.py @@ -12,6 +12,8 @@ def frame_selector_widget(): time1, time2 = st.columns([1,1]) 
timing_details = data_repo.get_timing_list_from_project(project_uuid=st.session_state["project_uuid"]) + len_timing_details = len(timing_details) if len(timing_details) > 0 else 1.0 + st.progress(st.session_state['current_frame_index'] / len_timing_details) with time1: if 'prev_frame_index' not in st.session_state: st.session_state['prev_frame_index'] = 1 @@ -34,7 +36,7 @@ def frame_selector_widget(): with time2: single_frame_time_selector(st.session_state['current_frame_uuid'], 'navbar', shift_frames=False) - + image_1, image_2 = st.columns([1,1]) diff --git a/ui_components/widgets/frame_time_selector.py b/ui_components/widgets/frame_time_selector.py index 84b87fa2..cd4cbed0 100644 --- a/ui_components/widgets/frame_time_selector.py +++ b/ui_components/widgets/frame_time_selector.py @@ -68,7 +68,7 @@ def single_frame_time_duration_setter(timing_uuid, src, shift_frames=True): max_value = 100.0 if shift_frames else clip_duration disable_duration_input = False if next_timing else True - help_text = None if shift_frames else "This will not shift subsequent frames - to do this, go to the Bulk View and set Shift Frames = True" + help_text = None if shift_frames else "You currently won't shift subsequent frames - to do this, go to the List View and turn on Shift Frames." 
frame_duration = st.number_input("Duration:", min_value=0.0, max_value=max_value, value=clip_duration, step=0.1, key=f"frame_duration_{timing.aux_frame_index}_{src}", disabled=disable_duration_input, help=help_text) @@ -84,22 +84,27 @@ def single_frame_time_selector(timing_uuid, src, shift_frames=True): timing: InternalFrameTimingObject = data_repo.get_timing_from_uuid( timing_uuid) - # Get the previous timing object timing_list: List[InternalFrameTimingObject] = data_repo.get_timing_list_from_project(timing.project.uuid) prev_timing = None if timing.aux_frame_index > 0: prev_timing_uuid = timing_list[timing.aux_frame_index - 1].uuid prev_timing = data_repo.get_timing_from_uuid(prev_timing_uuid) - # If previous timing exists, use its frame time as min_value, else use 0.0 min_value = prev_timing.frame_time if prev_timing else 0.0 disabled_time_change = True if timing.aux_frame_index == 0 else False next_timing = data_repo.get_next_timing(timing_uuid) - max_value = 100.0 if shift_frames else (next_timing.frame_time if next_timing else timing.frame_time) - help_text = None if shift_frames else "This will not shift subsequent frames - to do this, go to the Bulk View and set Shift Frames = True" + if next_timing: + max_value = 100.0 if shift_frames else next_timing.frame_time + else: + max_value = timing.frame_time + 100 # Allow up to 100 seconds more if it's the last item + + help_text = None if shift_frames else "You currently won't shift subsequent frames - to do this, go to the List View and turn on Shift Frames." 
frame_time = st.number_input("Time:", min_value=min_value, max_value=max_value, value=timing.frame_time, step=0.1, key=f"frame_time_{timing.aux_frame_index}_{src}",disabled=disabled_time_change, help=help_text) if frame_time != timing.frame_time: update_frame_time(timing_uuid, frame_time, shift_frames) + + + diff --git a/ui_components/widgets/list_view.py b/ui_components/widgets/list_view.py index 66a1fc91..a4a03ab5 100644 --- a/ui_components/widgets/list_view.py +++ b/ui_components/widgets/list_view.py @@ -28,12 +28,12 @@ def list_view_set_up(timing_details,project_uuid): return num_pages, items_per_page -def page_toggle(num_pages, items_per_page, project_uuid): +def page_toggle(num_pages, items_per_page, project_uuid, position): data_repo = DataRepo() timing_details = data_repo.get_timing_list_from_project(project_uuid) - st.session_state['current_page'] = st.radio("Select Page:", options=range( - 1, num_pages), horizontal=True, index=st.session_state['index_of_current_page'] - 1, key="page_selection_radio") + st.session_state['current_page'] = st.radio(f"Select page:", options=range( + 1, num_pages), horizontal=True, index=st.session_state['index_of_current_page'] - 1, key=f"page_selection_radio_{position}") if st.session_state['current_page'] != st.session_state['index_of_current_page']: st.session_state['index_of_current_page'] = st.session_state['current_page'] st.experimental_rerun() diff --git a/ui_components/widgets/timeline_view.py b/ui_components/widgets/timeline_view.py index 5ac519b0..225118a4 100644 --- a/ui_components/widgets/timeline_view.py +++ b/ui_components/widgets/timeline_view.py @@ -1,44 +1,57 @@ import streamlit as st -from ui_components.methods.common_methods import delete_frame, jump_to_single_frame_view_button, move_frame +from ui_components.methods.common_methods import delete_frame, jump_to_single_frame_view_button, 
move_frame,delete_frame_button,move_frame_back_button,move_frame_forward_button,change_frame_position_input,update_clip_duration_of_all_timing_frames from ui_components.widgets.frame_time_selector import single_frame_time_selector, single_frame_time_duration_setter +from typing import List from ui_components.widgets.image_carousal import display_image from utils.data_repo.data_repo import DataRepo from ui_components.widgets.frame_clip_generation_elements import update_animation_style_element from ui_components.constants import WorkflowStageType +from ui_components.models import InternalFrameTimingObject +from utils import st_memory -def timeline_view_buttons(i, j, timing_details, items_per_row): - if items_per_row > 6: - jump_to_single_frame_view_button(i + j + 1, timing_details) - st.markdown("***") - btn1, btn2, btn3 = st.columns([1, 1, 1]) +def timeline_view_buttons(i, j, timing_details, shift_frames_setting, time_setter_toggle, animation_style_selector_toggle, duration_setter_toggle, move_frames_toggle, delete_frames_toggle, change_frame_position_toggle): + + if time_setter_toggle: + single_frame_time_selector(timing_details[i + j].uuid, 'motion', shift_frames=shift_frames_setting) + if duration_setter_toggle: + single_frame_time_duration_setter(timing_details[i + j].uuid, 'motion', shift_frames=shift_frames_setting) + if animation_style_selector_toggle: + update_animation_style_element(timing_details[i + j].uuid) + btn1, btn2, btn3 = st.columns([1, 1, 1]) + if move_frames_toggle: + with btn1: - st.button("⬅️", key=f"move_frame_back_{i + j + 1}", help="Move frame back") + move_frame_back_button(timing_details[i + j].uuid, "side-to-side") with btn2: - st.button("➡️", key=f"move_frame_forward_{i + j + 1}", help="Move frame forward") + move_frame_forward_button(timing_details[i + j].uuid, "side-to-side") + if delete_frames_toggle: with btn3: - st.button("🗑️", key=f"delete_frame_{i + j + 1}", help="Delete frame") - else: - btn1, btn2, btn3, btn4 = st.columns([1.7, 
1, 1, 1]) - with btn1: - jump_to_single_frame_view_button(i + j + 1, timing_details) - with btn2: - st.button("⬅️", key=f"move_frame_back_{i + j + 1}", help="Move frame back") - with btn3: - st.button("➡️", key=f"move_frame_forward_{i + j + 1}", help="Move frame forward") - with btn4: - st.button("🗑️", key=f"delete_frame_{i + j + 1}", help="Delete frame") + delete_frame_button(timing_details[i + j].uuid) + if change_frame_position_toggle: + change_frame_position_input(timing_details[i + j].uuid, "side-to-side") + + if time_setter_toggle or duration_setter_toggle or animation_style_selector_toggle or move_frames_toggle or delete_frames_toggle or change_frame_position_toggle: + st.caption("--") + jump_to_single_frame_view_button(i + j + 1, timing_details) + +def timeline_view(shift_frames_setting, project_uuid, stage,header_col_2,header_col_3): + st.markdown("---") -def timeline_view(shift_frames_setting, project_uuid, items_per_row, expand_all, stage='Styling'): data_repo = DataRepo() timing = data_repo.get_timing_list_from_project(project_uuid)[0] timing_details = data_repo.get_timing_list_from_project(project_uuid) + with header_col_3: + items_per_row = st_memory.slider("How many frames per row?", min_value=1, max_value=10, value=5, step=1, key="items_per_row_slider") + with header_col_2: + time_setter_toggle, animation_style_selector_toggle, duration_setter_toggle, move_frames_toggle, delete_frames_toggle, change_frame_position_toggle = display_toggles() + for i in range(0, len(timing_details), items_per_row): # Step of items_per_row for grid grid = st.columns(items_per_row) # Create items_per_row columns for grid for j in range(items_per_row): if i + j < len(timing_details): # Check if index is within range with grid[j]: - display_number = i + j + 1 + display_number = i + j + 1 if stage == 'Styling': display_image(timing_uuid=timing_details[i + j].uuid, stage=WorkflowStageType.STYLED.value, clickable=False) elif stage == 'Motion': @@ -46,20 +59,26 @@ def 
timeline_view(shift_frames_setting, project_uuid, items_per_row, expand_all, st.video(timing.timed_clip.location) else: st.error("No video found for this frame.") - with st.expander(f'Frame {display_number}', expanded=expand_all): - single_frame_time_selector(timing_details[i + j].uuid, 'motion', shift_frames=shift_frames_setting) - single_frame_time_duration_setter(timing_details[i + j].uuid, 'motion', shift_frames=shift_frames_setting) - update_animation_style_element(timing_details[i + j].uuid) - timeline_view_buttons(i, j, timing_details, items_per_row) - # if move_frame_back button is clicked - if st.session_state[f"move_frame_back_{i + j + 1}"]: - move_frame("Up", timing_details[i + j].uuid) - st.experimental_rerun() - if st.session_state[f"move_frame_forward_{i + j + 1}"]: - move_frame("Down", timing_details[i + j].uuid) - st.experimental_rerun() - if st.session_state[f"delete_frame_{i + j + 1}"]: - delete_frame(timing_details[i + j].uuid) - st.experimental_rerun() - - st.markdown("***") \ No newline at end of file + with st.expander(f'Frame #{display_number}', True): + timeline_view_buttons(i, j, timing_details, shift_frames_setting, time_setter_toggle, animation_style_selector_toggle, duration_setter_toggle, move_frames_toggle, delete_frames_toggle, change_frame_position_toggle) + +def display_toggles(): + + col1, col2, col3 = st.columns(3) + + with col1: + expand_all = st_memory.toggle("Expand All", key="expand_all",value=False) + + if expand_all: + time_setter_toggle = animation_style_selector_toggle = duration_setter_toggle = move_frames_toggle = delete_frames_toggle = change_frame_position_toggle = True + else: + with col2: + time_setter_toggle = st_memory.toggle("Time Setter", value=True, key="time_setter_toggle") + animation_style_selector_toggle = st_memory.toggle("Animation Style Selector", value=False, key="animation_style_selector_toggle") + duration_setter_toggle = st_memory.toggle("Duration Setter", value=False, key="duration_setter_toggle") + 
with col3: + move_frames_toggle = st_memory.toggle("Move Frames", value=True, key="move_frames_toggle") + delete_frames_toggle = st_memory.toggle("Delete Frames", value=False, key="delete_frames_toggle") + change_frame_position_toggle = st_memory.toggle("Change Frame Position", value=False, key="change_frame_position_toggle") + + return time_setter_toggle, animation_style_selector_toggle, duration_setter_toggle, move_frames_toggle, delete_frames_toggle, change_frame_position_toggle diff --git a/utils/st_memory.py b/utils/st_memory.py index ff1d9f92..8f1df234 100644 --- a/utils/st_memory.py +++ b/utils/st_memory.py @@ -20,25 +20,45 @@ def radio(label, options, index=0, key=None, help=None, on_change=None, disabled return selection +def selectbox(label, options, index=0, key=None, help=None, on_change=None, disabled=False, format_func=str): + + if key not in st.session_state: + st.session_state[key] = index + + selection = st.selectbox(label=label, options=options, index=st.session_state[key], format_func=format_func) + + if options.index(selection) != st.session_state[key]: + st.session_state[key] = options.index(selection) + st.experimental_rerun() + + return selection + + +def number_input(label, min_value=None, max_value=None, step=None, format=None, key=None, help=None, on_change=None, args=None, kwargs=None, *, disabled=False, label_visibility="visible",value=1): -def number_input(label, min_value=None, max_value=None, value=None, step=None, format=None, key=None, help=None, on_change=None, args=None, kwargs=None, *, disabled=False, label_visibility="visible", default_value=0, project_settings=None): if key not in st.session_state: - if getattr(project_settings, key, default_value): - st.session_state[key] = int(getattr(project_settings, key, default_value)) - else: - st.session_state[key] = default_value + st.session_state[key] = value - selection = st.number_input(label, min_value, max_value, st.session_state[key], step, format, key, help, on_change, 
disabled, label_visibility) + selection = st.number_input(label, min_value, max_value, st.session_state[key], step, format, help, on_change, disabled, label_visibility) if selection != st.session_state[key]: st.session_state[key] = selection - if getattr(project_settings, key, default_value): - data_repo = DataRepo() - data_repo.update_project_setting(project_settings.project.uuid, key=value) st.experimental_rerun() return selection +def slider(label, min_value=None, max_value=None, value=None, step=None, format=None, key=None, help=None, on_change=None, args=None, kwargs=None, *, disabled=False, label_visibility="visible"): + + if key not in st.session_state: + st.session_state[key] = value + + selection = st.slider(label=label, min_value=min_value, max_value=max_value, value=st.session_state[key], step=step, format=format, help=help, on_change=on_change, disabled=disabled, label_visibility=label_visibility) + + if selection != st.session_state[key]: + st.session_state[key] = selection + st.experimental_rerun() + + return selection def select_slider(label, options=(), value=None, format_func=None, key=None, help=None, on_change=None, args=None, kwargs=None, *, disabled=False, label_visibility="visible", default_value=None, project_settings=None): if key not in st.session_state: @@ -47,7 +67,7 @@ def select_slider(label, options=(), value=None, format_func=None, key=None, hel else: st.session_state[key] = default_value - selection = st.select_slider(label, options, st.session_state[key], format_func, key, help, on_change, disabled, label_visibility) + selection = st.select_slider(label, options, st.session_state[key], format_func, help, on_change, disabled, label_visibility) if selection != st.session_state[key]: st.session_state[key] = selection @@ -59,10 +79,10 @@ def select_slider(label, options=(), value=None, format_func=None, key=None, hel return selection +def toggle(label, value=True,key=None, help=None, on_change=None, disabled=False, 
label_visibility="visible"): -def toggle(label, key=None, help=None, on_change=None, disabled=False, label_visibility="visible", default_value=False): if key not in st.session_state: - st.session_state[key] = default_value + st.session_state[key] = value selection = st.toggle(label=label, value=st.session_state[key], help=help, on_change=on_change, disabled=disabled, label_visibility=label_visibility) diff --git a/utils/third_party_auth/google/google_auth.py b/utils/third_party_auth/google/google_auth.py index e1fb1f8c..d025c8f4 100644 --- a/utils/third_party_auth/google/google_auth.py +++ b/utils/third_party_auth/google/google_auth.py @@ -27,4 +27,4 @@ def get_google_auth_url(): client: GoogleOAuth2 = GoogleOAuth2(GOOGLE_AUTH_CLIENT_ID, GOOGLE_SECRET) authorization_url = asyncio.run( get_authorization_url(client, REDIRECT_URI)) - return f""" Google login """ \ No newline at end of file + return f""" Google login -> """ \ No newline at end of file From 3ec0ec79d7ae9d9c5d2ec6fef1f63c0032740f55 Mon Sep 17 00:00:00 2001 From: peter942 Date: Fri, 29 Sep 2023 15:50:44 +0200 Subject: [PATCH 022/164] Name fixes --- shared/constants.py | 4 ++-- ui_components/components/frame_styling_page.py | 4 ++-- ui_components/methods/common_methods.py | 15 ++++++++++++--- 3 files changed, 16 insertions(+), 7 deletions(-) diff --git a/shared/constants.py b/shared/constants.py index 6c5eedea..409de05f 100644 --- a/shared/constants.py +++ b/shared/constants.py @@ -61,8 +61,8 @@ class InternalFileTag(ExtendedEnum): TEMP_IMAGE = 'temp' class AnimationStyleType(ExtendedEnum): - INTERPOLATION = "Interpolation" - IMAGE_TO_VIDEO = "Image to Video" + INTERPOLATION = "Interpolate to next" + IMAGE_TO_VIDEO = "Image to video" DIRECT_MORPHING = "None" diff --git a/ui_components/components/frame_styling_page.py b/ui_components/components/frame_styling_page.py index 5a91c3b7..5749ad72 100644 --- a/ui_components/components/frame_styling_page.py +++ b/ui_components/components/frame_styling_page.py @@ 
-190,7 +190,7 @@ def frame_styling_page(mainheader2, project_uuid: str): st.markdown("---") - header_col_1, header_col_2, header_col_3, header_col_4, header_col_5 = st.columns([1,0.5,5, 1, 1]) + header_col_1, header_col_2, header_col_3, header_col_4, header_col_5 = st.columns([1.25,0.25,4, 1.5, 1.5]) with header_col_1: st.session_state['list_view_type'] = st_memory.radio("View type:", options=["Timeline View","Detailed View"], key="list_view_type_slider") @@ -224,7 +224,7 @@ def frame_styling_page(mainheader2, project_uuid: str): elif st.session_state['list_view_type'] == "Timeline View": with st.sidebar: - styling_element(st.session_state['current_frame_uuid'], view_type="List") + styling_element(st.session_state['current_frame_uuid'], view_type="List") if st.session_state['page'] == "Styling": diff --git a/ui_components/methods/common_methods.py b/ui_components/methods/common_methods.py index f9bfa708..a597a454 100644 --- a/ui_components/methods/common_methods.py +++ b/ui_components/methods/common_methods.py @@ -639,34 +639,43 @@ def change_frame_position_input(timing_uuid, src): # print(f"Changing frame position from {timing.aux_frame_index + 1} to {new_position}") # change_frame_position(timing_uuid, new_position - 1) +from datetime import datetime - def move_frame(direction, timing_uuid): + print(f"{datetime.now()} - Starting move_frame function") data_repo = DataRepo() timing: InternalFrameTimingObject = data_repo.get_timing_from_uuid( timing_uuid) + print(f"{datetime.now()} - Retrieved timing object") + if direction == "Up": + print(f"{datetime.now()} - Moving frame up") if timing.aux_frame_index == 0: + print(f"{datetime.now()} - This is the first frame") st.error("This is the first frame") time.sleep(1) return data_repo.update_specific_timing(timing.uuid, aux_frame_index=timing.aux_frame_index - 1) + print(f"{datetime.now()} - Updated timing object") elif direction == "Down": - + print(f"{datetime.now()} - Moving frame down") timing_list = 
data_repo.get_timing_list_from_project(project_uuid=timing.project.uuid) if timing.aux_frame_index == len(timing_list) - 1: + print(f"{datetime.now()} - This is the last frame") st.error("This is the last frame") time.sleep(1) return data_repo.update_specific_timing(timing.uuid, aux_frame_index=timing.aux_frame_index + 1) + print(f"{datetime.now()} - Updated timing object") + print(f"{datetime.now()} - Updating clip duration of all timing frames") update_clip_duration_of_all_timing_frames(timing.project.uuid) - + print(f"{datetime.now()} - Finished move_frame function") def move_frame_back_button(timing_uuid, orientation): if orientation == "side-to-side": From 907076addc82df4a8f9a89322ef35c64262ad3d9 Mon Sep 17 00:00:00 2001 From: peter942 Date: Fri, 29 Sep 2023 21:42:35 +0200 Subject: [PATCH 023/164] Name fixes --- ui_components/methods/common_methods.py | 62 +++++++++---------------- ui_components/widgets/list_view.py | 20 ++++---- 2 files changed, 31 insertions(+), 51 deletions(-) diff --git a/ui_components/methods/common_methods.py b/ui_components/methods/common_methods.py index a597a454..d7bc2b11 100644 --- a/ui_components/methods/common_methods.py +++ b/ui_components/methods/common_methods.py @@ -3,6 +3,7 @@ import streamlit as st import os from PIL import Image, ImageDraw, ImageOps, ImageFilter +from datetime import datetime from moviepy.editor import * import cv2 import requests as r @@ -617,9 +618,10 @@ def update_timings_in_order(project_uuid): # Iterate through the timing objects for i, timing in enumerate(timing_list): - # Set the frame time to the index of the timing object - print(f"Updating timing {timing.uuid} frame time to {float(i)}") - data_repo.update_specific_timing(timing.uuid, frame_time=float(i)) + # Set the frame time to the index of the timing object only if it's different from the current one + if timing.frame_time != float(i): + print(f"Updating timing {timing.uuid} frame time to {float(i)}") + 
data_repo.update_specific_timing(timing.uuid, frame_time=float(i)) def change_frame_position_input(timing_uuid, src): @@ -635,25 +637,15 @@ def change_frame_position_input(timing_uuid, src): if st.button('Update Position',key=f"change_frame_position_{timing.aux_frame_index}_{src}"): change_frame_position(timing_uuid, new_position - 1) st.experimental_rerun() - # if new_position != timing.aux_frame_index: - # print(f"Changing frame position from {timing.aux_frame_index + 1} to {new_position}") - # change_frame_position(timing_uuid, new_position - 1) - -from datetime import datetime - + -def move_frame(direction, timing_uuid): - print(f"{datetime.now()} - Starting move_frame function") +def move_frame(direction, timing_uuid): data_repo = DataRepo() timing: InternalFrameTimingObject = data_repo.get_timing_from_uuid( timing_uuid) - print(f"{datetime.now()} - Retrieved timing object") - - if direction == "Up": - print(f"{datetime.now()} - Moving frame up") - if timing.aux_frame_index == 0: - print(f"{datetime.now()} - This is the first frame") + if direction == "Up": + if timing.aux_frame_index == 0: st.error("This is the first frame") time.sleep(1) return @@ -665,52 +657,44 @@ def move_frame(direction, timing_uuid): print(f"{datetime.now()} - Moving frame down") timing_list = data_repo.get_timing_list_from_project(project_uuid=timing.project.uuid) if timing.aux_frame_index == len(timing_list) - 1: - print(f"{datetime.now()} - This is the last frame") + st.error("This is the last frame") time.sleep(1) return - data_repo.update_specific_timing(timing.uuid, aux_frame_index=timing.aux_frame_index + 1) - print(f"{datetime.now()} - Updated timing object") + data_repo.update_specific_timing(timing.uuid, aux_frame_index=timing.aux_frame_index + 1) - print(f"{datetime.now()} - Updating clip duration of all timing frames") - update_clip_duration_of_all_timing_frames(timing.project.uuid) - print(f"{datetime.now()} - Finished move_frame function") def 
move_frame_back_button(timing_uuid, orientation): + direction = "Up" if orientation == "side-to-side": - arrow = "⬅️" - direction = "Up" + arrow = "⬅️" else: # up-down - arrow = "⬆️" - direction = "Up" - + arrow = "⬆️" if st.button(arrow, key=f"move_frame_back_{timing_uuid}", help="Move frame back"): move_frame(direction, timing_uuid) st.experimental_rerun() - - - - - - - - + def move_frame_forward_button(timing_uuid, orientation): direction = "Down" if orientation == "side-to-side": arrow = "➡️" else: # up-down - direction = "Down" + arrow = "⬇️" if st.button(arrow, key=f"move_frame_forward_{timing_uuid}", help="Move frame forward"): move_frame(direction, timing_uuid) st.experimental_rerun() -def delete_frame_button(timing_uuid): - if st.button("🗑️", key=f"delete_frame_{timing_uuid}", help="Delete frame"): +def delete_frame_button(timing_uuid, show_label=False): + if show_label: + label = "Delete Frame 🗑️" + else: + label = "🗑️" + + if st.button(label, key=f"delete_frame_{timing_uuid}", help="Delete frame"): delete_frame(timing_uuid) st.experimental_rerun() diff --git a/ui_components/widgets/list_view.py b/ui_components/widgets/list_view.py index a4a03ab5..299b09fa 100644 --- a/ui_components/widgets/list_view.py +++ b/ui_components/widgets/list_view.py @@ -3,7 +3,7 @@ from utils.data_repo.data_repo import DataRepo from ui_components.widgets.frame_time_selector import single_frame_time_selector, single_frame_time_duration_setter from ui_components.widgets.image_carousal import display_image -from ui_components.methods.common_methods import delete_frame, move_frame,jump_to_single_frame_view_button +from ui_components.methods.common_methods import delete_frame, move_frame,jump_to_single_frame_view_button,delete_frame_button,move_frame_back_button,move_frame_forward_button import math from utils.data_repo.data_repo import DataRepo from ui_components.methods.common_methods import delete_frame @@ -72,17 +72,13 @@ def styling_list_view(start_index, end_index, 
shift_frames_setting, project_uuid st.markdown("---") btn1, btn2, btn3 = st.columns([2, 1, 1]) with btn1: - if st.button("Delete this keyframe", key=f'{i}'): - delete_frame(timing_details[i].uuid) - st.experimental_rerun() - with btn2: - if st.button("⬆️", key=f"Promote {display_number}"): - move_frame("Up", timing_details[i].uuid) - st.experimental_rerun() - with btn3: - if st.button("⬇️", key=f"Demote {display_number}"): - move_frame("Down", timing_details[i].uuid) - st.experimental_rerun() + delete_frame_button(timing_details[i].uuid, show_label=False) + with btn2: + move_frame_back_button(timing_details[i].uuid, orientation='up-down') + + with btn3: + move_frame_forward_button(timing_details[i].uuid, orientation='up-down') + st.markdown("***") From e04cda5947b77a1fdc92d40d4837337786afc380 Mon Sep 17 00:00:00 2001 From: piyushK52 Date: Sat, 30 Sep 2023 10:10:46 +0530 Subject: [PATCH 024/164] new project bug fix --- banodoco_settings.py | 3 ++- shared/constants.py | 2 +- ui_components/components/frame_styling_page.py | 2 +- ui_components/components/new_project_page.py | 2 +- .../widgets/variant_comparison_element.py | 2 +- utils/cache/cache.py | 18 +++++++++++++++++- utils/cache/cache_methods.py | 7 +------ utils/common_utils.py | 4 ++++ 8 files changed, 28 insertions(+), 12 deletions(-) diff --git a/banodoco_settings.py b/banodoco_settings.py index f0046045..00c41f64 100644 --- a/banodoco_settings.py +++ b/banodoco_settings.py @@ -112,7 +112,8 @@ def create_new_project(user: InternalUserObject, project_name: str, width=512, h "frame_time": 0.0, "animation_style": animation_style, "aux_frame_index": 0, - "source_image_id": source_image.uuid + "source_image_id": source_image.uuid, + "alternative_images": json.dumps([source_image.uuid]) } timing: InternalFrameTimingObject = data_repo.create_timing(**timing_data) diff --git a/shared/constants.py b/shared/constants.py index 47f85ee2..30f4feb7 100644 --- a/shared/constants.py +++ b/shared/constants.py @@ -61,7 +61,7 
@@ class InternalFileTag(ExtendedEnum): TEMP_IMAGE = 'temp' class AnimationStyleType(ExtendedEnum): - INTERPOLATION = "Interpolate to next" + INTERPOLATION = "Interpolate to next" IMAGE_TO_VIDEO = "Image to video" DIRECT_MORPHING = "None" diff --git a/ui_components/components/frame_styling_page.py b/ui_components/components/frame_styling_page.py index 83095c53..722003c6 100644 --- a/ui_components/components/frame_styling_page.py +++ b/ui_components/components/frame_styling_page.py @@ -98,7 +98,7 @@ def frame_styling_page(mainheader2, project_uuid: str): comparison_values = [ "Other Variants", "Source Frame", "Previous & Next Frame", "None"] - st.session_state['show_comparison'] = st_memory.radio("Show comparison to:", options=comparison_values, horizontal=True, project_settings=project_settings, key="show_comparison_radio") + st.session_state['show_comparison'] = st_memory.radio("Show comparison to:", options=comparison_values, horizontal=True, key="show_comparison_radio") if st.session_state['show_comparison'] == "Other Variants": diff --git a/ui_components/components/new_project_page.py b/ui_components/components/new_project_page.py index 97297b39..09c4722f 100644 --- a/ui_components/components/new_project_page.py +++ b/ui_components/components/new_project_page.py @@ -92,7 +92,7 @@ def new_project_page(): st.error(f"Failed to save the uploaded image due to {str(e)}") # remvoing the initial frame which moved to the 1st position - initial_frame = data_repo.get_timing_from_frame_number(new_project.uuid, 1) + initial_frame = data_repo.get_timing_from_frame_number(new_project.uuid, 0) data_repo.delete_timing_from_uuid(initial_frame.uuid) if uploaded_audio: diff --git a/ui_components/widgets/variant_comparison_element.py b/ui_components/widgets/variant_comparison_element.py index a29a28d6..49d2d076 100644 --- a/ui_components/widgets/variant_comparison_element.py +++ b/ui_components/widgets/variant_comparison_element.py @@ -72,7 +72,7 @@ def 
variant_comparison_element(timing_uuid, stage=CreativeProcessType.MOTION.val st.image(variants[which_variant- 1].location, use_column_width=True) - if which_variant- 1 != current_variant: + if which_variant - 1 != current_variant: if st.button(f"Promote Variant #{which_variant}", key=f"Promote Variant #{which_variant} for {st.session_state['current_frame_index']}", help="Promote this variant to the primary image"): if stage == CreativeProcessType.MOTION.value: promote_video_variant(timing.uuid, which_variant - 1) diff --git a/utils/cache/cache.py b/utils/cache/cache.py index 83dd10ac..f542f3db 100644 --- a/utils/cache/cache.py +++ b/utils/cache/cache.py @@ -1,5 +1,13 @@ import streamlit as st +from utils.enum import ExtendedEnum + +class CacheKey(ExtendedEnum): + TIMING_DETAILS = "timing_details" + APP_SETTING = "app_setting" + APP_SECRET = "app_secret" + PROJECT_SETTING = "project_setting" + class StCache: @staticmethod @@ -80,4 +88,12 @@ def get_all(data_type): if data_type in st.session_state: return st.session_state[data_type] - return [] \ No newline at end of file + return [] + + # deletes all cached objects of every data type + @staticmethod + def clear_entire_cache() -> bool: + for c in CacheKey.value_list(): + StCache.delete_all(c) + + return True \ No newline at end of file diff --git a/utils/cache/cache_methods.py b/utils/cache/cache_methods.py index a04a3bcc..012ee9d8 100644 --- a/utils/cache/cache_methods.py +++ b/utils/cache/cache_methods.py @@ -1,15 +1,10 @@ from shared.logging.logging import AppLogger -from utils.cache.cache import StCache +from utils.cache.cache import CacheKey, StCache from utils.enum import ExtendedEnum logger = AppLogger() -class CacheKey(ExtendedEnum): - TIMING_DETAILS = "timing_details" - APP_SETTING = "app_setting" - APP_SECRET = "app_secret" - PROJECT_SETTING = "project_setting" # NOTE: caching only timing_details, project settings and app settings. 
invalidating cache everytime a related data is updated def cache_data(cls): diff --git a/utils/common_utils.py b/utils/common_utils.py index 660104ae..2cfab1f6 100644 --- a/utils/common_utils.py +++ b/utils/common_utils.py @@ -5,6 +5,7 @@ import json from shared.constants import SERVER, ServerType from ui_components.models import InternalUserObject +from utils.cache.cache import StCache from utils.constants import LOGGED_USER from utils.data_repo.data_repo import DataRepo @@ -148,3 +149,6 @@ def reset_project_state(): if key in st.session_state: del st.session_state[key] + + # reset cache + StCache.clear_entire_cache() \ No newline at end of file From 6d90a3c645e69be321b6cf6f83616c4ac13b87a6 Mon Sep 17 00:00:00 2001 From: piyushK52 Date: Sat, 30 Sep 2023 11:35:59 +0530 Subject: [PATCH 025/164] main variant styling fixed --- ui_components/methods/ml_methods.py | 18 ++++++++++-------- 1 file changed, 10 insertions(+), 8 deletions(-) diff --git a/ui_components/methods/ml_methods.py b/ui_components/methods/ml_methods.py index 053d42cd..2f058eba 100644 --- a/ui_components/methods/ml_methods.py +++ b/ui_components/methods/ml_methods.py @@ -100,8 +100,7 @@ def trigger_restyling_process( else: variants: List[InternalFileObject] = timing.alternative_images_list number_of_variants = len(variants) - primary_image = timing.primary_image - source_image = primary_image.location + source_image = timing.primary_image output_file = restyle_images(timing_uuid, source_image) @@ -120,7 +119,7 @@ def trigger_restyling_process( print("No new generation to promote") -def restyle_images(timing_uuid, source_image) -> InternalFileObject: +def restyle_images(timing_uuid, source_image: InternalFileObject) -> InternalFileObject: data_repo = DataRepo() timing: InternalFrameTimingObject = data_repo.get_timing_from_uuid( timing_uuid) @@ -131,8 +130,7 @@ def restyle_images(timing_uuid, source_image) -> InternalFileObject: if model_name == "stable-diffusion-img2img-v2.1": output_file = 
prompt_model_stability(timing_uuid, source_image) elif model_name == "depth2img": - output_file = prompt_model_depth2img( - strength, timing_uuid, source_image) + output_file = prompt_model_depth2img(strength, timing_uuid, source_image) elif model_name == "pix2pix": output_file = prompt_model_pix2pix(timing_uuid, source_image) elif model_name == "LoRA": @@ -342,7 +340,7 @@ def prompt_model_dreambooth(timing_uuid, source_image_file: InternalFileObject): return None -def prompt_model_depth2img(strength, timing_uuid, source_image) -> InternalFileObject: +def prompt_model_depth2img(strength, timing_uuid, source_image_file: InternalFileObject) -> InternalFileObject: data_repo = DataRepo() timing: InternalFrameTimingObject = data_repo.get_timing_from_uuid( timing_uuid) @@ -351,6 +349,7 @@ def prompt_model_depth2img(strength, timing_uuid, source_image) -> InternalFileO num_inference_steps = timing.num_inteference_steps guidance_scale = timing.guidance_scale negative_prompt = timing.negative_prompt + source_image = source_image_file.location if not source_image.startswith("http"): source_image = open(source_image, "rb") @@ -429,11 +428,13 @@ def facial_expression_recognition(input_image): return emotion -def prompt_model_controlnet(timing_uuid, input_image): +def prompt_model_controlnet(timing_uuid, intput_image_file: InternalFileObject): data_repo = DataRepo() timing: InternalFrameTimingObject = data_repo.get_timing_from_uuid( timing_uuid) + input_image = intput_image_file.location + if timing.adapter_type == "normal": model = REPLICATE_MODEL.jagilley_controlnet_normal elif timing.adapter_type == "canny": @@ -482,10 +483,11 @@ def prompt_model_controlnet(timing_uuid, input_image): return output_file -def prompt_model_urpm_v1_3(timing_uuid, source_image): +def prompt_model_urpm_v1_3(timing_uuid, source_image_file: InternalFileObject): data_repo = DataRepo() timing = data_repo.get_timing_from_uuid(timing_uuid) + source_image = source_image_file.location if not 
source_image.startswith("http"): source_image = open(source_image, "rb") From ed996cef08454ad027b14ac5e3b74c11983edef9 Mon Sep 17 00:00:00 2001 From: piyushK52 Date: Sat, 30 Sep 2023 20:44:45 +0530 Subject: [PATCH 026/164] wip: inference flow changes --- banodoco_settings.py | 103 +--- .../components/frame_styling_page.py | 30 +- ui_components/methods/ml_methods.py | 560 +++++------------- ui_components/models.py | 6 +- .../widgets/variant_comparison_element.py | 2 +- utils/constants.py | 205 ++++++- utils/ml_processor/replicate/constants.py | 16 +- utils/ml_processor/replicate/replicate.py | 8 +- utils/ml_processor/replicate/utils.py | 139 ++++- 9 files changed, 554 insertions(+), 515 deletions(-) diff --git a/banodoco_settings.py b/banodoco_settings.py index 00c41f64..5551a80f 100644 --- a/banodoco_settings.py +++ b/banodoco_settings.py @@ -11,7 +11,7 @@ from ui_components.methods.file_methods import save_or_host_file from ui_components.models import InternalAppSettingObject, InternalFrameTimingObject, InternalProjectObject, InternalUserObject from utils.common_utils import create_working_assets -from utils.constants import ImageStage +from utils.constants import ML_MODEL_LIST, ImageStage from utils.data_repo.data_repo import DataRepo from utils.ml_processor.replicate.constants import REPLICATE_MODEL @@ -156,101 +156,12 @@ def create_predefined_models(user): data_repo = DataRepo() # create predefined models - data = [ - { - "name" : 'stable-diffusion-img2img-v2.1', - "user_id" : user.uuid, - "version": REPLICATE_MODEL.img2img_sd_2_1.version, - "replicate_url" : REPLICATE_MODEL.img2img_sd_2_1.name, - "category" : AIModelCategory.BASE_SD.value, - "keyword" : "", - "model_type": json.dumps([AIModelType.IMG2IMG.value]) - }, - { - "name" : 'depth2img', - "user_id" : user.uuid, - "version": REPLICATE_MODEL.jagilley_controlnet_depth2img.version, - "replicate_url" : REPLICATE_MODEL.jagilley_controlnet_depth2img.name, - "category" : AIModelCategory.BASE_SD.value, - 
"keyword" : "", - "model_type": json.dumps([AIModelType.IMG2IMG.value]) - }, - { - "name" : 'pix2pix', - "user_id" : user.uuid, - "version": REPLICATE_MODEL.arielreplicate.version, - "replicate_url" : REPLICATE_MODEL.arielreplicate.name, - "category" : AIModelCategory.BASE_SD.value, - "keyword" : "", - "model_type": json.dumps([AIModelType.IMG2IMG.value]) - }, - { - "name" : 'controlnet', - "user_id" : user.uuid, - "category" : AIModelCategory.CONTROLNET.value, - "keyword" : "", - "model_type": json.dumps([AIModelType.IMG2IMG.value]) - }, - { - "name" : 'Dreambooth', - "user_id" : user.uuid, - "category" : AIModelCategory.DREAMBOOTH.value, - "keyword" : "", - "model_type": json.dumps([AIModelType.IMG2IMG.value]) - }, - { - "name" : 'LoRA', - "user_id" : user.uuid, - "category" : AIModelCategory.LORA.value, - "keyword" : "", - "model_type": json.dumps([AIModelType.IMG2IMG.value]) - }, - { - "name" : 'StyleGAN-NADA', - "user_id" : user.uuid, - "version": REPLICATE_MODEL.stylegan_nada.version, - "replicate_url" : REPLICATE_MODEL.stylegan_nada.name, - "category" : AIModelCategory.BASE_SD.value, - "keyword" : "", - "model_type": json.dumps([AIModelType.IMG2IMG.value]) - }, - { - "name" : 'real-esrgan-upscaling', - "user_id" : user.uuid, - "version": REPLICATE_MODEL.real_esrgan_upscale.version, - "replicate_url" : REPLICATE_MODEL.real_esrgan_upscale.name, - "category" : AIModelCategory.BASE_SD.value, - "keyword" : "", - "model_type": json.dumps([AIModelType.IMG2IMG.value]) - }, - { - "name" : 'controlnet_1_1_x_realistic_vision_v2_0', - "user_id" : user.uuid, - "version": REPLICATE_MODEL.controlnet_1_1_x_realistic_vision_v2_0.version, - "replicate_url" : REPLICATE_MODEL.controlnet_1_1_x_realistic_vision_v2_0.name, - "category" : AIModelCategory.BASE_SD.value, - "keyword" : "", - "model_type": json.dumps([AIModelType.IMG2IMG.value]) - }, - { - "name" : 'urpm-v1.3', - "user_id" : user.uuid, - "version": REPLICATE_MODEL.urpm.version, - "replicate_url" : 
REPLICATE_MODEL.urpm.name, - "category" : AIModelCategory.BASE_SD.value, - "keyword" : "", - "model_type": json.dumps([AIModelType.IMG2IMG.value]) - }, - { - "name": "stable_diffusion_xl", - "user_id": user.uuid, - "version": REPLICATE_MODEL.sdxl.version, - "replicate_url": REPLICATE_MODEL.sdxl.name, - "category": AIModelCategory.BASE_SD.value, - "keyword": "", - "model_type": json.dumps([AIModelType.TXT2IMG.value]) - } - ] + data = [] + for model in ML_MODEL_LIST: + if model['enabled']: + del model['enabled'] + model['user_id'] = user.uuid + data.append(model) # only creating pre-defined models for the first time available_models = data_repo.get_all_ai_model_list(\ diff --git a/ui_components/components/frame_styling_page.py b/ui_components/components/frame_styling_page.py index 722003c6..2342195a 100644 --- a/ui_components/components/frame_styling_page.py +++ b/ui_components/components/frame_styling_page.py @@ -137,21 +137,21 @@ def frame_styling_page(mainheader2, project_uuid: str): if st.button(f"Generate variants", key=f"new_variations_{st.session_state['current_frame_index']}", help="This will generate new variants based on the settings to the left."): for i in range(0, st.session_state['individual_number_of_variants']): trigger_restyling_process( - st.session_state['current_frame_uuid'], - st.session_state['model'], - st.session_state['prompt'], - st.session_state['strength'], - st.session_state['negative_prompt'], - st.session_state['guidance_scale'], - st.session_state['seed'], - st.session_state['num_inference_steps'], - st.session_state['transformation_stage'], - st.session_state["promote_new_generation"], - st.session_state['custom_models'], - st.session_state['adapter_type'], - True, - st.session_state['low_threshold'], - st.session_state['high_threshold'] + timing_uuid=st.session_state['current_frame_uuid'], + model_name=st.session_state['model'], + prompt=st.session_state['prompt'], + strength=st.session_state['strength'], + 
negative_prompt=st.session_state['negative_prompt'], + guidance_scale=st.session_state['guidance_scale'], + seed=st.session_state['seed'], + num_inference_steps=st.session_state['num_inference_steps'], + transformation_stage=st.session_state['transformation_stage'], + promote_new_generation=st.session_state["promote_new_generation"], + custom_models=st.session_state['custom_models'], + adapter_type=st.session_state['adapter_type'], + update_inference_settings=True, + low_threshold=st.session_state['low_threshold'], + high_threshold=st.session_state['high_threshold'] ) st.experimental_rerun() diff --git a/ui_components/methods/ml_methods.py b/ui_components/methods/ml_methods.py index 2f058eba..0ec4e603 100644 --- a/ui_components/methods/ml_methods.py +++ b/ui_components/methods/ml_methods.py @@ -11,40 +11,35 @@ from shared.constants import REPLICATE_USER, SERVER, InternalFileTag, InternalFileType, ServerType from ui_components.constants import MASK_IMG_LOCAL_PATH, TEMP_MASK_FILE from ui_components.models import InternalAIModelObject, InternalFrameTimingObject, InternalSettingObject -from utils.constants import ImageStage +from utils.constants import ImageStage, MLQueryObject from utils.data_repo.data_repo import DataRepo from utils.ml_processor.ml_interface import get_ml_client from utils.ml_processor.replicate.constants import REPLICATE_MODEL -def trigger_restyling_process( - timing_uuid, - model_uuid, - prompt, - strength, - negative_prompt, - guidance_scale, - seed, - num_inference_steps, - transformation_stage, - promote_new_generation, - custom_models, - adapter_type, - update_inference_settings, - low_threshold, - high_threshold -): +def trigger_restyling_process(timing_uuid, update_inference_settings, \ + transformation_stage, promote_new_generation, **kwargs): from ui_components.methods.common_methods import add_image_variant, promote_image_variant data_repo = DataRepo() - timing: InternalFrameTimingObject = data_repo.get_timing_from_uuid( - timing_uuid) - 
custom_pipeline = "" + + timing: InternalFrameTimingObject = data_repo.get_timing_from_uuid(timing_uuid) + project_settings: InternalSettingObject = data_repo.get_project_setting(timing.project.uuid) + + if transformation_stage == ImageStage.SOURCE_IMAGE.value: + source_image = timing.source_image + else: + variants: List[InternalFileObject] = timing.alternative_images_list + number_of_variants = len(variants) + source_image = timing.primary_image - # TODO: add proper form validations throughout the code - if not prompt: - st.error("Please enter a prompt") - return + query_obj = MLQueryObject( + timing_uuid, + image_uuid=source_image.uuid, + width=project_settings.width, + height=project_settings.height, + **kwargs + ) if update_inference_settings is True: prompt = prompt.replace(",", ".") @@ -52,57 +47,23 @@ def trigger_restyling_process( data_repo.update_project_setting( timing.project.uuid, default_prompt=prompt, - default_strength=strength, - default_model_id=model_uuid, - default_custom_pipeline=custom_pipeline, - default_negative_prompt=negative_prompt, - default_guidance_scale=guidance_scale, - default_seed=seed, - default_num_inference_steps=num_inference_steps, - default_which_stage_to_run_on=transformation_stage, - default_custom_models=custom_models, - default_adapter_type=adapter_type + default_strength=query_obj.strength, + default_model_id=query_obj.model_uuid, + default_negative_prompt=query_obj.negative_prompt, + default_guidance_scale=query_obj.guidance_scale, + default_seed=query_obj.seed, + default_num_inference_steps=query_obj.num_inference_steps, + default_which_stage_to_run_on=query_obj.transformation_stage, + default_custom_models=query_obj.custom_models, + default_adapter_type=query_obj.adapter_type, + default_low_threshold=query_obj.low_threshold, + default_high_threshold=query_obj.high_threshold ) - if low_threshold != "": - data_repo.update_project_setting( - timing.project.uuid, default_low_threshold=low_threshold) - if high_threshold != 
"": - data_repo.update_project_setting( - timing.project.uuid, default_high_threshold=high_threshold) + dynamic_prompting(prompt, source_image, timing_uuid) - if timing.source_image == "": - source_image = "" - else: - source_image = timing.source_image - - data_repo.update_specific_timing( - uuid=timing_uuid, - model_id=model_uuid, - source_image_id=timing.source_image.uuid, - prompt=prompt, - strength=strength, - custom_pipeline=custom_pipeline, - negative_prompt=negative_prompt, - guidance_scale=guidance_scale, - seed=seed, - num_inference_steps=num_inference_steps, - custom_models=custom_models, - adapter_type=adapter_type, - low_threshold=low_threshold, - high_threshold=high_threshold - ) - dynamic_prompting(prompt, source_image, timing_uuid) - - timing = data_repo.get_timing_from_uuid(timing_uuid) - if transformation_stage == ImageStage.SOURCE_IMAGE.value: - source_image = timing.source_image - else: - variants: List[InternalFileObject] = timing.alternative_images_list - number_of_variants = len(variants) - source_image = timing.primary_image - - output_file = restyle_images(timing_uuid, source_image) + # TODO: reverse the log creation flow (create log first and then pass the uuid) + output_file = restyle_images(query_obj) if output_file != None: add_image_variant(output_file.uuid, timing_uuid) @@ -118,154 +79,123 @@ def trigger_restyling_process( else: print("No new generation to promote") - -def restyle_images(timing_uuid, source_image: InternalFileObject) -> InternalFileObject: - data_repo = DataRepo() - timing: InternalFrameTimingObject = data_repo.get_timing_from_uuid( - timing_uuid) - - model_name = timing.model.name - strength = timing.strength - - if model_name == "stable-diffusion-img2img-v2.1": - output_file = prompt_model_stability(timing_uuid, source_image) - elif model_name == "depth2img": - output_file = prompt_model_depth2img(strength, timing_uuid, source_image) - elif model_name == "pix2pix": - output_file = prompt_model_pix2pix(timing_uuid, 
source_image) - elif model_name == "LoRA": - output_file = prompt_model_lora(timing_uuid, source_image) +def restyle_images(query_obj: MLQueryObject) -> InternalFileObject: + model_name = query_obj.model_name + if model_name == "LoRA": + output_file = prompt_model_lora(query_obj) elif model_name == "controlnet": - output_file = prompt_model_controlnet(timing_uuid, source_image) + output_file = prompt_model_controlnet(query_obj) elif model_name == "Dreambooth": - output_file = prompt_model_dreambooth(timing_uuid, source_image) - elif model_name == 'StyleGAN-NADA': - output_file = prompt_model_stylegan_nada(timing_uuid, source_image) - elif model_name == "stable_diffusion_xl": - output_file = prompt_model_stable_diffusion_xl(timing_uuid) - elif model_name == "real-esrgan-upscaling": - output_file = prompt_model_real_esrgan_upscaling(source_image) - elif model_name == 'controlnet_1_1_x_realistic_vision_v2_0': - output_file = prompt_model_controlnet_1_1_x_realistic_vision_v2_0( - source_image) - elif model_name == 'urpm-v1.3': - output_file = prompt_model_urpm_v1_3(source_image) + output_file = prompt_model_dreambooth(query_obj) + else: + model = REPLICATE_MODEL.get_model_by_name(model_name) # TODO: remove this dependency + output_file = prompt_model(model, query_obj) return output_file - - -def prompt_clip_interrogator(input_image, which_model, best_or_fast): - if which_model == "Stable Diffusion 1.5": - which_model = "ViT-L-14/openai" - elif which_model == "Stable Diffusion 2": - which_model = "ViT-H-14/laion2b_s32b_b79k" - - if not input_image.startswith("http"): - input_image = open(input_image, "rb") - +def prompt_model(model, query_obj: MLQueryObject) -> InternalFileObject: ml_client = get_ml_client() - output, _ = ml_client.predict_model_output( - REPLICATE_MODEL.clip_interrogator, image=input_image, clip_model_name=which_model, mode=best_or_fast) - - return output - -def prompt_model_real_esrgan_upscaling(input_image): data_repo = DataRepo() - if not 
input_image.startswith("http"): - input_image = open(input_image, "rb") - - ml_client = get_ml_client() - output, log = ml_client.predict_model_output( - REPLICATE_MODEL.real_esrgan_upscale, image=input_image, upscale=2 + output, log = ml_client.predict_model_output_standardized(model, query_obj) + + filename = str(uuid.uuid4()) + ".png" + output_file = data_repo.create_file( + name=filename, + type=InternalFileType.IMAGE.value, + hosted_url=output[0], + inference_log_id=log.uuid ) - filename = str(uuid.uuid4()) + ".png" - output_file = data_repo.create_file(name=filename, type=InternalFileType.IMAGE.value, - hosted_url=output, inference_log_id=log.uuid) return output_file -# TODO: fix the options input, only certain words can be input in this -def prompt_model_stylegan_nada(timing_uuid, input_image): - from ui_components.methods.common_methods import resize_image - +def prompt_model_lora(query_obj: MLQueryObject) -> InternalFileObject: data_repo = DataRepo() - timing: InternalFrameTimingObject = data_repo.get_timing_from_uuid( - timing_uuid) - input_file = input_image.location - if 'http' in input_image.location: - input_file = open(input_image.location, 'rb') + timing_uuid = query_obj.timing_uuid + source_image_file: InternalFileObject = data_repo.get_file_from_uuid(query_obj.image_uuid) - ml_client = get_ml_client() - output, log = ml_client.predict_model_output(REPLICATE_MODEL.stylegan_nada, input=input_file, - output_style=timing.prompt) - filename = str(uuid.uuid4()) + ".png" - image_file = data_repo.create_file(name=filename, type=InternalFileType.IMAGE.value, - hosted_url=output[0], inference_log_id=log.uuid) - output_file = resize_image(timing.project.name, 512, 512, image_file) - - return output_file - -def prompt_model_stable_diffusion_xl(timing_uuid): - from ui_components.methods.common_methods import resize_image - - data_repo = DataRepo() - timing: InternalFrameTimingObject = data_repo.get_timing_from_uuid( - timing_uuid) - - ml_client = 
get_ml_client() - output, log = ml_client.predict_model_output(REPLICATE_MODEL.sdxl, prompt=timing.prompt) - filename = str(uuid.uuid4()) + ".png" - image_file = data_repo.create_file(name=filename, type=InternalFileType.IMAGE.value, - hosted_url=output[0], inference_log_id=log.uuid) - output_file = resize_image(timing.project.name, 512, 512, image_file) - - return output_file - -def prompt_model_stability(timing_uuid, input_image_file: InternalFileObject): - data_repo = DataRepo() timing: InternalFrameTimingObject = data_repo.get_timing_from_uuid( timing_uuid) project_settings: InternalSettingObject = data_repo.get_project_setting( timing.project.uuid) - index_of_current_item = timing.aux_frame_index - input_image = input_image_file.location - prompt = timing.prompt - strength = timing.strength - if not input_image.startswith("http"): - input_image = open(input_image, "rb") + lora_urls = "" + lora_scales = "" + if "lora_model_1_url" in st.session_state and st.session_state["lora_model_1_url"]: + lora_urls += st.session_state["lora_model_1_url"] + lora_scales += "0.5" + if "lora_model_2_url" in st.session_state and st.session_state["lora_model_2_url"]: + ctn = "" if not len(lora_urls) else " | " + lora_urls += ctn + st.session_state["lora_model_2_url"] + lora_scales += ctn + "0.5" + if st.session_state["lora_model_3_url"]: + ctn = "" if not len(lora_urls) else " | " + lora_urls += ctn + st.session_state["lora_model_3_url"] + lora_scales += ctn + "0.5" - ml_client = get_ml_client() - output, log = ml_client.predict_model_output( - REPLICATE_MODEL.img2img_sd_2_1, - image=input_image, - prompt_strength=float(strength), - prompt=prompt, - negative_prompt=timing.negative_prompt, - width=project_settings.width, - height=project_settings.height, - guidance_scale=timing.guidance_scale, - seed=timing.seed, - num_inference_steps=timing.num_inteference_steps - ) + source_image = source_image_file.location + if source_image[:4] == "http": + input_image = source_image + else: + 
input_image = open(source_image, "rb") - filename = str(uuid.uuid4()) + ".png" - image_file: InternalFileObject = data_repo.create_file(name=filename, type=InternalFileType.IMAGE.value, - hosted_url=output[0], inference_log_id=log.uuid) + if timing.adapter_type != "None": + if source_image[:4] == "http": + adapter_condition_image = source_image + else: + adapter_condition_image = open(source_image, "rb") + else: + adapter_condition_image = "" - return image_file + inputs = { + 'prompt': timing.prompt, + 'negative_prompt': timing.negative_prompt, + 'width': project_settings.width, + 'height': project_settings.height, + 'num_outputs': 1, + 'image': input_image, + 'num_inference_steps': timing.num_inteference_steps, + 'guidance_scale': timing.guidance_scale, + 'prompt_strength': timing.strength, + 'scheduler': "DPMSolverMultistep", + 'lora_urls': lora_urls, + 'lora_scales': lora_scales, + 'adapter_type': timing.adapter_type, + 'adapter_condition_image': adapter_condition_image, + } + ml_client = get_ml_client() + max_attempts = 3 + attempts = 0 + while attempts < max_attempts: + try: + output, log = ml_client.predict_model_output( + REPLICATE_MODEL.clones_lora_training_2, **inputs) + print(output) + filename = str(uuid.uuid4()) + ".png" + file: InternalFileObject = data_repo.create_file(name=filename, type=InternalFileType.IMAGE.value, + hosted_url=output[0], inference_log_id=log.uuid) + return file + except replicate.exceptions.ModelError as e: + if "NSFW content detected" in str(e): + print("NSFW content detected. 
Attempting to rerun code...") + attempts += 1 + continue + else: + raise e + except Exception as e: + raise e -def prompt_model_dreambooth(timing_uuid, source_image_file: InternalFileObject): +def prompt_model_dreambooth(query_obj: MLQueryObject): data_repo = DataRepo() if not ('dreambooth_model_uuid' in st.session_state and st.session_state['dreambooth_model_uuid']): st.error('No dreambooth model selected') return - + + timing_uuid = query_obj.timing_uuid + source_image_file: InternalFileObject = data_repo.get_file_from_uuid(query_obj.image_uuid) timing: InternalFrameTimingObject = data_repo.get_timing_from_uuid( timing_uuid) timing_details: List[InternalFrameTimingObject] = data_repo.get_timing_list_from_project( @@ -339,100 +269,13 @@ def prompt_model_dreambooth(timing_uuid, source_image_file: InternalFileObject): return None - -def prompt_model_depth2img(strength, timing_uuid, source_image_file: InternalFileObject) -> InternalFileObject: - data_repo = DataRepo() - timing: InternalFrameTimingObject = data_repo.get_timing_from_uuid( - timing_uuid) - - prompt = timing.prompt - num_inference_steps = timing.num_inteference_steps - guidance_scale = timing.guidance_scale - negative_prompt = timing.negative_prompt - source_image = source_image_file.location - if not source_image.startswith("http"): - source_image = open(source_image, "rb") - - ml_client = get_ml_client() - output, log = ml_client.predict_model_output(REPLICATE_MODEL.jagilley_controlnet_depth2img, input_image=source_image, - prompt_strength=float(strength), prompt=prompt, negative_prompt=negative_prompt, - num_inference_steps=num_inference_steps, guidance_scale=guidance_scale) - - filename = str(uuid.uuid4()) + ".png" - image_file: InternalFileObject = data_repo.create_file(name=filename, type=InternalFileType.IMAGE.value, - hosted_url=output[0], inference_log_id=log.uuid) - return image_file - - -def prompt_model_blip2(input_image, query): - if not input_image.startswith("http"): - input_image = 
open(input_image, "rb") - - ml_client = get_ml_client() - output, _ = ml_client.predict_model_output( - REPLICATE_MODEL.salesforce_blip_2, image=input_image, question=query) - - return output - - -def prompt_model_pix2pix(timing_uuid, input_image_file: InternalFileObject): - data_repo = DataRepo() - timing: InternalFrameTimingObject = data_repo.get_timing_from_uuid( - timing_uuid) - - prompt = timing.prompt - guidance_scale = timing.guidance_scale - seed = timing.seed - input_image = input_image_file.location - if not input_image.startswith("http"): - input_image = open(input_image, "rb") - - ml_client = get_ml_client() - output, log = ml_client.predict_model_output(REPLICATE_MODEL.arielreplicate, input_image=input_image, instruction_text=prompt, - seed=seed, cfg_image=1.2, cfg_text=guidance_scale, resolution=704) - - filename = str(uuid.uuid4()) + ".png" - image_file: InternalFileObject = data_repo.create_file(name=filename, type=InternalFileType.IMAGE.value, - hosted_url=output[0], inference_log_id=log.uuid) - return image_file - - -def facial_expression_recognition(input_image): - if not input_image.startswith("http"): - input_image = open(input_image, "rb") - - ml_client = get_ml_client() - output, _ = ml_client.predict_model_output( - REPLICATE_MODEL.phamquiluan_face_recognition, input_path=input_image) - - emo_label = output[0]["emo_label"] - if emo_label == "disgust": - emo_label = "disgusted" - elif emo_label == "fear": - emo_label = "fearful" - elif emo_label == "surprised": - emo_label = "surprised" - emo_proba = output[0]["emo_proba"] - if emo_proba > 0.95: - emotion = (f"very {emo_label} expression") - elif emo_proba > 0.85: - emotion = (f"{emo_label} expression") - elif emo_proba > 0.75: - emotion = (f"somewhat {emo_label} expression") - elif emo_proba > 0.65: - emotion = (f"slightly {emo_label} expression") - elif emo_proba > 0.55: - emotion = (f"{emo_label} expression") - else: - emotion = (f"neutral expression") - return emotion - - -def 
prompt_model_controlnet(timing_uuid, intput_image_file: InternalFileObject): +def prompt_model_controlnet(query_obj: MLQueryObject): data_repo = DataRepo() timing: InternalFrameTimingObject = data_repo.get_timing_from_uuid( timing_uuid) + timing_uuid = query_obj.timing_uuid + intput_image_file: InternalFileObject = data_repo.get_file_from_uuid(query_obj.image_uuid) input_image = intput_image_file.location if timing.adapter_type == "normal": @@ -482,134 +325,60 @@ def prompt_model_controlnet(timing_uuid, intput_image_file: InternalFileObject): hosted_url=output[0], inference_log_id=log.uuid) return output_file +def prompt_clip_interrogator(input_image, which_model, best_or_fast): + if which_model == "Stable Diffusion 1.5": + which_model = "ViT-L-14/openai" + elif which_model == "Stable Diffusion 2": + which_model = "ViT-H-14/laion2b_s32b_b79k" -def prompt_model_urpm_v1_3(timing_uuid, source_image_file: InternalFileObject): - data_repo = DataRepo() - timing = data_repo.get_timing_from_uuid(timing_uuid) - - source_image = source_image_file.location - if not source_image.startswith("http"): - source_image = open(source_image, "rb") - - inputs = { - 'image': source_image, - 'prompt': timing.prompt, - 'negative_prompt': timing.negative_prompt, - 'strength': timing.strength, - 'guidance_scale': timing.guidance_scale, - 'num_inference_steps': timing.num_inference_steps, - 'upscale': 1, - 'seed': timing.seed, - } + if not input_image.startswith("http"): + input_image = open(input_image, "rb") ml_client = get_ml_client() - output, log = ml_client.predict_model_output(REPLICATE_MODEL.urpm, **inputs) - - filename = str(uuid.uuid4()) + ".png" - output_file: InternalFileObject = data_repo.create_file(name=filename, type=InternalFileType.IMAGE.value, - hosted_url=output[0], inference_log_id=log.uuid) - return output_file - + output, _ = ml_client.predict_model_output( + REPLICATE_MODEL.clip_interrogator, image=input_image, clip_model_name=which_model, mode=best_or_fast) -def 
prompt_model_controlnet_1_1_x_realistic_vision_v2_0(timing_uuid, input_image): - data_repo = DataRepo() - timing = data_repo.get_timing_from_uuid(timing_uuid) + return output +def prompt_model_blip2(input_image, query): if not input_image.startswith("http"): input_image = open(input_image, "rb") - inputs = { - 'image': input_image, - 'prompt': timing.prompt, - 'ddim_steps': timing.num_inference_steps, - 'strength': timing.strength, - 'scale': timing.guidance_scale, - 'seed': timing.seed, - } - ml_client = get_ml_client() - output, log = ml_client.predict_model_output( - REPLICATE_MODEL.controlnet_1_1_x_realistic_vision_v2_0, **inputs) - - filename = str(uuid.uuid4()) + ".png" - output_file: InternalFileObject = data_repo.create_file(name=filename, type=InternalFileType.IMAGE.value, - hosted_url=output[1], inference_log_id=log.uuid) - return output_file - + output, _ = ml_client.predict_model_output( + REPLICATE_MODEL.salesforce_blip_2, image=input_image, question=query) -def prompt_model_lora(timing_uuid, source_image_file: InternalFileObject) -> InternalFileObject: - data_repo = DataRepo() - timing: InternalFrameTimingObject = data_repo.get_timing_from_uuid( - timing_uuid) - project_settings: InternalSettingObject = data_repo.get_project_setting( - timing.project.uuid) + return output - lora_urls = "" - lora_scales = "" - if "lora_model_1_url" in st.session_state and st.session_state["lora_model_1_url"]: - lora_urls += st.session_state["lora_model_1_url"] - lora_scales += "0.5" - if "lora_model_2_url" in st.session_state and st.session_state["lora_model_2_url"]: - ctn = "" if not len(lora_urls) else " | " - lora_urls += ctn + st.session_state["lora_model_2_url"] - lora_scales += ctn + "0.5" - if st.session_state["lora_model_3_url"]: - ctn = "" if not len(lora_urls) else " | " - lora_urls += ctn + st.session_state["lora_model_3_url"] - lora_scales += ctn + "0.5" +def facial_expression_recognition(input_image): + if not input_image.startswith("http"): + input_image 
= open(input_image, "rb") - source_image = source_image_file.location - if source_image[:4] == "http": - input_image = source_image - else: - input_image = open(source_image, "rb") + ml_client = get_ml_client() + output, _ = ml_client.predict_model_output( + REPLICATE_MODEL.phamquiluan_face_recognition, input_path=input_image) - if timing.adapter_type != "None": - if source_image[:4] == "http": - adapter_condition_image = source_image - else: - adapter_condition_image = open(source_image, "rb") + emo_label = output[0]["emo_label"] + if emo_label == "disgust": + emo_label = "disgusted" + elif emo_label == "fear": + emo_label = "fearful" + elif emo_label == "surprised": + emo_label = "surprised" + emo_proba = output[0]["emo_proba"] + if emo_proba > 0.95: + emotion = (f"very {emo_label} expression") + elif emo_proba > 0.85: + emotion = (f"{emo_label} expression") + elif emo_proba > 0.75: + emotion = (f"somewhat {emo_label} expression") + elif emo_proba > 0.65: + emotion = (f"slightly {emo_label} expression") + elif emo_proba > 0.55: + emotion = (f"{emo_label} expression") else: - adapter_condition_image = "" - - inputs = { - 'prompt': timing.prompt, - 'negative_prompt': timing.negative_prompt, - 'width': project_settings.width, - 'height': project_settings.height, - 'num_outputs': 1, - 'image': input_image, - 'num_inference_steps': timing.num_inteference_steps, - 'guidance_scale': timing.guidance_scale, - 'prompt_strength': timing.strength, - 'scheduler': "DPMSolverMultistep", - 'lora_urls': lora_urls, - 'lora_scales': lora_scales, - 'adapter_type': timing.adapter_type, - 'adapter_condition_image': adapter_condition_image, - } - - ml_client = get_ml_client() - max_attempts = 3 - attempts = 0 - while attempts < max_attempts: - try: - output, log = ml_client.predict_model_output( - REPLICATE_MODEL.clones_lora_training_2, **inputs) - print(output) - filename = str(uuid.uuid4()) + ".png" - file: InternalFileObject = data_repo.create_file(name=filename, 
type=InternalFileType.IMAGE.value, - hosted_url=output[0], inference_log_id=log.uuid) - return file - except replicate.exceptions.ModelError as e: - if "NSFW content detected" in str(e): - print("NSFW content detected. Attempting to rerun code...") - attempts += 1 - continue - else: - raise e - except Exception as e: - raise e + emotion = (f"neutral expression") + return emotion def inpainting(input_image: str, prompt, negative_prompt, timing_uuid, invert_mask, pass_mask=False) -> InternalFileObject: data_repo = DataRepo() @@ -650,7 +419,6 @@ def remove_background(input_image): REPLICATE_MODEL.pollination_modnet, image=input_image) return output - def create_depth_mask_image(input_image, layer, timing_uuid): from ui_components.methods.common_methods import create_or_update_mask diff --git a/ui_components/models.py b/ui_components/models.py index 8836fc07..e3873f7d 100644 --- a/ui_components/models.py +++ b/ui_components/models.py @@ -19,9 +19,9 @@ def __init__(self, **kwargs): @property def location(self): - if self.hosted_url: - return self.hosted_url - return self.local_path + if self.local_path: + return self.local_path + return self.hosted_url class InternalProjectObject: diff --git a/ui_components/widgets/variant_comparison_element.py b/ui_components/widgets/variant_comparison_element.py index 49d2d076..37cd9ea8 100644 --- a/ui_components/widgets/variant_comparison_element.py +++ b/ui_components/widgets/variant_comparison_element.py @@ -69,7 +69,7 @@ def variant_comparison_element(timing_uuid, stage=CreativeProcessType.MOTION.val else: st.info(f"**Variant #{which_variant}**") - st.image(variants[which_variant- 1].location, + st.image(variants[which_variant - 1].location, use_column_width=True) if which_variant - 1 != current_variant: diff --git a/utils/constants.py b/utils/constants.py index 4fffff4b..7b084f78 100644 --- a/utils/constants.py +++ b/utils/constants.py @@ -1,5 +1,9 @@ # streamlit state constants +import json +from shared.constants import 
AIModelCategory, AIModelType from utils.enum import ExtendedEnum +from utils.ml_processor.replicate.constants import REPLICATE_MODEL +import streamlit as st LOGGED_USER = 'logged_user' @@ -8,4 +12,203 @@ class ImageStage(ExtendedEnum): SOURCE_IMAGE = 'Source Image' MAIN_VARIANT = 'Main Variant' - NONE = 'None' \ No newline at end of file + NONE = 'None' + + +# single template for passing query params +class MLQueryObject: + def __init__( + self, + timing_uuid, + model_name, + prompt, + strength, + negative_prompt, + guidance_scale, + seed, + num_inference_steps, + adapter_type, + height=512, + width=512, + low_threshold=100, # update these default values + high_threshold=200, + image_uuid=None, + mask_uuid=None, + **kwargs + ): + self.timing_uuid = timing_uuid + self.model_name = model_name + self.prompt = prompt + self.image_uuid = image_uuid + self.mask_uuid = mask_uuid + self.strength = strength + self.height = height + self.width = width + self.negative_prompt = negative_prompt + self.guidance_scale = guidance_scale + self.seed = seed + self.num_inference_steps = num_inference_steps + self.adapter_type = adapter_type + self.low_threshold = low_threshold + self.high_threshold = high_threshold + self.data = kwargs + + self._validate_params() + + def _validate_params(self): + if not (self.prompt or self.image_uuid): + st.error("Prompt or image is required to run the model") + raise Exception("Prompt or image is required to run the model") + + def to_json(self): + return json.dumps(self.__dict__) + +ML_MODEL_LIST = [ + { + "name" : 'stable-diffusion-img2img-v2.1', + "version": REPLICATE_MODEL.img2img_sd_2_1.version, + "replicate_url" : REPLICATE_MODEL.img2img_sd_2_1.name, + "category" : AIModelCategory.BASE_SD.value, + "keyword" : "", + "model_type": json.dumps([AIModelType.IMG2IMG.value]), + "enabled": False + }, + { + "name" : 'depth2img', + "version": REPLICATE_MODEL.jagilley_controlnet_depth2img.version, + "replicate_url" : 
REPLICATE_MODEL.jagilley_controlnet_depth2img.name, + "category" : AIModelCategory.BASE_SD.value, + "keyword" : "", + "model_type": json.dumps([AIModelType.IMG2IMG.value]), + "enabled": False + }, + { + "name" : 'pix2pix', + "version": REPLICATE_MODEL.arielreplicate.version, + "replicate_url" : REPLICATE_MODEL.arielreplicate.name, + "category" : AIModelCategory.BASE_SD.value, + "keyword" : "", + "model_type": json.dumps([AIModelType.IMG2IMG.value]), + "enabled": False + }, + { + "name" : 'controlnet', + "category" : AIModelCategory.CONTROLNET.value, + "keyword" : "", + "model_type": json.dumps([AIModelType.IMG2IMG.value]), + "enabled": True + }, + { + "name" : 'Dreambooth', + "category" : AIModelCategory.DREAMBOOTH.value, + "keyword" : "", + "model_type": json.dumps([AIModelType.IMG2IMG.value]), + "enabled": True + }, + { + "name" : 'LoRA', + "category" : AIModelCategory.LORA.value, + "keyword" : "", + "model_type": json.dumps([AIModelType.IMG2IMG.value]), + "enabled": True + }, + { + "name" : 'StyleGAN-NADA', + "version": REPLICATE_MODEL.stylegan_nada.version, + "replicate_url" : REPLICATE_MODEL.stylegan_nada.name, + "category" : AIModelCategory.BASE_SD.value, + "keyword" : "", + "model_type": json.dumps([AIModelType.IMG2IMG.value]), + "enabled": False + }, + { + "name" : 'real-esrgan-upscaling', + "version": REPLICATE_MODEL.real_esrgan_upscale.version, + "replicate_url" : REPLICATE_MODEL.real_esrgan_upscale.name, + "category" : AIModelCategory.BASE_SD.value, + "keyword" : "", + "model_type": json.dumps([AIModelType.IMG2IMG.value]), + "enabled": True + }, + { + "name" : 'controlnet_1_1_x_realistic_vision_v2_0', + "version": REPLICATE_MODEL.controlnet_1_1_x_realistic_vision_v2_0.version, + "replicate_url" : REPLICATE_MODEL.controlnet_1_1_x_realistic_vision_v2_0.name, + "category" : AIModelCategory.BASE_SD.value, + "keyword" : "", + "model_type": json.dumps([AIModelType.IMG2IMG.value]), + "enabled": True + }, + { + "name" : 'urpm-v1.3', + "version": 
REPLICATE_MODEL.urpm.version, + "replicate_url" : REPLICATE_MODEL.urpm.name, + "category" : AIModelCategory.BASE_SD.value, + "keyword" : "", + "model_type": json.dumps([AIModelType.IMG2IMG.value]), + "enabled": True + }, + { + "name": "stable_diffusion_xl", + "version": REPLICATE_MODEL.sdxl.version, + "replicate_url": REPLICATE_MODEL.sdxl.name, + "category": AIModelCategory.BASE_SD.value, + "keyword": "", + "model_type": json.dumps([AIModelType.TXT2IMG.value, AIModelType.IMG2IMG.value]), + "enabled": True + }, + { + "name": "realistic_vision_5", + "version": REPLICATE_MODEL.realistic_vision_v5.version, + "replicate_url": REPLICATE_MODEL.realistic_vision_v5.name, + "category": AIModelCategory.BASE_SD.value, + "keyword": "", + "model_type": json.dumps([AIModelType.TXT2IMG.value]), + "enabled": True + }, + { + "name": "deliberate_v3", + "version": REPLICATE_MODEL.deliberate_v3.version, + "replicate_url": REPLICATE_MODEL.deliberate_v3.name, + "category": AIModelCategory.BASE_SD.value, + "keyword": "", + "model_type": json.dumps([AIModelType.TXT2IMG.value, AIModelType.IMG2IMG.value]), + "enabled": True + }, + { + "name": "dreamshaper_v7", + "version": REPLICATE_MODEL.dreamshaper_v7.version, + "replicate_url": REPLICATE_MODEL.dreamshaper_v7.name, + "category": AIModelCategory.BASE_SD.value, + "keyword": "", + "model_type": json.dumps([AIModelType.TXT2IMG.value, AIModelType.IMG2IMG.value]), + "enabled": True + }, + { + "name": "epic_realism_v5", + "version": REPLICATE_MODEL.epicrealism_v5.version, + "replicate_url": REPLICATE_MODEL.epicrealism_v5.name, + "category": AIModelCategory.BASE_SD.value, + "keyword": "", + "model_type": json.dumps([AIModelType.TXT2IMG.value, AIModelType.IMG2IMG.value]), + "enabled": True + }, + { + "name": "sdxl_controlnet", + "version": REPLICATE_MODEL.sdxl_controlnet.version, + "replicate_url": REPLICATE_MODEL.sdxl_controlnet.name, + "category": AIModelCategory.BASE_SD.value, + "keyword": "", + "model_type": 
json.dumps([AIModelType.IMG2IMG.value]), + "enabled": True + }, + { + "name": "realistic_vision_img2img", + "version": REPLICATE_MODEL.realistic_vision_v5_img2img.version, + "replicate_url": REPLICATE_MODEL.realistic_vision_v5_img2img.name, + "category": AIModelCategory.BASE_SD.value, + "keyword": "", + "model_type": json.dumps([AIModelType.IMG2IMG.value]), + "enabled": True + } + ] \ No newline at end of file diff --git a/utils/ml_processor/replicate/constants.py b/utils/ml_processor/replicate/constants.py index ec163e2a..d37928e7 100644 --- a/utils/ml_processor/replicate/constants.py +++ b/utils/ml_processor/replicate/constants.py @@ -34,7 +34,21 @@ class REPLICATE_MODEL: real_esrgan_upscale = ReplicateModel("cjwbw/real-esrgan", "d0ee3d708c9b911f122a4ad90046c5d26a0293b99476d697f6bb7f2e251ce2d4") controlnet_1_1_x_realistic_vision_v2_0 = ReplicateModel("usamaehsan/controlnet-1.1-x-realistic-vision-v2.0", "7fbf4c86671738f97896c9cb4922705adfcdcf54a6edab193bb8c176c6b34a69") urpm = ReplicateModel("mcai/urpm-v1.3-img2img", "4df956e8dbfebf1afaf0c3ee98ad426ec58c4262d24360d054582e5eab2cb5f6") - sdxl = ReplicateModel("stability-ai/sdxl", "a00d0b7dcbb9c3fbb34ba87d2d5b46c56969c84a628bf778a7fdaec30b1b99c5") + sdxl = ReplicateModel("stability-ai/sdxl", "af1a68a271597604546c09c64aabcd7782c114a63539a4a8d14d1eeda5630c33") + # addition 30/9/2023 + realistic_vision_v5 = ReplicateModel("heedster/realistic-vision-v5", "c0259010b93e7a4102a4ba946d70e06d7d0c7dc007201af443cfc8f943ab1d3c") + deliberate_v3 = ReplicateModel("pagebrain/deliberate-v3", "1851b62340ae657f05f8b8c8a020e3f9a46efde9fe80f273eef026c0003252ac") + dreamshaper_v7 = ReplicateModel("pagebrain/dreamshaper-v7", "0deba88df4e49b302585e1a7b6bd155e18962c1048966a40fe60ba05805743ff") + epicrealism_v5 = ReplicateModel("pagebrain/epicrealism-v5", "222465e57e4d9812207f14133c9499d47d706ecc41a8bf400120285b2f030b42") + sdxl_controlnet = ReplicateModel("lucataco/sdxl-controlnet", 
"db2ffdbdc7f6cb4d6dab512434679ee3366ae7ab84f89750f8947d5594b79a47") + realistic_vision_v5_img2img = ReplicateModel("lucataco/realistic-vision-v5-img2img", "82bbb4595458d6be142450fc6d8c4d79c936b92bd184dd2d6dd71d0796159819") + + @staticmethod + def get_model_by_name(name): + for model in REPLICATE_MODEL.__dict__.values(): + if isinstance(model, ReplicateModel) and model.name == name: + return model + return None DEFAULT_LORA_MODEL_URL = "https://replicate.delivery/pbxt/nWm6eP9ojwVvBCaWoWZVawOKRfgxPJmkVk13ES7PX36Y66kQA/tmpxuz6k_k2datazip.safetensors" \ No newline at end of file diff --git a/utils/ml_processor/replicate/replicate.py b/utils/ml_processor/replicate/replicate.py index 50597e83..c1e89759 100644 --- a/utils/ml_processor/replicate/replicate.py +++ b/utils/ml_processor/replicate/replicate.py @@ -4,6 +4,7 @@ from shared.constants import REPLICATE_USER from shared.file_upload.s3 import upload_file from utils.common_utils import get_current_user_uuid +from utils.constants import MLQueryObject from utils.data_repo.data_repo import DataRepo from utils.ml_processor.ml_interface import MachineLearningProcessor import replicate @@ -16,7 +17,7 @@ from utils.ml_processor.replicate.constants import REPLICATE_MODEL, ReplicateModel from repository.data_logger import log_model_inference import utils.local_storage.local_storage as local_storage -from utils.ml_processor.replicate.utils import check_user_credits, check_user_credits_async +from utils.ml_processor.replicate.utils import check_user_credits, check_user_credits_async, get_model_params_from_query_obj class ReplicateProcessor(MachineLearningProcessor): @@ -52,6 +53,11 @@ def get_model_by_name(self, model_name, model_version=None): model_version = model.versions.get(model_version) if model_version else model return model_version + # it converts the standardized query_obj into params required by replicate + def predict_model_output_standardized(self, model: ReplicateModel, query_obj: MLQueryObject): + params = 
get_model_params_from_query_obj(model, query_obj) + return self.predict_model_output(model, **params) + @check_user_credits def predict_model_output(self, model: ReplicateModel, **kwargs): model_version = self.get_model(model) diff --git a/utils/ml_processor/replicate/utils.py b/utils/ml_processor/replicate/utils.py index 23d84225..d84c2af2 100644 --- a/utils/ml_processor/replicate/utils.py +++ b/utils/ml_processor/replicate/utils.py @@ -1,4 +1,7 @@ from utils.common_utils import user_credits_available +from utils.constants import MLQueryObject +from utils.data_repo.data_repo import DataRepo +from utils.ml_processor.replicate.constants import REPLICATE_MODEL def check_user_credits(method): @@ -19,4 +22,138 @@ async def wrapper(self, *args, **kwargs): else: raise RuntimeError("Insufficient credits. Please recharge") - return wrapper \ No newline at end of file + return wrapper + +def get_model_params_from_query_obj(model, query_obj: MLQueryObject): + data_repo = DataRepo() + + input_image, mask = None, None + if query_obj.image_uuid: + image = data_repo.get_file_from_uuid(query_obj.image_uuid) + if image: + input_image = image.location + if not input_image.startswith('http'): + input_image = open(input_image, 'rb') + + if query_obj.mask_uuid: + mask = data_repo.get_file_from_uuid(query_obj.mask_uuid) + if mask: + mask = mask.location + if not mask.startswith('http'): + mask = open(mask, 'rb') + + if model == REPLICATE_MODEL.img2img_sd_2_1: + data = { + "image" : input_image, + "prompt_strength" : query_obj.strength, + "prompt" : query_obj.prompt, + "negative_prompt" : query_obj.negative_prompt, + "width" : query_obj.width, + "height" : query_obj.height, + "guidance_scale" : query_obj.guidance_scale, + "seed" : query_obj.seed, + "num_inference_steps" : query_obj.num_inteference_steps + } + elif model == REPLICATE_MODEL.real_esrgan_upscale: + data = { + "image": input_image, + "upscale": query_obj.data.get('upscale', 2), + } + elif model == 
REPLICATE_MODEL.stylegan_nada: + data = { + "input": input_image, + "output_style": query_obj.prompt + } + elif model == REPLICATE_MODEL.sdxl: + data = { + "prompt" : query_obj.prompt, + "negative_prompt" : query_obj.negative_prompt, + "width" : query_obj.width, + "height" : query_obj.height, + "image": input_image, + "mask": mask + } + elif model == REPLICATE_MODEL.jagilley_controlnet_depth2img: + data = { + "input_image" : input_image, + "prompt_strength" : query_obj.strength, + "prompt" : query_obj.prompt, + "negative_prompt" : query_obj.negative_prompt, + "num_inference_steps" : query_obj.num_inference_steps, + "guidance_scale" : query_obj.guidance_scale + } + elif model == REPLICATE_MODEL.arielreplicate: + data = { + "input_image" : input_image, + "instruction_text" : query_obj.prompt, + "seed" : query_obj.seed, + "cfg_image" : query_obj.data.get("cfg", 1.2), + "cfg_text" : query_obj.guidance_scale, + "resolution" : 704 + } + elif model == REPLICATE_MODEL.urpm: + data = { + 'image': input_image, + 'prompt': query_obj.prompt, + 'negative_prompt': query_obj.negative_prompt, + 'strength': query_obj.strength, + 'guidance_scale': query_obj.guidance_scale, + 'num_inference_steps': query_obj.num_inference_steps, + 'upscale': 1, + 'seed': query_obj.seed, + } + elif model == REPLICATE_MODEL.controlnet_1_1_x_realistic_vision_v2_0: + data = { + 'image': input_image, + 'prompt': query_obj.prompt, + 'ddim_steps': query_obj.num_inference_steps, + 'strength': query_obj.strength, + 'scale': query_obj.guidance_scale, + 'seed': query_obj.seed + } + elif model == REPLICATE_MODEL.realistic_vision_v5: + if not (query_obj.guidance_scale >= 3.5 and query_obj.guidance_scale <= 7.0): + raise ValueError("Guidance scale must be between 3.5 and 7.0") + + data = { + 'prompt': query_obj.prompt, + 'negative_prompt': query_obj.negative_prompt, + 'guidance': query_obj.guidance_scale, + 'width': query_obj.width, + 'height': query_obj.height, + 'steps': query_obj.num_inference_steps, + 'seed': 
query_obj.seed + } + elif model == REPLICATE_MODEL.deliberate_v3 or model == REPLICATE_MODEL.dreamshaper_v7 or model == REPLICATE_MODEL.epicrealism_v5: + data = { + 'prompt': query_obj.prompt, + 'negative_prompt': query_obj.negative_prompt, + 'image': input_image, + 'mask': mask, + 'width': query_obj.width, + 'height': query_obj.height, + 'prompt_strength': query_obj.strength, + 'guidance_scale': query_obj.guidance_scale, + 'num_inference_steps': query_obj.num_inference_steps, + 'safety_checker': False + } + elif model == REPLICATE_MODEL.sdxl_controlnet: + data = { + 'prompt': query_obj.prompt, + 'negative_prompt': query_obj.negative_prompt, + 'image': input_image, + 'num_inference_steps': query_obj.num_inference_steps, + 'condition_scale': query_obj.data.get('condition_scale', 0.5), + } + elif model == REPLICATE_MODEL.realistic_vision_v5_img2img: + data = { + 'prompt': query_obj.prompt, + 'negative_prompt': query_obj.negative_prompt, + 'image': input_image, + 'steps': query_obj.num_inference_steps, + 'strength': query_obj.strength + } + else: + data = query_obj.to_json() + + return data \ No newline at end of file From d76dac60f83038be67a5187a88f8e4df79213b37 Mon Sep 17 00:00:00 2001 From: piyushK52 Date: Sat, 30 Sep 2023 20:51:31 +0530 Subject: [PATCH 027/164] duplicate alt img fix --- banodoco_settings.py | 3 +-- ui_components/methods/common_methods.py | 2 +- 2 files changed, 2 insertions(+), 3 deletions(-) diff --git a/banodoco_settings.py b/banodoco_settings.py index 5551a80f..5b535a9a 100644 --- a/banodoco_settings.py +++ b/banodoco_settings.py @@ -112,8 +112,7 @@ def create_new_project(user: InternalUserObject, project_name: str, width=512, h "frame_time": 0.0, "animation_style": animation_style, "aux_frame_index": 0, - "source_image_id": source_image.uuid, - "alternative_images": json.dumps([source_image.uuid]) + "source_image_id": source_image.uuid } timing: InternalFrameTimingObject = data_repo.create_timing(**timing_data) diff --git 
a/ui_components/methods/common_methods.py b/ui_components/methods/common_methods.py index 5a442137..dcd91d5a 100644 --- a/ui_components/methods/common_methods.py +++ b/ui_components/methods/common_methods.py @@ -960,7 +960,7 @@ def add_image_variant(image_file_uuid: str, timing_uuid: str): alternative_image_list = timing.alternative_images_list + [image_file] alternative_image_uuid_list = [img.uuid for img in alternative_image_list] primary_image_uuid = alternative_image_uuid_list[0] - alternative_image_uuid_list = json.dumps(alternative_image_uuid_list) + alternative_image_uuid_list = json.dumps(list(set(alternative_image_uuid_list))) data_repo.update_specific_timing( timing_uuid, alternative_images=alternative_image_uuid_list) From 83578584e72054d22010bba4f24789885d38e198 Mon Sep 17 00:00:00 2001 From: piyushK52 Date: Sat, 30 Sep 2023 22:43:46 +0530 Subject: [PATCH 028/164] inference fixed --- app.py | 2 +- .../components/frame_styling_page.py | 2 +- ui_components/methods/common_methods.py | 2 +- ui_components/methods/ml_methods.py | 29 ++++++----- utils/constants.py | 10 ++-- utils/ml_processor/replicate/constants.py | 4 +- utils/ml_processor/replicate/utils.py | 48 +++++++++++++++---- 7 files changed, 63 insertions(+), 34 deletions(-) diff --git a/app.py b/app.py index 4f6b6ae0..dfe6962b 100644 --- a/app.py +++ b/app.py @@ -81,6 +81,6 @@ def main(): try: main() except Exception as e: - sentry_sdk.capture_exception(e) + # sentry_sdk.capture_exception(e) raise e diff --git a/ui_components/components/frame_styling_page.py b/ui_components/components/frame_styling_page.py index 2342195a..3c5210bd 100644 --- a/ui_components/components/frame_styling_page.py +++ b/ui_components/components/frame_styling_page.py @@ -138,7 +138,7 @@ def frame_styling_page(mainheader2, project_uuid: str): for i in range(0, st.session_state['individual_number_of_variants']): trigger_restyling_process( timing_uuid=st.session_state['current_frame_uuid'], - 
model_name=st.session_state['model'], + model_uuid=st.session_state['model'], prompt=st.session_state['prompt'], strength=st.session_state['strength'], negative_prompt=st.session_state['negative_prompt'], diff --git a/ui_components/methods/common_methods.py b/ui_components/methods/common_methods.py index dcd91d5a..5a442137 100644 --- a/ui_components/methods/common_methods.py +++ b/ui_components/methods/common_methods.py @@ -960,7 +960,7 @@ def add_image_variant(image_file_uuid: str, timing_uuid: str): alternative_image_list = timing.alternative_images_list + [image_file] alternative_image_uuid_list = [img.uuid for img in alternative_image_list] primary_image_uuid = alternative_image_uuid_list[0] - alternative_image_uuid_list = json.dumps(list(set(alternative_image_uuid_list))) + alternative_image_uuid_list = json.dumps(alternative_image_uuid_list) data_repo.update_specific_timing( timing_uuid, alternative_images=alternative_image_uuid_list) diff --git a/ui_components/methods/ml_methods.py b/ui_components/methods/ml_methods.py index 0ec4e603..e757f089 100644 --- a/ui_components/methods/ml_methods.py +++ b/ui_components/methods/ml_methods.py @@ -8,7 +8,7 @@ import uuid import urllib from backend.models import InternalFileObject -from shared.constants import REPLICATE_USER, SERVER, InternalFileTag, InternalFileType, ServerType +from shared.constants import REPLICATE_USER, SERVER, AIModelCategory, InternalFileTag, InternalFileType, ServerType from ui_components.constants import MASK_IMG_LOCAL_PATH, TEMP_MASK_FILE from ui_components.models import InternalAIModelObject, InternalFrameTimingObject, InternalSettingObject from utils.constants import ImageStage, MLQueryObject @@ -41,6 +41,7 @@ def trigger_restyling_process(timing_uuid, update_inference_settings, \ **kwargs ) + prompt = query_obj.prompt if update_inference_settings is True: prompt = prompt.replace(",", ".") prompt = prompt.replace("\n", "") @@ -53,14 +54,14 @@ def trigger_restyling_process(timing_uuid, 
update_inference_settings, \ default_guidance_scale=query_obj.guidance_scale, default_seed=query_obj.seed, default_num_inference_steps=query_obj.num_inference_steps, - default_which_stage_to_run_on=query_obj.transformation_stage, - default_custom_models=query_obj.custom_models, + default_which_stage_to_run_on=transformation_stage, + default_custom_models=query_obj.data.get('custom_models', []), default_adapter_type=query_obj.adapter_type, default_low_threshold=query_obj.low_threshold, default_high_threshold=query_obj.high_threshold ) - dynamic_prompting(prompt, source_image, timing_uuid) + query_obj.prompt = dynamic_prompting(prompt, source_image) # TODO: reverse the log creation flow (create log first and then pass the uuid) output_file = restyle_images(query_obj) @@ -80,15 +81,17 @@ def trigger_restyling_process(timing_uuid, update_inference_settings, \ print("No new generation to promote") def restyle_images(query_obj: MLQueryObject) -> InternalFileObject: - model_name = query_obj.model_name - if model_name == "LoRA": + data_repo = DataRepo() + model = data_repo.get_ai_model_from_uuid(query_obj.model_uuid) + + if model.category == AIModelCategory.LORA.value: output_file = prompt_model_lora(query_obj) - elif model_name == "controlnet": + elif model.category == AIModelCategory.CONTROLNET.value: output_file = prompt_model_controlnet(query_obj) - elif model_name == "Dreambooth": + elif model.category == AIModelCategory.DREAMBOOTH.value: output_file = prompt_model_dreambooth(query_obj) else: - model = REPLICATE_MODEL.get_model_by_name(model_name) # TODO: remove this dependency + model = REPLICATE_MODEL.get_model_by_db_obj(model) # TODO: remove this dependency output_file = prompt_model(model, query_obj) return output_file @@ -473,11 +476,7 @@ def create_depth_mask_image(input_image, layer, timing_uuid): return create_or_update_mask(timing_uuid, mask) -def dynamic_prompting(prompt, source_image, timing_uuid): - data_repo = DataRepo() - timing: 
InternalFrameTimingObject = data_repo.get_timing_from_uuid( - timing_uuid) - +def dynamic_prompting(prompt, source_image): if "[expression]" in prompt: prompt_expression = facial_expression_recognition(source_image) prompt = prompt.replace("[expression]", prompt_expression) @@ -497,4 +496,4 @@ def dynamic_prompting(prompt, source_image, timing_uuid): source_image, "the person is looking") prompt = prompt.replace("[looking]", "looking " + str(prompt_looking)) - data_repo.update_specific_timing(timing_uuid, prompt=prompt) \ No newline at end of file + return prompt \ No newline at end of file diff --git a/utils/constants.py b/utils/constants.py index 7b084f78..01348d07 100644 --- a/utils/constants.py +++ b/utils/constants.py @@ -20,14 +20,14 @@ class MLQueryObject: def __init__( self, timing_uuid, - model_name, - prompt, - strength, - negative_prompt, + model_uuid, guidance_scale, seed, num_inference_steps, adapter_type, + strength, + prompt="", + negative_prompt="", height=512, width=512, low_threshold=100, # update these default values @@ -37,7 +37,7 @@ def __init__( **kwargs ): self.timing_uuid = timing_uuid - self.model_name = model_name + self.model_uuid = model_uuid self.prompt = prompt self.image_uuid = image_uuid self.mask_uuid = mask_uuid diff --git a/utils/ml_processor/replicate/constants.py b/utils/ml_processor/replicate/constants.py index d37928e7..09f33fac 100644 --- a/utils/ml_processor/replicate/constants.py +++ b/utils/ml_processor/replicate/constants.py @@ -45,9 +45,9 @@ class REPLICATE_MODEL: realistic_vision_v5_img2img = ReplicateModel("lucataco/realistic-vision-v5-img2img", "82bbb4595458d6be142450fc6d8c4d79c936b92bd184dd2d6dd71d0796159819") @staticmethod - def get_model_by_name(name): + def get_model_by_db_obj(model_db_obj): for model in REPLICATE_MODEL.__dict__.values(): - if isinstance(model, ReplicateModel) and model.name == name: + if isinstance(model, ReplicateModel) and model.name == model_db_obj.replicate_url and model.version == 
model_db_obj.version: return model return None diff --git a/utils/ml_processor/replicate/utils.py b/utils/ml_processor/replicate/utils.py index d84c2af2..b4f45b35 100644 --- a/utils/ml_processor/replicate/utils.py +++ b/utils/ml_processor/replicate/utils.py @@ -24,6 +24,7 @@ async def wrapper(self, *args, **kwargs): return wrapper +# TODO: add data validation (like prompt can't be empty...) def get_model_params_from_query_obj(model, query_obj: MLQueryObject): data_repo = DataRepo() @@ -44,7 +45,6 @@ def get_model_params_from_query_obj(model, query_obj: MLQueryObject): if model == REPLICATE_MODEL.img2img_sd_2_1: data = { - "image" : input_image, "prompt_strength" : query_obj.strength, "prompt" : query_obj.prompt, "negative_prompt" : query_obj.negative_prompt, @@ -54,6 +54,10 @@ def get_model_params_from_query_obj(model, query_obj: MLQueryObject): "seed" : query_obj.seed, "num_inference_steps" : query_obj.num_inteference_steps } + + if input_image: + data['image'] = input_image + elif model == REPLICATE_MODEL.real_esrgan_upscale: data = { "image": input_image, @@ -70,30 +74,38 @@ def get_model_params_from_query_obj(model, query_obj: MLQueryObject): "negative_prompt" : query_obj.negative_prompt, "width" : query_obj.width, "height" : query_obj.height, - "image": input_image, "mask": mask } + + if input_image: + data['image'] = input_image + elif model == REPLICATE_MODEL.jagilley_controlnet_depth2img: data = { - "input_image" : input_image, "prompt_strength" : query_obj.strength, "prompt" : query_obj.prompt, "negative_prompt" : query_obj.negative_prompt, "num_inference_steps" : query_obj.num_inference_steps, "guidance_scale" : query_obj.guidance_scale } + + if input_image: + data['input_image'] = input_image + elif model == REPLICATE_MODEL.arielreplicate: data = { - "input_image" : input_image, "instruction_text" : query_obj.prompt, "seed" : query_obj.seed, "cfg_image" : query_obj.data.get("cfg", 1.2), "cfg_text" : query_obj.guidance_scale, "resolution" : 704 } + + if 
input_image: + data['input_image'] = input_image + elif model == REPLICATE_MODEL.urpm: data = { - 'image': input_image, 'prompt': query_obj.prompt, 'negative_prompt': query_obj.negative_prompt, 'strength': query_obj.strength, @@ -102,15 +114,22 @@ def get_model_params_from_query_obj(model, query_obj: MLQueryObject): 'upscale': 1, 'seed': query_obj.seed, } + + if input_image: + data['image'] = input_image + elif model == REPLICATE_MODEL.controlnet_1_1_x_realistic_vision_v2_0: data = { - 'image': input_image, 'prompt': query_obj.prompt, 'ddim_steps': query_obj.num_inference_steps, 'strength': query_obj.strength, 'scale': query_obj.guidance_scale, 'seed': query_obj.seed } + + if input_image: + data['image'] = input_image + elif model == REPLICATE_MODEL.realistic_vision_v5: if not (query_obj.guidance_scale >= 3.5 and query_obj.guidance_scale <= 7.0): raise ValueError("Guidance scale must be between 3.5 and 7.0") @@ -128,8 +147,6 @@ def get_model_params_from_query_obj(model, query_obj: MLQueryObject): data = { 'prompt': query_obj.prompt, 'negative_prompt': query_obj.negative_prompt, - 'image': input_image, - 'mask': mask, 'width': query_obj.width, 'height': query_obj.height, 'prompt_strength': query_obj.strength, @@ -137,14 +154,23 @@ def get_model_params_from_query_obj(model, query_obj: MLQueryObject): 'num_inference_steps': query_obj.num_inference_steps, 'safety_checker': False } + + if input_image: + data['image'] = input_image + if mask: + data['mask'] = mask + elif model == REPLICATE_MODEL.sdxl_controlnet: data = { 'prompt': query_obj.prompt, 'negative_prompt': query_obj.negative_prompt, - 'image': input_image, 'num_inference_steps': query_obj.num_inference_steps, 'condition_scale': query_obj.data.get('condition_scale', 0.5), } + + if input_image: + data['image'] = input_image + elif model == REPLICATE_MODEL.realistic_vision_v5_img2img: data = { 'prompt': query_obj.prompt, @@ -153,6 +179,10 @@ def get_model_params_from_query_obj(model, query_obj: MLQueryObject): 
'steps': query_obj.num_inference_steps, 'strength': query_obj.strength } + + if input_image: + data['image'] = input_image + else: data = query_obj.to_json() From 38ac90e2c7467371dec2ae7f6be0b25a88f1bee3 Mon Sep 17 00:00:00 2001 From: piyushK52 Date: Sun, 1 Oct 2023 13:27:08 +0530 Subject: [PATCH 029/164] wip: inference restructuring --- shared/constants.py | 4 +- .../components/frame_styling_page.py | 6 +- ui_components/methods/ml_methods.py | 186 ++++-------------- ui_components/methods/training_methods.py | 4 +- ui_components/widgets/drawing_element.py | 2 +- ui_components/widgets/styling_element.py | 5 +- utils/constants.py | 4 +- utils/ml_processor/replicate/constants.py | 13 +- utils/ml_processor/replicate/replicate.py | 5 +- utils/ml_processor/replicate/utils.py | 66 ++++++- 10 files changed, 129 insertions(+), 166 deletions(-) diff --git a/shared/constants.py b/shared/constants.py index 30f4feb7..08b23100 100644 --- a/shared/constants.py +++ b/shared/constants.py @@ -78,6 +78,4 @@ class AnimationToolType(ExtendedEnum): AWS_S3_REGION = 'ap-south-1' # TODO: discuss this OFFLINE_MODE = os.getenv('OFFLINE_MODE', False) # for picking up secrets and file storage -LOCAL_DATABASE_NAME = 'banodoco_local.db' - -REPLICATE_USER = "piyushk52" \ No newline at end of file +LOCAL_DATABASE_NAME = 'banodoco_local.db' \ No newline at end of file diff --git a/ui_components/components/frame_styling_page.py b/ui_components/components/frame_styling_page.py index 3c5210bd..87140c1b 100644 --- a/ui_components/components/frame_styling_page.py +++ b/ui_components/components/frame_styling_page.py @@ -151,7 +151,11 @@ def frame_styling_page(mainheader2, project_uuid: str): adapter_type=st.session_state['adapter_type'], update_inference_settings=True, low_threshold=st.session_state['low_threshold'], - high_threshold=st.session_state['high_threshold'] + high_threshold=st.session_state['high_threshold'], + canny_image=st.session_state['canny_image'], + 
lora_model_1_url=st.session_state['lora_model_1_url'] if st.session_state['lora_model_1_url'] else None, + lora_model_2_url=st.session_state['lora_model_2_url'] if st.session_state['lora_model_2_url'] else None, + lora_model_3_url=st.session_state['lora_model_3_url'] if st.session_state['lora_model_3_url'] else None, ) st.experimental_rerun() diff --git a/ui_components/methods/ml_methods.py b/ui_components/methods/ml_methods.py index e757f089..36e54c37 100644 --- a/ui_components/methods/ml_methods.py +++ b/ui_components/methods/ml_methods.py @@ -8,7 +8,7 @@ import uuid import urllib from backend.models import InternalFileObject -from shared.constants import REPLICATE_USER, SERVER, AIModelCategory, InternalFileTag, InternalFileType, ServerType +from shared.constants import SERVER, AIModelCategory, InternalFileTag, InternalFileType, ServerType from ui_components.constants import MASK_IMG_LOCAL_PATH, TEMP_MASK_FILE from ui_components.models import InternalAIModelObject, InternalFrameTimingObject, InternalSettingObject from utils.constants import ImageStage, MLQueryObject @@ -82,26 +82,41 @@ def trigger_restyling_process(timing_uuid, update_inference_settings, \ def restyle_images(query_obj: MLQueryObject) -> InternalFileObject: data_repo = DataRepo() - model = data_repo.get_ai_model_from_uuid(query_obj.model_uuid) - - if model.category == AIModelCategory.LORA.value: - output_file = prompt_model_lora(query_obj) - elif model.category == AIModelCategory.CONTROLNET.value: - output_file = prompt_model_controlnet(query_obj) - elif model.category == AIModelCategory.DREAMBOOTH.value: - output_file = prompt_model_dreambooth(query_obj) - else: - model = REPLICATE_MODEL.get_model_by_db_obj(model) # TODO: remove this dependency - output_file = prompt_model(model, query_obj) - - return output_file - -def prompt_model(model, query_obj: MLQueryObject) -> InternalFileObject: ml_client = get_ml_client() - data_repo = DataRepo() + db_model = 
data_repo.get_ai_model_from_uuid(query_obj.model_uuid) + + if db_model.category == AIModelCategory.LORA.value: + model = REPLICATE_MODEL.clones_lora_training_2 + output, log = ml_client.predict_model_output_standardized(model, query_obj) + + elif db_model.category == AIModelCategory.CONTROLNET.value: + adapter_type = query_obj.adapter_type + if adapter_type == "normal": + model = REPLICATE_MODEL.jagilley_controlnet_normal + elif adapter_type == "canny": + model = REPLICATE_MODEL.jagilley_controlnet_canny + elif adapter_type == "hed": + model = REPLICATE_MODEL.jagilley_controlnet_hed + elif adapter_type == "scribble": + model = REPLICATE_MODEL.jagilley_controlnet_scribble + elif adapter_type == "seg": + model = REPLICATE_MODEL.jagilley_controlnet_seg + elif adapter_type == "hough": + model = REPLICATE_MODEL.jagilley_controlnet_hough + elif adapter_type == "depth2img": + model = REPLICATE_MODEL.jagilley_controlnet_depth2img + elif adapter_type == "pose": + model = REPLICATE_MODEL.jagilley_controlnet_pose + output, log = ml_client.predict_model_output_standardized(model, query_obj) + + elif db_model.category == AIModelCategory.DREAMBOOTH.value: + output, log = prompt_model_dreambooth(query_obj) - output, log = ml_client.predict_model_output_standardized(model, query_obj) + else: + model = REPLICATE_MODEL.get_model_by_db_obj(model) # TODO: remove this dependency + output, log = ml_client.predict_model_output_standardized(model, query_obj) + filename = str(uuid.uuid4()) + ".png" output_file = data_repo.create_file( name=filename, @@ -112,84 +127,6 @@ def prompt_model(model, query_obj: MLQueryObject) -> InternalFileObject: return output_file -def prompt_model_lora(query_obj: MLQueryObject) -> InternalFileObject: - data_repo = DataRepo() - - timing_uuid = query_obj.timing_uuid - source_image_file: InternalFileObject = data_repo.get_file_from_uuid(query_obj.image_uuid) - - timing: InternalFrameTimingObject = data_repo.get_timing_from_uuid( - timing_uuid) - 
project_settings: InternalSettingObject = data_repo.get_project_setting( - timing.project.uuid) - - lora_urls = "" - lora_scales = "" - if "lora_model_1_url" in st.session_state and st.session_state["lora_model_1_url"]: - lora_urls += st.session_state["lora_model_1_url"] - lora_scales += "0.5" - if "lora_model_2_url" in st.session_state and st.session_state["lora_model_2_url"]: - ctn = "" if not len(lora_urls) else " | " - lora_urls += ctn + st.session_state["lora_model_2_url"] - lora_scales += ctn + "0.5" - if st.session_state["lora_model_3_url"]: - ctn = "" if not len(lora_urls) else " | " - lora_urls += ctn + st.session_state["lora_model_3_url"] - lora_scales += ctn + "0.5" - - source_image = source_image_file.location - if source_image[:4] == "http": - input_image = source_image - else: - input_image = open(source_image, "rb") - - if timing.adapter_type != "None": - if source_image[:4] == "http": - adapter_condition_image = source_image - else: - adapter_condition_image = open(source_image, "rb") - else: - adapter_condition_image = "" - - inputs = { - 'prompt': timing.prompt, - 'negative_prompt': timing.negative_prompt, - 'width': project_settings.width, - 'height': project_settings.height, - 'num_outputs': 1, - 'image': input_image, - 'num_inference_steps': timing.num_inteference_steps, - 'guidance_scale': timing.guidance_scale, - 'prompt_strength': timing.strength, - 'scheduler': "DPMSolverMultistep", - 'lora_urls': lora_urls, - 'lora_scales': lora_scales, - 'adapter_type': timing.adapter_type, - 'adapter_condition_image': adapter_condition_image, - } - - ml_client = get_ml_client() - max_attempts = 3 - attempts = 0 - while attempts < max_attempts: - try: - output, log = ml_client.predict_model_output( - REPLICATE_MODEL.clones_lora_training_2, **inputs) - print(output) - filename = str(uuid.uuid4()) + ".png" - file: InternalFileObject = data_repo.create_file(name=filename, type=InternalFileType.IMAGE.value, - hosted_url=output[0], inference_log_id=log.uuid) - 
return file - except replicate.exceptions.ModelError as e: - if "NSFW content detected" in str(e): - print("NSFW content detected. Attempting to rerun code...") - attempts += 1 - continue - else: - raise e - except Exception as e: - raise e - def prompt_model_dreambooth(query_obj: MLQueryObject): data_repo = DataRepo() @@ -238,8 +175,9 @@ def prompt_model_dreambooth(query_obj: MLQueryObject): else: version = dreambooth_model.version + app_setting = data_repo.get_app_setting_from_uuid() model_version = ml_client.get_model_by_name( - f"{REPLICATE_USER}/{model_name}", version) + f"{app_setting.replicate_username}/{model_name}", version) if source_image.startswith("http"): input_image = source_image @@ -272,61 +210,7 @@ def prompt_model_dreambooth(query_obj: MLQueryObject): return None -def prompt_model_controlnet(query_obj: MLQueryObject): - data_repo = DataRepo() - timing: InternalFrameTimingObject = data_repo.get_timing_from_uuid( - timing_uuid) - timing_uuid = query_obj.timing_uuid - intput_image_file: InternalFileObject = data_repo.get_file_from_uuid(query_obj.image_uuid) - input_image = intput_image_file.location - - if timing.adapter_type == "normal": - model = REPLICATE_MODEL.jagilley_controlnet_normal - elif timing.adapter_type == "canny": - model = REPLICATE_MODEL.jagilley_controlnet_canny - elif timing.adapter_type == "hed": - model = REPLICATE_MODEL.jagilley_controlnet_hed - elif timing.adapter_type == "scribble": - model = REPLICATE_MODEL.jagilley_controlnet_scribble - if timing.canny_image != "": - input_image = timing.canny_image - elif timing.adapter_type == "seg": - model = REPLICATE_MODEL.jagilley_controlnet_seg - elif timing.adapter_type == "hough": - model = REPLICATE_MODEL.jagilley_controlnet_hough - elif timing.adapter_type == "depth2img": - model = REPLICATE_MODEL.jagilley_controlnet_depth2img - elif timing.adapter_type == "pose": - model = REPLICATE_MODEL.jagilley_controlnet_pose - - if not input_image.startswith("http"): - input_image = 
open(input_image, "rb") - - inputs = { - 'image': input_image, - 'prompt': timing.prompt, - 'num_samples': "1", - 'image_resolution': "512", - 'ddim_steps': timing.num_inteference_steps, - 'scale': timing.guidance_scale, - 'eta': 0, - 'seed': timing.seed, - 'a_prompt': "best quality, extremely detailed", - 'n_prompt': timing.negative_prompt + ", longbody, lowres, bad anatomy, bad hands, missing fingers, extra digit, fewer digits, cropped, worst quality, low quality", - 'detect_resolution': 512, - 'bg_threshold': 0, - 'low_threshold': timing.low_threshold, - 'high_threshold': timing.high_threshold, - } - - ml_client = get_ml_client() - output, log = ml_client.predict_model_output(model, **inputs) - - filename = str(uuid.uuid4()) + ".png" - output_file: InternalFileObject = data_repo.create_file(name=filename, type=InternalFileType.IMAGE.value, - hosted_url=output[0], inference_log_id=log.uuid) - return output_file def prompt_clip_interrogator(input_image, which_model, best_or_fast): if which_model == "Stable Diffusion 1.5": diff --git a/ui_components/methods/training_methods.py b/ui_components/methods/training_methods.py index b331064c..f1f308cf 100644 --- a/ui_components/methods/training_methods.py +++ b/ui_components/methods/training_methods.py @@ -31,8 +31,10 @@ def train_dreambooth_model(instance_prompt, class_prompt, training_file_url, max from ui_components.methods.common_methods import convert_image_list_to_file_list ml_client = get_ml_client() + app_setting = DataRepo().get_app_setting_from_uuid() + response = ml_client.dreambooth_training( - training_file_url, instance_prompt, class_prompt, max_train_steps, model_name, controller_type, len(images_list)) + training_file_url, instance_prompt, class_prompt, max_train_steps, model_name, controller_type, len(images_list), app_setting.replicate_username) training_status = response["status"] model_id = response["id"] diff --git a/ui_components/widgets/drawing_element.py b/ui_components/widgets/drawing_element.py 
index 92e891cf..e7661eff 100644 --- a/ui_components/widgets/drawing_element.py +++ b/ui_components/widgets/drawing_element.py @@ -131,7 +131,7 @@ def drawing_element(timing_details,project_settings,project_uuid,stage=WorkflowS st.image(canny_image.location) if st.button(f"Make Into Guidance Image"): - data_repo.update_specific_timing(st.session_state['current_frame_uuid'], source_image_id=st.session_state['canny_image']) + # data_repo.update_specific_timing(st.session_state['current_frame_uuid'], source_image_id=st.session_state['canny_image']) st.session_state['reset_canvas'] = True st.session_state['canny_image'] = None st.experimental_rerun() diff --git a/ui_components/widgets/styling_element.py b/ui_components/widgets/styling_element.py index 6a604662..4237f9f1 100644 --- a/ui_components/widgets/styling_element.py +++ b/ui_components/widgets/styling_element.py @@ -251,11 +251,12 @@ def styling_element(timing_uuid, view_type="Single"): # st.session_state['custom_models'] = next((obj.uuid for i, obj in enumerate( # dreambooth_model_list) if getattr(obj, 'name') == selected_dreambooth_model_name), "") selected_dreambooth_model_index = next((i for i, obj in enumerate( - dreambooth_model_list) if getattr(obj, 'name') == selected_dreambooth_model_name), "") + dreambooth_model_list) if getattr(obj, 'name') == selected_dreambooth_model_name), 0) if st.session_state['index_of_dreambooth_model'] != selected_dreambooth_model_index: st.session_state['index_of_dreambooth_model'] = selected_dreambooth_model_index - st.session_state['dreambooth_model_uuid'] = dreambooth_model_list[st.session_state['index_of_dreambooth_model']].uuid + if len(dreambooth_model_list): + st.session_state['dreambooth_model_uuid'] = dreambooth_model_list[st.session_state['index_of_dreambooth_model']].uuid else: st.session_state['custom_models'] = [] st.session_state['adapter_type'] = "N" diff --git a/utils/constants.py b/utils/constants.py index 01348d07..960ec8e4 100644 --- a/utils/constants.py +++ 
b/utils/constants.py @@ -23,9 +23,9 @@ def __init__( model_uuid, guidance_scale, seed, - num_inference_steps, - adapter_type, + num_inference_steps, strength, + adapter_type=None, prompt="", negative_prompt="", height=512, diff --git a/utils/ml_processor/replicate/constants.py b/utils/ml_processor/replicate/constants.py index 09f33fac..ad7e653c 100644 --- a/utils/ml_processor/replicate/constants.py +++ b/utils/ml_processor/replicate/constants.py @@ -51,4 +51,15 @@ def get_model_by_db_obj(model_db_obj): return model return None -DEFAULT_LORA_MODEL_URL = "https://replicate.delivery/pbxt/nWm6eP9ojwVvBCaWoWZVawOKRfgxPJmkVk13ES7PX36Y66kQA/tmpxuz6k_k2datazip.safetensors" \ No newline at end of file +DEFAULT_LORA_MODEL_URL = "https://replicate.delivery/pbxt/nWm6eP9ojwVvBCaWoWZVawOKRfgxPJmkVk13ES7PX36Y66kQA/tmpxuz6k_k2datazip.safetensors" + +CONTROLNET_MODELS = [ + REPLICATE_MODEL.jagilley_controlnet_normal, + REPLICATE_MODEL.jagilley_controlnet_canny, + REPLICATE_MODEL.jagilley_controlnet_hed, + REPLICATE_MODEL.jagilley_controlnet_scribble, + REPLICATE_MODEL.jagilley_controlnet_seg, + REPLICATE_MODEL.jagilley_controlnet_hough, + REPLICATE_MODEL.jagilley_controlnet_depth2img, + REPLICATE_MODEL.jagilley_controlnet_pose, +] \ No newline at end of file diff --git a/utils/ml_processor/replicate/replicate.py b/utils/ml_processor/replicate/replicate.py index c1e89759..271e5f8a 100644 --- a/utils/ml_processor/replicate/replicate.py +++ b/utils/ml_processor/replicate/replicate.py @@ -1,7 +1,6 @@ import asyncio import io import time -from shared.constants import REPLICATE_USER from shared.file_upload.s3 import upload_file from utils.common_utils import get_current_user_uuid from utils.constants import MLQueryObject @@ -170,7 +169,7 @@ def upload_training_data(self, images_list): # TODO: figure how to resolve model location setting, right now it's hardcoded to peter942/modnet @check_user_credits def dreambooth_training(self, training_file_url, instance_prompt, \ - class_prompt, 
max_train_steps, model_name, controller_type, image_len): + class_prompt, max_train_steps, model_name, controller_type, image_len, replicate_user): if controller_type == "normal": template_version = "b65d36e378a01ef81d81ba49be7deb127e9bb8b74a28af3aa0eaca16b9bcd0eb" elif controller_type == "canny": @@ -199,7 +198,7 @@ def dreambooth_training(self, training_file_url, instance_prompt, \ "instance_data": training_file_url, "max_train_steps": max_train_steps }, - "model": REPLICATE_USER + "/" + str(model_name), + "model": replicate_user + "/" + str(model_name), "trainer_version": "cd3f925f7ab21afaef7d45224790eedbb837eeac40d22e8fefe015489ab644aa", "template_version": template_version, "webhook_completed": "https://example.com/dreambooth-webhook" diff --git a/utils/ml_processor/replicate/utils.py b/utils/ml_processor/replicate/utils.py index b4f45b35..44c59a42 100644 --- a/utils/ml_processor/replicate/utils.py +++ b/utils/ml_processor/replicate/utils.py @@ -1,7 +1,7 @@ from utils.common_utils import user_credits_available from utils.constants import MLQueryObject from utils.data_repo.data_repo import DataRepo -from utils.ml_processor.replicate.constants import REPLICATE_MODEL +from utils.ml_processor.replicate.constants import CONTROLNET_MODELS, REPLICATE_MODEL def check_user_credits(method): @@ -183,6 +183,70 @@ def get_model_params_from_query_obj(model, query_obj: MLQueryObject): if input_image: data['image'] = input_image + elif model in CONTROLNET_MODELS: + if model == REPLICATE_MODEL.jagilley_controlnet_scribble and query_obj.data.get('canny_image', None): + input_image = data_repo.get_file_from_uuid(query_obj.data['canny_image']).location + if not input_image.startswith('http'): + input_image = open(input_image, 'rb') + + data = { + 'image': input_image, + 'prompt': query_obj.prompt, + 'num_samples': "1", + 'image_resolution': query_obj.width, + 'ddim_steps': query_obj.num_inteference_steps, + 'scale': query_obj.guidance_scale, + 'eta': 0, + 'seed': query_obj.seed, 
+ 'a_prompt': query_obj.data.get('a_prompt', "best quality, extremely detailed"), + 'n_prompt': query_obj.negative_prompt + ", longbody, lowres, bad anatomy, bad hands, missing fingers, extra digit, fewer digits, cropped, worst quality, low quality", + 'detect_resolution': query_obj.width, + 'bg_threshold': 0, + 'low_threshold': query_obj.low_threshold, + 'high_threshold': query_obj.high_threshold, + } + + elif model in [REPLICATE_MODEL.clones_lora_training_2]: + + if query_obj.adapter_type: + adapter_condition_image = input_image + else: + adapter_condition_image = "" + + lora_urls = "" + lora_scales = "" + lora_model_1_url = query_obj.data.get("lora_model_1_url", None) + lora_model_2_url = query_obj.data.get("lora_model_2_url", None) + lora_model_3_url = query_obj.data.get("lora_model_3_url", None) + if lora_model_1_url: + lora_urls += lora_model_1_url + lora_scales += "0.5" + if lora_model_2_url: + ctn = "" if not len(lora_urls) else " | " + lora_urls += ctn + lora_model_2_url + lora_scales += ctn + "0.5" + if lora_model_3_url: + ctn = "" if not len(lora_urls) else " | " + lora_urls += ctn + lora_model_3_url + lora_scales += ctn + "0.5" + + data = { + 'prompt': query_obj.prompt, + 'negative_prompt': query_obj.negative_prompt, + 'width': query_obj.width, + 'height': query_obj.height, + 'num_outputs': 1, + 'image': input_image, + 'num_inference_steps': query_obj.num_inteference_steps, + 'guidance_scale': query_obj.guidance_scale, + 'prompt_strength': query_obj.strength, + 'scheduler': "DPMSolverMultistep", + 'lora_urls': lora_urls, + 'lora_scales': lora_scales, + 'adapter_type': query_obj.adapter_type, + 'adapter_condition_image': adapter_condition_image, + } + else: data = query_obj.to_json() From 472406494d064ddc503d073d8da0a6993bdd7dd5 Mon Sep 17 00:00:00 2001 From: piyushK52 Date: Sun, 1 Oct 2023 15:45:18 +0530 Subject: [PATCH 030/164] controlnet generation fixes --- backend/serializers/dao.py | 4 ++-- banodoco_settings.py | 4 ++-- 
ui_components/components/frame_styling_page.py | 11 +++++------ ui_components/widgets/styling_element.py | 2 +- utils/ml_processor/replicate/replicate.py | 2 +- utils/ml_processor/replicate/utils.py | 16 ++-------------- 6 files changed, 13 insertions(+), 26 deletions(-) diff --git a/backend/serializers/dao.py b/backend/serializers/dao.py index ccd42c44..218519c9 100644 --- a/backend/serializers/dao.py +++ b/backend/serializers/dao.py @@ -99,8 +99,8 @@ class CreateTimingDao(serializers.Serializer): clip_duration = serializers.FloatField(default=0, required=False) animation_style = serializers.CharField(max_length=100, default=AnimationStyleType.INTERPOLATION.value, required=False) interpolation_steps = serializers.IntegerField(required=False) - low_threshold = serializers.FloatField(default=0, required=False) - high_threshold = serializers.FloatField(default=0, required=False) + low_threshold = serializers.FloatField(default=100, required=False) + high_threshold = serializers.FloatField(default=200, required=False) aux_frame_index = serializers.IntegerField(required=False) diff --git a/banodoco_settings.py b/banodoco_settings.py index 5b535a9a..6a75c156 100644 --- a/banodoco_settings.py +++ b/banodoco_settings.py @@ -140,8 +140,8 @@ def create_new_project(user: InternalUserObject, project_name: str, width=512, h "default_adapter_type" : "N", "guidance_type" : guidance_type, "default_animation_style" : animation_style, - "default_low_threshold" : 0, - "default_high_threshold" : 0 + "default_low_threshold" : 50, + "default_high_threshold" : 100 } project_setting = data_repo.create_project_setting(**project_setting_data) diff --git a/ui_components/components/frame_styling_page.py b/ui_components/components/frame_styling_page.py index 87140c1b..512751e6 100644 --- a/ui_components/components/frame_styling_page.py +++ b/ui_components/components/frame_styling_page.py @@ -132,10 +132,9 @@ def frame_styling_page(mainheader2, project_uuid: str): f"How many variants?", 
min_value=1, max_value=100, key=f"number_of_variants_{st.session_state['current_frame_index']}") with detail2: - # TODO: add custom model validation such for sd img2img the value of strength can only be 1 if st.button(f"Generate variants", key=f"new_variations_{st.session_state['current_frame_index']}", help="This will generate new variants based on the settings to the left."): - for i in range(0, st.session_state['individual_number_of_variants']): + for i in range(0, max(st.session_state['individual_number_of_variants'], 1)): trigger_restyling_process( timing_uuid=st.session_state['current_frame_uuid'], model_uuid=st.session_state['model'], @@ -152,10 +151,10 @@ def frame_styling_page(mainheader2, project_uuid: str): update_inference_settings=True, low_threshold=st.session_state['low_threshold'], high_threshold=st.session_state['high_threshold'], - canny_image=st.session_state['canny_image'], - lora_model_1_url=st.session_state['lora_model_1_url'] if st.session_state['lora_model_1_url'] else None, - lora_model_2_url=st.session_state['lora_model_2_url'] if st.session_state['lora_model_2_url'] else None, - lora_model_3_url=st.session_state['lora_model_3_url'] if st.session_state['lora_model_3_url'] else None, + canny_image=st.session_state['canny_image'] if 'canny_image' in st.session_state else None, + lora_model_1_url=st.session_state['lora_model_1_url'] if ('lora_model_1_url' in st.session_state and st.session_state['lora_model_1_url']) else None, + lora_model_2_url=st.session_state['lora_model_2_url'] if ('lora_model_2_url' in st.session_state and st.session_state['lora_model_2_url']) else None, + lora_model_3_url=st.session_state['lora_model_3_url'] if ('lora_model_3_url' in st.session_state and st.session_state['lora_model_3_url']) else None, ) st.experimental_rerun() diff --git a/ui_components/widgets/styling_element.py b/ui_components/widgets/styling_element.py index 4237f9f1..6db4b5ed 100644 --- a/ui_components/widgets/styling_element.py +++ 
b/ui_components/widgets/styling_element.py @@ -264,7 +264,7 @@ def styling_element(timing_uuid, view_type="Single"): if not ( 'adapter_type' in st.session_state and st.session_state['adapter_type']): st.session_state['adapter_type'] = 'N' - if st.session_state['adapter_type'] == "canny": + if st.session_state['adapter_type'] in ["canny", "pose"]: canny1, canny2 = st.columns(2) diff --git a/utils/ml_processor/replicate/replicate.py b/utils/ml_processor/replicate/replicate.py index 271e5f8a..0feb79fa 100644 --- a/utils/ml_processor/replicate/replicate.py +++ b/utils/ml_processor/replicate/replicate.py @@ -66,7 +66,7 @@ def predict_model_output(self, model: ReplicateModel, **kwargs): log = log_model_inference(model, end_time - start_time, **kwargs) self._update_usage_credits(end_time - start_time) - return output, log + return [output[-1]], log @check_user_credits def predict_model_output_async(self, model: ReplicateModel, **kwargs): diff --git a/utils/ml_processor/replicate/utils.py b/utils/ml_processor/replicate/utils.py index 44c59a42..af296ab5 100644 --- a/utils/ml_processor/replicate/utils.py +++ b/utils/ml_processor/replicate/utils.py @@ -79,18 +79,6 @@ def get_model_params_from_query_obj(model, query_obj: MLQueryObject): if input_image: data['image'] = input_image - - elif model == REPLICATE_MODEL.jagilley_controlnet_depth2img: - data = { - "prompt_strength" : query_obj.strength, - "prompt" : query_obj.prompt, - "negative_prompt" : query_obj.negative_prompt, - "num_inference_steps" : query_obj.num_inference_steps, - "guidance_scale" : query_obj.guidance_scale - } - - if input_image: - data['input_image'] = input_image elif model == REPLICATE_MODEL.arielreplicate: data = { @@ -193,8 +181,8 @@ def get_model_params_from_query_obj(model, query_obj: MLQueryObject): 'image': input_image, 'prompt': query_obj.prompt, 'num_samples': "1", - 'image_resolution': query_obj.width, - 'ddim_steps': query_obj.num_inteference_steps, + 'image_resolution': str(query_obj.width), + 
'ddim_steps': query_obj.num_inference_steps, 'scale': query_obj.guidance_scale, 'eta': 0, 'seed': query_obj.seed, From f26a6d90c94f53074eba8140ee3c8c6954712a86 Mon Sep 17 00:00:00 2001 From: piyushK52 Date: Mon, 2 Oct 2023 16:39:14 +0530 Subject: [PATCH 031/164] wip: styling copy fix --- backend/serializers/dto.py | 1 + ui_components/constants.py | 18 +++++ ui_components/methods/common_methods.py | 66 +++++++++--------- ui_components/methods/ml_methods.py | 2 +- ui_components/models.py | 61 ++--------------- ui_components/widgets/inpainting_element.py | 2 +- ui_components/widgets/styling_element.py | 75 ++++++++------------- utils/common_decorators.py | 31 ++++++++- utils/common_utils.py | 17 ++++- utils/ml_processor/replicate/replicate.py | 1 + utils/ml_processor/replicate/utils.py | 4 +- 11 files changed, 134 insertions(+), 144 deletions(-) diff --git a/backend/serializers/dto.py b/backend/serializers/dto.py index df78cc09..1ae515ad 100644 --- a/backend/serializers/dto.py +++ b/backend/serializers/dto.py @@ -54,6 +54,7 @@ class InferenceLogDto(serializers.ModelSerializer): class Meta: model = InferenceLog fields = ( + "uuid", "project", "model", "input_params", diff --git a/ui_components/constants.py b/ui_components/constants.py index dc148052..85d66b50 100644 --- a/ui_components/constants.py +++ b/ui_components/constants.py @@ -1,3 +1,5 @@ +from shared.constants import AnimationStyleType, AnimationToolType +from utils.constants import ImageStage from utils.enum import ExtendedEnum @@ -15,6 +17,22 @@ class CreativeProcessType(ExtendedEnum): STYLING = "Styling" MOTION = "Motion" +class DefaultTimingStyleParams: + prompt = "" + negative_prompt = "bad image, worst quality" + strength = 1 + guidance_scale = 0.5 + seed = 0 + num_inference_steps = 25 + low_threshold = 100 + high_threshold = 200 + adapter_type = None + interpolation_steps = 3 + transformation_stage = ImageStage.SOURCE_IMAGE.value + custom_model_id_list = [] + animation_tool = 
AnimationToolType.G_FILM.value + animation_style = AnimationStyleType.INTERPOLATION.value + model = None # TODO: make proper paths for every file CROPPED_IMG_LOCAL_PATH = "videos/temp/cropped.png" diff --git a/ui_components/methods/common_methods.py b/ui_components/methods/common_methods.py index 5a442137..1440c5ce 100644 --- a/ui_components/methods/common_methods.py +++ b/ui_components/methods/common_methods.py @@ -19,9 +19,10 @@ from ui_components.constants import CROPPED_IMG_LOCAL_PATH, MASK_IMG_LOCAL_PATH, SECOND_MASK_FILE, SECOND_MASK_FILE_PATH, TEMP_MASK_FILE, CreativeProcessType, WorkflowStageType from ui_components.methods.file_methods import add_temp_file_to_project, generate_pil_image, save_or_host_file, save_or_host_file_bytes from ui_components.methods.ml_methods import create_depth_mask_image, inpainting, remove_background -from ui_components.methods.video_methods import calculate_desired_duration_of_individual_clip +from ui_components.methods.video_methods import calculate_desired_duration_of_individual_clip, create_or_get_single_preview_video from ui_components.models import InternalAIModelObject, InternalFrameTimingObject, InternalSettingObject -from utils.constants import ImageStage +from utils.common_utils import reset_styling_settings +from utils.constants import ImageStage, MLQueryObject from utils.data_repo.data_repo import DataRepo from shared.constants import AnimationStyleType @@ -89,22 +90,20 @@ def style_cloning_element(timing_details): if open_copier is True: copy1, copy2 = st.columns([1, 1]) with copy1: - which_frame_to_copy_from = st.number_input("Which frame would you like to copy styling settings from?", min_value=1, max_value=len( + frame_index = st.number_input("Which frame would you like to copy styling settings from?", min_value=1, max_value=len( timing_details), value=st.session_state['current_frame_index'], step=1) if st.button("Copy styling settings from this frame"): - clone_styling_settings(which_frame_to_copy_from - 1, 
st.session_state['current_frame_uuid']) + clone_styling_settings(frame_index - 1, st.session_state['current_frame_uuid']) + reset_styling_settings(st.session_state['current_frame_uuid']) st.experimental_rerun() with copy2: - display_image( - idx=which_frame_to_copy_from, stage=WorkflowStageType.STYLED.value, clickable=False, timing_details=timing_details) + display_image(timing_details[frame_index - 1].uuid, stage=WorkflowStageType.STYLED.value, clickable=False) st.caption("Prompt:") - st.caption( - timing_details[which_frame_to_copy_from].prompt) - if timing_details[which_frame_to_copy_from].model is not None: + st.caption(timing_details[frame_index - 1].prompt) + if timing_details[frame_index - 1].model is not None: st.caption("Model:") - st.caption( - timing_details[which_frame_to_copy_from].model.name) + st.caption(timing_details[frame_index - 1].model.name) def jump_to_single_frame_view_button(display_number, timing_details): if st.button(f"Jump to #{display_number}"): @@ -153,8 +152,7 @@ def add_key_frame(selected_image, inherit_styling_settings, how_long_after, whic if inherit_styling_settings == "Yes": clone_styling_settings(index_of_current_item - 1, timing_details[index_of_current_item].uuid) - data_repo.update_specific_timing(timing_details[index_of_current_item].uuid, \ - animation_style=project_settings.default_animation_style) + timing_details[index_of_current_item].animation_style = project_settings.default_animation_style if len(timing_details) == 1: st.session_state['current_frame_index'] = 1 @@ -167,31 +165,33 @@ def add_key_frame(selected_image, inherit_styling_settings, how_long_after, whic st.session_state['section_index'] = 0 st.experimental_rerun() + +# TODO: work with source_frame_uuid, instead of source_frame_number def clone_styling_settings(source_frame_number, target_frame_uuid): data_repo = DataRepo() target_timing = data_repo.get_timing_from_uuid(target_frame_uuid) timing_details = data_repo.get_timing_list_from_project( 
target_timing.project.uuid) - - data_repo.update_specific_timing( - target_frame_uuid, - custom_pipeline=timing_details[source_frame_number].custom_pipeline, - negative_prompt=timing_details[source_frame_number].negative_prompt, - guidance_scale=timing_details[source_frame_number].guidance_scale, - seed=timing_details[source_frame_number].seed, - num_inteference_steps=timing_details[source_frame_number].num_inteference_steps, - transformation_stage=timing_details[source_frame_number].transformation_stage, - strength=timing_details[source_frame_number].strength, - custom_models=timing_details[source_frame_number].custom_model_id_list, - adapter_type=timing_details[source_frame_number].adapter_type, - low_threshold=timing_details[source_frame_number].low_threshold, - high_threshold=timing_details[source_frame_number].high_threshold, - prompt=timing_details[source_frame_number].prompt - ) - if timing_details[source_frame_number].model: - data_repo.update_specific_timing( - target_frame_uuid, model_id=timing_details[source_frame_number].model.uuid) + primary_image = data_repo.get_file_from_uuid(timing_details[source_frame_number].primary_image.uuid) + if primary_image and primary_image.inference_log and primary_image.inference_log.input_params: + params = json.loads(primary_image.inference_log.input_params) + + if 'query_dict' in params: + params = MLQueryObject(**json.loads(params['query_dict'])) + target_timing.prompt = params.prompt + target_timing.negative_prompt = params.negative_prompt + target_timing.guidance_scale = params.guidance_scale + target_timing.seed = params.seed + target_timing.num_inference_steps = params.num_inference_steps + target_timing.strength = params.strength + target_timing.adapter_type = params.adapter_type + target_timing.low_threshold = params.low_threshold + target_timing.high_threshold = params.high_threshold + + if params.model_uuid: + model = data_repo.get_ai_model_from_uuid(params.model_uuid) + target_timing.model = model # TODO: 
image format is assumed to be PNG, change this later def save_new_image(img: Union[Image.Image, str, np.ndarray, io.BytesIO], project_uuid) -> InternalFileObject: @@ -451,7 +451,7 @@ def save_zoomed_image(image, timing_uuid, stage, promote=False): data_repo.update_project_setting(project_uuid, **project_update_data) - # TODO: CORRECT-CODE - make a proper column for zoom details + # TODO: **CORRECT-CODE - make a proper column for zoom details timing_update_data = { "zoom_details": f"{st.session_state['zoom_level_input']},{st.session_state['rotation_angle_input']},{st.session_state['x_shift']},{st.session_state['y_shift']}", diff --git a/ui_components/methods/ml_methods.py b/ui_components/methods/ml_methods.py index 36e54c37..1ee739a7 100644 --- a/ui_components/methods/ml_methods.py +++ b/ui_components/methods/ml_methods.py @@ -153,7 +153,7 @@ def prompt_model_dreambooth(query_obj: MLQueryObject): negative_prompt = timing.negative_prompt guidance_scale = timing.guidance_scale seed = timing.seed - num_inference_steps = timing.num_inteference_steps + num_inference_steps = timing.num_inference_steps model_id = dreambooth_model.replicate_url diff --git a/ui_components/models.py b/ui_components/models.py index e3873f7d..bd58be97 100644 --- a/ui_components/models.py +++ b/ui_components/models.py @@ -3,7 +3,8 @@ import json from shared.constants import AnimationStyleType, AnimationToolType -from ui_components.constants import TEMP_MASK_FILE +from ui_components.constants import TEMP_MASK_FILE, DefaultTimingStyleParams +from utils.common_decorators import session_state_attributes class InternalFileObject: @@ -15,7 +16,7 @@ def __init__(self, **kwargs): self.hosted_url = kwargs['hosted_url'] if key_present('hosted_url', kwargs) else None self.tag = kwargs['tag'] if key_present('tag', kwargs) else None self.created_on = kwargs['created_on'] if key_present('created_on', kwargs) else None - self.inference_log = InferenceLogObject(kwargs['inference_log']) if 
key_present('inference_log', kwargs) else None + self.inference_log = InferenceLogObject(**kwargs['inference_log']) if key_present('inference_log', kwargs) else None @property def location(self): @@ -77,14 +78,12 @@ def _get_training_image_list(self, training_image_list): training_image_list) return file_list - +@session_state_attributes(DefaultTimingStyleParams) class InternalFrameTimingObject: def __init__(self, **kwargs): self.uuid = kwargs['uuid'] if 'uuid' in kwargs else None self.project = InternalProjectObject( **kwargs["project"]) if 'project' in kwargs and kwargs["project"] else None - self.model = InternalAIModelObject( - **kwargs["model"]) if 'model' in kwargs and kwargs["model"] else None self.source_image = InternalFileObject( **kwargs["source_image"]) if 'source_image' in kwargs and kwargs["source_image"] else None self.interpolated_clip_list = [InternalFileObject(**file) for file in kwargs["interpolated_clip_list"]] \ @@ -99,28 +98,14 @@ def __init__(self, **kwargs): **kwargs["preview_video"]) if 'preview_video' in kwargs and kwargs["preview_video"] else None self.primary_image = InternalFileObject( **kwargs["primary_image"]) if 'primary_image' in kwargs and kwargs["primary_image"] else None - self.custom_model_id_list = kwargs['custom_model_id_list'] if 'custom_model_id_list' in kwargs and kwargs["custom_model_id_list"] else [ - ] self.frame_time = kwargs['frame_time'] if 'frame_time' in kwargs else None self.frame_number = kwargs['frame_number'] if 'frame_number' in kwargs else None self.alternative_images = kwargs['alternative_images'] if 'alternative_images' in kwargs and kwargs["alternative_images"] else [ ] self.custom_pipeline = kwargs['custom_pipeline'] if 'custom_pipeline' in kwargs and kwargs["custom_pipeline"] else None - self.prompt = kwargs['prompt'] if 'prompt' in kwargs and kwargs["prompt"] else "" - self.negative_prompt = kwargs['negative_prompt'] if 'negative_prompt' in kwargs and kwargs["negative_prompt"] else "" - 
self.guidance_scale = kwargs['guidance_scale'] if 'guidance_scale' in kwargs else None - self.seed = kwargs['seed'] if 'seed' in kwargs else None - self.num_inteference_steps = kwargs['num_inteference_steps'] if 'num_inteference_steps' in kwargs and kwargs["num_inteference_steps"] else None - self.strength = kwargs['strength'] if 'strength' in kwargs else None self.notes = kwargs['notes'] if 'notes' in kwargs and kwargs["notes"] else "" - self.adapter_type = kwargs['adapter_type'] if 'adapter_type' in kwargs and kwargs["adapter_type"] else None self.clip_duration = kwargs['clip_duration'] if 'clip_duration' in kwargs and kwargs["clip_duration"] else 0 - #self.animation_style = kwargs['animation_style'] if 'animation_style' in kwargs and kwargs["animation_style"] else None - #self.interpolation_steps = kwargs['interpolation_steps'] if 'interpolation_steps' in kwargs and kwargs["interpolation_steps"] else 0 - self.low_threshold = kwargs['low_threshold'] if 'low_threshold' in kwargs and kwargs["low_threshold"] else 0 - self.high_threshold = kwargs['high_threshold'] if 'high_threshold' in kwargs and kwargs["high_threshold"] else 0 self.aux_frame_index = kwargs['aux_frame_index'] if 'aux_frame_index' in kwargs else 0 - self.transformation_stage = kwargs['transformation_stage'] if 'transformation_stage' in kwargs else None @property def alternative_images_list(self): @@ -169,44 +154,6 @@ def primary_interpolated_video_index(self): return -1 - @property - def animation_tool(self): - key = f"{self.uuid}_animation_tool" - if not (key in st.session_state and st.session_state[key]): - st.session_state[key] = AnimationToolType.G_FILM.value - - return st.session_state[key] - - @animation_tool.setter - def animation_tool(self, val): - key = f"{self.uuid}_animation_tool" - st.session_state[key] = val - - @property - def animation_style(self): - key = f"{self.uuid}_animation_style" - if not (key in st.session_state and st.session_state[key]): - st.session_state[key] = 
AnimationStyleType.INTERPOLATION.value - - return st.session_state[key] - - @animation_style.setter - def animation_style(self, val): - key = f"{self.uuid}_animation_style" - st.session_state[key] = val - - @property - def interpolation_steps(self): - key = f"{self.uuid}_interpolation_steps" - if not (key in st.session_state and st.session_state[key]): - st.session_state[key] = 3 - - return st.session_state[key] - - @interpolation_steps.setter - def interpolation_steps(self, val): - key = f"{self.uuid}_interpolation_steps" - st.session_state[key] = val class InternalAppSettingObject: diff --git a/ui_components/widgets/inpainting_element.py b/ui_components/widgets/inpainting_element.py index 900c3af5..7f7e8b48 100644 --- a/ui_components/widgets/inpainting_element.py +++ b/ui_components/widgets/inpainting_element.py @@ -13,7 +13,7 @@ from utils import st_memory from utils.data_repo.data_repo import DataRepo from utils import st_memory -from ui_components.methods.common_methods import execute_image_edit, create_or_update_mask +from ui_components.methods.common_methods import add_image_variant, execute_image_edit, create_or_update_mask, promote_image_variant from ui_components.models import InternalFrameTimingObject, InternalSettingObject from streamlit_image_comparison import image_comparison diff --git a/ui_components/widgets/styling_element.py b/ui_components/widgets/styling_element.py index 6db4b5ed..09bf4675 100644 --- a/ui_components/widgets/styling_element.py +++ b/ui_components/widgets/styling_element.py @@ -30,7 +30,7 @@ def styling_element(timing_uuid, view_type="Single"): st.session_state['index_of_which_stage_to_run_on'] = 0 if view_type == "Single": - append_to_item_name = f"{st.session_state['current_frame_index']}" + append_to_item_name = f"{st.session_state['current_frame_uuid']}" elif view_type == "List": append_to_item_name = "bulk" st.markdown("## Batch queries") @@ -77,38 +77,6 @@ def styling_element(timing_uuid, view_type="Single"): 
st.session_state['transformation_stage']) st.experimental_rerun() - # NOTE: code not is use - # custom_pipelines = ["None", "Mystique"] - # if 'index_of_last_custom_pipeline' not in st.session_state: - # st.session_state['index_of_last_custom_pipeline'] = 0 - # st.session_state['custom_pipeline'] = st.selectbox( - # f"Custom Pipeline:", custom_pipelines, index=st.session_state['index_of_last_custom_pipeline']) - # if custom_pipelines.index(st.session_state['custom_pipeline']) != st.session_state['index_of_last_custom_pipeline']: - # st.session_state['index_of_last_custom_pipeline'] = custom_pipelines.index( - # st.session_state['custom_pipeline']) - # st.experimental_rerun() - - # if st.session_state['custom_pipeline'] == "Mystique": - # if st.session_state['index_of_default_model'] > 1: - # st.session_state['index_of_default_model'] = 0 - # st.experimental_rerun() - # with st.expander("Mystique is a custom pipeline that uses a multiple models to generate a consistent character and style transformation."): - # st.markdown("## How to use the Mystique pipeline") - # st.markdown( - # "1. Create a fine-tined model in the Custom Model section of the app - we recommend Dreambooth for character transformations.") - # st.markdown( - # "2. It's best to include a detailed prompt. We recommend taking an example input image and running it through the Prompt Finder") - # st.markdown("3. Use [expression], [location], [mouth], and [looking] tags to vary the expression and location of the character dynamically if that changes throughout the clip. Varying this in the prompt will make the character look more natural - especially useful if the character is speaking.") - # st.markdown("4. 
In our experience, the best strength for coherent character transformations is 0.25-0.3 - any more than this and details like eye position change.") - # models = ["LoRA", "Dreambooth"] - # st.session_state['model'] = st.selectbox( - # f"Which type of model is trained on your character?", models, index=st.session_state['index_of_default_model']) - # if st.session_state['index_of_default_model'] != models.index(st.session_state['model']): - # st.session_state['index_of_default_model'] = models.index( - # st.session_state['model']) - # st.experimental_rerun() - # else: - if st.session_state['transformation_stage'] != ImageStage.NONE.value: model_list = data_repo.get_all_ai_model_list(model_type_list=[AIModelType.IMG2IMG.value], custom_trained=False) else: @@ -117,10 +85,10 @@ def styling_element(timing_uuid, view_type="Single"): model_name_list = [m.name for m in model_list] if not ('index_of_default_model' in st.session_state and st.session_state['index_of_default_model']): - if project_settings.default_model: - st.session_state['model'] = project_settings.default_model.uuid + if timing.model: + st.session_state['model'] = timing.model.uuid st.session_state['index_of_default_model'] = next((i for i, obj in enumerate( - model_list) if getattr(obj, 'uuid') == project_settings.default_model.uuid), 0) + model_list) if getattr(obj, 'uuid') == timing.model.uuid), 0) # st.write( # f"Index of last model: {st.session_state['index_of_default_model']}") else: @@ -139,7 +107,7 @@ def styling_element(timing_uuid, view_type="Single"): model_list) if getattr(obj, 'name') == selected_model_name), None) if st.session_state['index_of_default_model'] != selected_model_index: st.session_state['index_of_default_model'] = selected_model_index - # st.experimental_rerun() + st.experimental_rerun() current_model_name = data_repo.get_ai_model_from_uuid( st.session_state['model']).name @@ -149,7 +117,12 @@ def styling_element(timing_uuid, view_type="Single"): controlnet_adapter_types = [ 
"scribble", "normal", "canny", "hed", "seg", "hough", "depth2img", "pose"] if 'index_of_controlnet_adapter_type' not in st.session_state: - st.session_state['index_of_controlnet_adapter_type'] = 0 + if timing.adapter_type: + st.session_state['index_of_controlnet_adapter_type'] = controlnet_adapter_types.index( + timing.adapter_type) + else: + st.session_state['index_of_controlnet_adapter_type'] = 0 + st.session_state['adapter_type'] = st.selectbox( f"Adapter Type", controlnet_adapter_types, index=st.session_state['index_of_controlnet_adapter_type']) @@ -313,22 +286,25 @@ def styling_element(timing_uuid, view_type="Single"): else: if view_type == "List": - if project_settings.default_prompt != "": - st.session_state[f'prompt_value_{append_to_item_name}'] = project_settings.default_prompt - else: - st.session_state[f'prompt_value_{append_to_item_name}'] = "" - - elif view_type == "Single": if timing.prompt != "": st.session_state[f'prompt_value_{append_to_item_name}'] = timing.prompt else: st.session_state[f'prompt_value_{append_to_item_name}'] = "" + elif view_type == "Single": + if not (f'prompt_value_{append_to_item_name}' in st.session_state and st.session_state[f'prompt_value_{append_to_item_name}']): + if timing.prompt != "": + st.session_state[f'prompt_value_{append_to_item_name}'] = timing.prompt + else: + st.session_state[f'prompt_value_{append_to_item_name}'] = "" + st.session_state['prompt'] = st.text_area( f"Prompt", label_visibility="visible", value=st.session_state[f'prompt_value_{append_to_item_name}'], height=150) - if st.session_state['prompt'] != st.session_state['prompt_value']: - st.session_state['prompt_value'] = st.session_state['prompt'] + + if st.session_state['prompt'] != st.session_state[f'prompt_value_{append_to_item_name}']: + st.session_state[f'prompt_value_{append_to_item_name}'] = st.session_state['prompt'] st.experimental_rerun() + if view_type == "List": st.info( "You can include the following tags in the prompt to vary the prompt 
dynamically: [expression], [location], [mouth], and [looking]") @@ -374,6 +350,9 @@ def styling_element(timing_uuid, view_type="Single"): else: st.session_state['guidance_scale'] = 7.5 + if not ('negative_prompt_value' in st.session_state and st.session_state['negative_prompt_value']) and timing.negative_prompt: + st.session_state['negative_prompt_value'] = timing.negative_prompt + st.session_state['negative_prompt'] = st.text_area( f"Negative prompt", value=st.session_state['negative_prompt_value'], label_visibility="visible") @@ -405,8 +384,8 @@ def styling_element(timing_uuid, view_type="Single"): else: st.session_state['num_inference_steps'] = 50 elif view_type == "Single": - if timing.num_inteference_steps: - st.session_state['num_inference_steps'] = timing.num_inteference_steps + if timing.num_inference_steps: + st.session_state['num_inference_steps'] = timing.num_inference_steps else: st.session_state['num_inference_steps'] = 50 st.session_state['num_inference_steps'] = st.number_input( diff --git a/utils/common_decorators.py b/utils/common_decorators.py index e7486c2c..16b9e623 100644 --- a/utils/common_decorators.py +++ b/utils/common_decorators.py @@ -1,4 +1,5 @@ import time +import streamlit as st def count_calls(cls): class Wrapper(cls): @@ -55,4 +56,32 @@ def wrapper(*args, **kwargs): return result return wrapper - return WrapperClass \ No newline at end of file + return WrapperClass + +def session_state_attributes(default_value_cls): + def decorator(cls): + original_getattr = cls.__getattribute__ + original_setattr = cls.__setattr__ + + def custom_attr(self, attr): + if hasattr(default_value_cls, attr): + key = f"{self.uuid}_{attr}" + if not (key in st.session_state and st.session_state[key]): + st.session_state[key] = getattr(default_value_cls, attr) + + return st.session_state[key] + else: + return original_getattr(self, attr) + + def custom_setattr(self, attr, value): + if hasattr(default_value_cls, attr): + key = f"{self.uuid}_{attr}" + 
st.session_state[key] = value + else: + original_setattr(self, attr, value) + + cls.__getattribute__ = custom_attr + cls.__setattr__ = custom_setattr + return cls + + return decorator \ No newline at end of file diff --git a/utils/common_utils.py b/utils/common_utils.py index 2cfab1f6..35aa20bb 100644 --- a/utils/common_utils.py +++ b/utils/common_utils.py @@ -151,4 +151,19 @@ def reset_project_state(): # reset cache - StCache.clear_entire_cache() \ No newline at end of file + StCache.clear_entire_cache() + + +def reset_styling_settings(timing_uuid): + keys_to_delete = [ + f"index_of_which_stage_to_run_on_{timing_uuid}", + "index_of_default_model", + "index_of_controlnet_adapter_type", + "index_of_dreambooth_model", + f'prompt_value_{timing_uuid}', + "negative_prompt_value", + ] + + for k in keys_to_delete: + if k in st.session_state: + del st.session_state[k] \ No newline at end of file diff --git a/utils/ml_processor/replicate/replicate.py b/utils/ml_processor/replicate/replicate.py index 0feb79fa..ab9e8b64 100644 --- a/utils/ml_processor/replicate/replicate.py +++ b/utils/ml_processor/replicate/replicate.py @@ -55,6 +55,7 @@ def get_model_by_name(self, model_name, model_version=None): # it converts the standardized query_obj into params required by replicate def predict_model_output_standardized(self, model: ReplicateModel, query_obj: MLQueryObject): params = get_model_params_from_query_obj(model, query_obj) + params['query_dict'] = query_obj.to_json() return self.predict_model_output(model, **params) @check_user_credits diff --git a/utils/ml_processor/replicate/utils.py b/utils/ml_processor/replicate/utils.py index af296ab5..fb9addd4 100644 --- a/utils/ml_processor/replicate/utils.py +++ b/utils/ml_processor/replicate/utils.py @@ -52,7 +52,7 @@ def get_model_params_from_query_obj(model, query_obj: MLQueryObject): "height" : query_obj.height, "guidance_scale" : query_obj.guidance_scale, "seed" : query_obj.seed, - "num_inference_steps" : 
query_obj.num_inteference_steps + "num_inference_steps" : query_obj.num_inference_steps } if input_image: @@ -225,7 +225,7 @@ def get_model_params_from_query_obj(model, query_obj: MLQueryObject): 'height': query_obj.height, 'num_outputs': 1, 'image': input_image, - 'num_inference_steps': query_obj.num_inteference_steps, + 'num_inference_steps': query_obj.num_inference_steps, 'guidance_scale': query_obj.guidance_scale, 'prompt_strength': query_obj.strength, 'scheduler': "DPMSolverMultistep", From 5b33fc81af495e51378660772326aa8b1d4318a0 Mon Sep 17 00:00:00 2001 From: piyushK52 Date: Mon, 2 Oct 2023 18:38:37 +0530 Subject: [PATCH 032/164] copy style caption fixed --- ui_components/methods/common_methods.py | 57 ++++++++++++++----------- ui_components/models.py | 18 ++++++++ 2 files changed, 51 insertions(+), 24 deletions(-) diff --git a/ui_components/methods/common_methods.py b/ui_components/methods/common_methods.py index 1440c5ce..46e2944f 100644 --- a/ui_components/methods/common_methods.py +++ b/ui_components/methods/common_methods.py @@ -13,7 +13,7 @@ from io import BytesIO import numpy as np import urllib3 -from shared.constants import SERVER, InternalFileType, ServerType +from shared.constants import SERVER, AIModelCategory, AIModelType, InternalFileType, ServerType from pydub import AudioSegment from backend.models import InternalFileObject from ui_components.constants import CROPPED_IMG_LOCAL_PATH, MASK_IMG_LOCAL_PATH, SECOND_MASK_FILE, SECOND_MASK_FILE_PATH, TEMP_MASK_FILE, CreativeProcessType, WorkflowStageType @@ -99,11 +99,23 @@ def style_cloning_element(timing_details): with copy2: display_image(timing_details[frame_index - 1].uuid, stage=WorkflowStageType.STYLED.value, clickable=False) - st.caption("Prompt:") - st.caption(timing_details[frame_index - 1].prompt) - if timing_details[frame_index - 1].model is not None: - st.caption("Model:") - st.caption(timing_details[frame_index - 1].model.name) + + if timing_details[frame_index - 
1].primary_image.inference_params: + st.text("Prompt: ") + st.caption(timing_details[frame_index - 1].primary_image.inference_params.prompt) + st.text("Negative Prompt: ") + st.caption(timing_details[frame_index - 1].primary_image.inference_params.negative_prompt) + + if timing_details[frame_index - 1].primary_image.inference_params.model_uuid: + data_repo = DataRepo() + model: InternalAIModelObject = data_repo.get_ai_model_from_uuid(timing_details[frame_index - 1].primary_image.inference_params.model_uuid) + + st.text("Model:") + st.caption(model.name) + + if model.category.lower() == AIModelCategory.CONTROLNET.value: + st.text("Adapter Type:") + st.caption(timing_details[frame_index - 1].primary_image.inference_params.adapter_type) def jump_to_single_frame_view_button(display_number, timing_details): if st.button(f"Jump to #{display_number}"): @@ -174,24 +186,21 @@ def clone_styling_settings(source_frame_number, target_frame_uuid): target_timing.project.uuid) primary_image = data_repo.get_file_from_uuid(timing_details[source_frame_number].primary_image.uuid) - if primary_image and primary_image.inference_log and primary_image.inference_log.input_params: - params = json.loads(primary_image.inference_log.input_params) - - if 'query_dict' in params: - params = MLQueryObject(**json.loads(params['query_dict'])) - target_timing.prompt = params.prompt - target_timing.negative_prompt = params.negative_prompt - target_timing.guidance_scale = params.guidance_scale - target_timing.seed = params.seed - target_timing.num_inference_steps = params.num_inference_steps - target_timing.strength = params.strength - target_timing.adapter_type = params.adapter_type - target_timing.low_threshold = params.low_threshold - target_timing.high_threshold = params.high_threshold - - if params.model_uuid: - model = data_repo.get_ai_model_from_uuid(params.model_uuid) - target_timing.model = model + params = primary_image.inference_params + + target_timing.prompt = params.prompt + 
target_timing.negative_prompt = params.negative_prompt + target_timing.guidance_scale = params.guidance_scale + target_timing.seed = params.seed + target_timing.num_inference_steps = params.num_inference_steps + target_timing.strength = params.strength + target_timing.adapter_type = params.adapter_type + target_timing.low_threshold = params.low_threshold + target_timing.high_threshold = params.high_threshold + + if params.model_uuid: + model = data_repo.get_ai_model_from_uuid(params.model_uuid) + target_timing.model = model # TODO: image format is assumed to be PNG, change this later def save_new_image(img: Union[Image.Image, str, np.ndarray, io.BytesIO], project_uuid) -> InternalFileObject: diff --git a/ui_components/models.py b/ui_components/models.py index bd58be97..6ef2f1a7 100644 --- a/ui_components/models.py +++ b/ui_components/models.py @@ -5,6 +5,7 @@ from ui_components.constants import TEMP_MASK_FILE, DefaultTimingStyleParams from utils.common_decorators import session_state_attributes +from utils.constants import MLQueryObject class InternalFileObject: @@ -23,6 +24,23 @@ def location(self): if self.local_path: return self.local_path return self.hosted_url + + @property + def inference_params(self) -> MLQueryObject: + log = self.inference_log + if not log: + from utils.data_repo.data_repo import DataRepo + + data_repo = DataRepo() + fresh_obj = data_repo.get_file_from_uuid(self.uuid) + log = fresh_obj.inference_log + + if log and log.input_params: + params = json.loads(log.input_params) + if 'query_dict' in params: + return MLQueryObject(**json.loads(params['query_dict'])) + + return None class InternalProjectObject: From 160dea770ea5cfcce6f8e128c8261a5720f31552 Mon Sep 17 00:00:00 2001 From: piyushK52 Date: Mon, 2 Oct 2023 19:08:42 +0530 Subject: [PATCH 033/164] model data cached --- utils/cache/cache.py | 1 + utils/cache/cache_methods.py | 48 ++++++++++++++++++++++++++++++------ 2 files changed, 42 insertions(+), 7 deletions(-) diff --git 
a/utils/cache/cache.py b/utils/cache/cache.py index f542f3db..0f86b9e1 100644 --- a/utils/cache/cache.py +++ b/utils/cache/cache.py @@ -7,6 +7,7 @@ class CacheKey(ExtendedEnum): APP_SETTING = "app_setting" APP_SECRET = "app_secret" PROJECT_SETTING = "project_setting" + AI_MODEL = "ai_model" class StCache: diff --git a/utils/cache/cache_methods.py b/utils/cache/cache_methods.py index 012ee9d8..52fc0045 100644 --- a/utils/cache/cache_methods.py +++ b/utils/cache/cache_methods.py @@ -6,7 +6,7 @@ logger = AppLogger() -# NOTE: caching only timing_details, project settings and app settings. invalidating cache everytime a related data is updated +# NOTE: caching only timing_details, project settings, models and app settings. invalidating cache everytime a related data is updated def cache_data(cls): def _cache_create_or_update_file(self, *args, **kwargs): original_func = getattr(cls, '_original_create_or_update_file') @@ -74,13 +74,44 @@ def _cache_delete_project_from_uuid(self, *args, **kwargs): setattr(cls, '_original_delete_project_from_uuid', cls.delete_project_from_uuid) setattr(cls, "delete_project_from_uuid", _cache_delete_project_from_uuid) - + + def _cache_get_ai_model_from_uuid(self, *args, **kwargs): + model_list = StCache.get_all(CacheKey.AI_MODEL.value) + if model_list and len(model_list) and len(args) > 0: + for model in model_list: + if model.uuid == args[0]: + return model + + original_func = getattr(cls, '_original_get_ai_model_from_uuid') + model = original_func(self, *args, **kwargs) + StCache.add(model, CacheKey.AI_MODEL.value) + + return model + + setattr(cls, '_original_get_ai_model_from_uuid', cls.get_ai_model_from_uuid) + setattr(cls, "get_ai_model_from_uuid", _cache_get_ai_model_from_uuid) + + def _cache_get_ai_model_from_name(self, *args, **kwargs): + model_list = StCache.get_all(CacheKey.AI_MODEL.value) + if model_list and len(model_list) and len(args) > 0: + for model in model_list: + if model.name == args[0]: + return model + + original_func = 
getattr(cls, '_original_get_ai_model_from_name') + model = original_func(self, *args, **kwargs) + StCache.add(model, CacheKey.AI_MODEL.value) + + return model + + setattr(cls, '_original_get_ai_model_from_name', cls.get_ai_model_from_name) + setattr(cls, "get_ai_model_from_name", _cache_get_ai_model_from_name) def _cache_create_ai_model(self, *args, **kwargs): original_func = getattr(cls, '_original_create_ai_model') ai_model = original_func(self, *args, **kwargs) if ai_model: - StCache.delete_all(CacheKey.PROJECT_SETTING.value) + StCache.delete_all(CacheKey.AI_MODEL.value) return ai_model @@ -91,7 +122,7 @@ def _cache_update_ai_model(self, *args, **kwargs): original_func = getattr(cls, '_original_update_ai_model') ai_model = original_func(self, *args, **kwargs) if ai_model: - StCache.delete_all(CacheKey.PROJECT_SETTING.value) + StCache.delete_all(CacheKey.AI_MODEL.value) return ai_model @@ -103,13 +134,14 @@ def _cache_delete_ai_model_from_uuid(self, *args, **kwargs): status = original_func(self, *args, **kwargs) if status: - StCache.delete_all(CacheKey.PROJECT_SETTING.value) + StCache.delete_all(CacheKey.AI_MODEL.value) setattr(cls, '_original_delete_ai_model_from_uuid', cls.delete_ai_model_from_uuid) setattr(cls, "delete_ai_model_from_uuid", _cache_delete_ai_model_from_uuid) def _cache_get_timing_list_from_project(self, *args, **kwargs): + # checking if it's already present in the cache timing_list = StCache.get_all(CacheKey.TIMING_DETAILS.value) if timing_list and len(timing_list) and len(args) > 0: project_specific_list = [] @@ -117,7 +149,9 @@ def _cache_get_timing_list_from_project(self, *args, **kwargs): if timing.project.uuid == args[0]: project_specific_list.append(timing) - return project_specific_list + # if there are any timings for the project, return them + if len(project_specific_list): + return project_specific_list original_func = getattr(cls, '_original_get_timing_list_from_project') timing_list = original_func(self, *args, **kwargs) @@ -341,5 
+375,5 @@ def _cache_bulk_update_project_setting(self, *args, **kwargs): setattr(cls, '_original_bulk_update_project_setting', cls.bulk_update_project_setting) setattr(cls, "bulk_update_project_setting", _cache_bulk_update_project_setting) - + return cls \ No newline at end of file From 8b29c7af3a3eeff923e1cf5a7995e69cfc3513b0 Mon Sep 17 00:00:00 2001 From: piyushK52 Date: Mon, 2 Oct 2023 21:18:26 +0530 Subject: [PATCH 034/164] streamlit updated --- app.py | 18 +++++++----- banodoco_settings.py | 2 +- requirements.txt | 2 +- ui_components/components/app_settings_page.py | 4 +-- .../components/custom_models_page.py | 2 +- .../components/frame_styling_page.py | 4 +-- ui_components/components/new_project_page.py | 2 +- .../components/project_settings_page.py | 6 ++-- .../components/video_rendering_page.py | 4 +-- ui_components/methods/common_methods.py | 28 +++++++++---------- ui_components/setup.py | 2 +- .../widgets/animation_style_element.py | 4 +-- ui_components/widgets/attach_audio_element.py | 2 +- ui_components/widgets/cropping_element.py | 10 +++---- ui_components/widgets/drawing_element.py | 6 ++-- .../widgets/frame_clip_generation_elements.py | 10 +++---- ui_components/widgets/frame_selector.py | 4 +-- ui_components/widgets/frame_switch_btn.py | 8 +++--- ui_components/widgets/frame_time_selector.py | 4 +-- ui_components/widgets/image_carousal.py | 2 +- ui_components/widgets/inpainting_element.py | 18 ++++++------ ui_components/widgets/list_view.py | 8 +++--- ui_components/widgets/prompt_finder.py | 2 +- ui_components/widgets/styling_element.py | 12 ++++---- .../widgets/variant_comparison_element.py | 2 +- utils/data_repo/api_repo.py | 2 +- utils/data_repo/data_repo.py | 2 +- utils/st_memory.py | 14 +++++----- 28 files changed, 94 insertions(+), 90 deletions(-) diff --git a/app.py b/app.py index dfe6962b..39320930 100644 --- a/app.py +++ b/app.py @@ -11,16 +11,20 @@ from utils.local_storage.url_storage import delete_url_param, get_url_param, set_url_param 
from utils.third_party_auth.google.google_auth import get_google_auth_url -# loading the django app -os.environ.setdefault("DJANGO_SETTINGS_MODULE", "django_settings") -# Initialize Django -django.setup() + +if 'django_init' in st.session_state and st.session_state['django_init']: + print("************ django initialized ************") + # loading the django app + os.environ.setdefault("DJANGO_SETTINGS_MODULE", "django_settings") + # Initialize Django + django.setup() + st.session_state['django_init'] = True from banodoco_settings import project_init from ui_components.models import InternalAppSettingObject from utils.data_repo.data_repo import DataRepo - + if OFFLINE_MODE: SENTRY_DSN = os.getenv('SENTRY_DSN', '') @@ -46,7 +50,7 @@ def main(): and SERVER != ServerType.DEVELOPMENT.value: st.markdown("# :red[ba]:green[no]:orange[do]:blue[co]") st.subheader("Login with Google to proceed") - + auth_url = get_google_auth_url() st.markdown(auth_url, unsafe_allow_html=True) @@ -62,7 +66,7 @@ def main(): st.session_state[LOGGED_USER] = user.to_json() if user else None set_url_param(AUTH_TOKEN, str(token)) # st.experimental_set_query_params(test='testing') - st.experimental_rerun() + st.rerun() else: delete_url_param(AUTH_TOKEN) st.error("please login again") diff --git a/banodoco_settings.py b/banodoco_settings.py index 6a75c156..272feec9 100644 --- a/banodoco_settings.py +++ b/banodoco_settings.py @@ -12,7 +12,6 @@ from ui_components.models import InternalAppSettingObject, InternalFrameTimingObject, InternalProjectObject, InternalUserObject from utils.common_utils import create_working_assets from utils.constants import ML_MODEL_LIST, ImageStage -from utils.data_repo.data_repo import DataRepo from utils.ml_processor.replicate.constants import REPLICATE_MODEL ENCRYPTION_KEY = 'J2684nBgNUYa_K0a6oBr5H8MpSRW0EJ52Qmq7jExE-w=' @@ -20,6 +19,7 @@ logger = AppLogger() def project_init(): + from utils.data_repo.data_repo import DataRepo data_repo = DataRepo() # db initialization 
takes some time diff --git a/requirements.txt b/requirements.txt index 63e4be7d..11dd3072 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,4 +1,4 @@ -streamlit==1.23.1 +streamlit==1.27.0 streamlit-image-comparison==0.0.3 opencv-python-headless sahi diff --git a/ui_components/components/app_settings_page.py b/ui_components/components/app_settings_page.py index 3664b877..d4a1c54d 100644 --- a/ui_components/components/app_settings_page.py +++ b/ui_components/components/app_settings_page.py @@ -19,7 +19,7 @@ def app_settings_page(): if st.button("Save Settings"): data_repo.update_app_setting(replicate_username=replicate_username) data_repo.update_app_setting(replicate_key=replicate_key) - st.experimental_rerun() + st.rerun() if SERVER != ServerType.DEVELOPMENT.value: with st.expander("Purchase Credits", expanded=True): @@ -33,7 +33,7 @@ def app_settings_page(): credits = st.number_input("Credits (1 credit = $1)", value = st.session_state['input_credits'], step = 10) if credits != st.session_state['input_credits']: st.session_state['input_credits'] = credits - st.experimental_rerun() + st.rerun() if st.button("Generate payment link"): payment_link = data_repo.generate_payment_link(credits) diff --git a/ui_components/components/custom_models_page.py b/ui_components/components/custom_models_page.py index 621532d8..0874cb16 100644 --- a/ui_components/components/custom_models_page.py +++ b/ui_components/components/custom_models_page.py @@ -172,4 +172,4 @@ def custom_models_page(project_uuid): # st.success( # f"Successfully uploaded - the model '{model_name}' is now available for use!") # time.sleep(1.5) - # st.experimental_rerun() + # st.rerun() diff --git a/ui_components/components/frame_styling_page.py b/ui_components/components/frame_styling_page.py index 512751e6..2d6c6cf5 100644 --- a/ui_components/components/frame_styling_page.py +++ b/ui_components/components/frame_styling_page.py @@ -156,7 +156,7 @@ def frame_styling_page(mainheader2, project_uuid: str): 
lora_model_2_url=st.session_state['lora_model_2_url'] if ('lora_model_2_url' in st.session_state and st.session_state['lora_model_2_url']) else None, lora_model_3_url=st.session_state['lora_model_3_url'] if ('lora_model_3_url' in st.session_state and st.session_state['lora_model_3_url']) else None, ) - st.experimental_rerun() + st.rerun() st.markdown("***") @@ -192,7 +192,7 @@ def frame_styling_page(mainheader2, project_uuid: str): if st.button(f"Add key frame",type="primary",use_container_width=True): add_key_frame(selected_image, inherit_styling_settings, how_long_after, which_stage_for_starting_image) - st.experimental_rerun() + st.rerun() elif st.session_state['frame_styling_view_type'] == "List View": diff --git a/ui_components/components/new_project_page.py b/ui_components/components/new_project_page.py index 09c4722f..4cac2a8d 100644 --- a/ui_components/components/new_project_page.py +++ b/ui_components/components/new_project_page.py @@ -113,4 +113,4 @@ def new_project_page(): st.session_state['app_settings'] = 0 st.success("Project created successfully!") time.sleep(1) - st.experimental_rerun() \ No newline at end of file + st.rerun() \ No newline at end of file diff --git a/ui_components/components/project_settings_page.py b/ui_components/components/project_settings_page.py index d0a29673..40cc898c 100644 --- a/ui_components/components/project_settings_page.py +++ b/ui_components/components/project_settings_page.py @@ -63,12 +63,12 @@ def project_settings_page(project_uuid): # st.success( # "Version restored successfully! 
Just in case, the previous version has been saved as last_timings.csv") # time.sleep(2) - # st.experimental_rerun() + # st.rerun() # with col4: # if st.button("Delete this version", key=f"delete_version_{backup.name}"): # data_repo.delete_backup(backup.uuid) # st.success("backup deleted successfully!") - # st.experimental_rerun() + # st.rerun() with st.expander("Frame Size", expanded=True): st.write("Current Size = ", @@ -80,4 +80,4 @@ def project_settings_page(project_uuid): if st.button("Save"): data_repo.update_project_setting(project_uuid, width=width) data_repo.update_project_setting(project_uuid, height=height) - st.experimental_rerun() + st.rerun() diff --git a/ui_components/components/video_rendering_page.py b/ui_components/components/video_rendering_page.py index c37fa8b1..d6a23147 100644 --- a/ui_components/components/video_rendering_page.py +++ b/ui_components/components/video_rendering_page.py @@ -50,7 +50,7 @@ def video_rendering_page(mainheader2, project_uuid): render_video(final_video_name, project_uuid, quality_of_video, InternalFileTag.COMPLETE_GENERATED_VIDEO.value) st.success("Video rendered!") time.sleep(1.5) - st.experimental_rerun() + st.rerun() st.markdown("***") @@ -81,6 +81,6 @@ def video_rendering_page(mainheader2, project_uuid): # removing from database data_repo.delete_file_from_uuid(video.uuid) - st.experimental_rerun() + st.rerun() else: st.button(f"Delete {video}", disabled=True) diff --git a/ui_components/methods/common_methods.py b/ui_components/methods/common_methods.py index 46e2944f..45aa7be3 100644 --- a/ui_components/methods/common_methods.py +++ b/ui_components/methods/common_methods.py @@ -95,7 +95,7 @@ def style_cloning_element(timing_details): if st.button("Copy styling settings from this frame"): clone_styling_settings(frame_index - 1, st.session_state['current_frame_uuid']) reset_styling_settings(st.session_state['current_frame_uuid']) - st.experimental_rerun() + st.rerun() with copy2: 
display_image(timing_details[frame_index - 1].uuid, stage=WorkflowStageType.STYLED.value, clickable=False) @@ -123,7 +123,7 @@ def jump_to_single_frame_view_button(display_number, timing_details): st.session_state['current_frame_uuid'] = timing_details[st.session_state['current_frame_index'] - 1].uuid st.session_state['frame_styling_view_type'] = "Individual View" st.session_state['change_view_type'] = True - st.experimental_rerun() + st.rerun() def add_key_frame(selected_image, inherit_styling_settings, how_long_after, which_stage_for_starting_image): @@ -175,7 +175,7 @@ def add_key_frame(selected_image, inherit_styling_settings, how_long_after, whic st.session_state['page'] = CreativeProcessType.STYLING.value st.session_state['section_index'] = 0 - st.experimental_rerun() + st.rerun() # TODO: work with source_frame_uuid, instead of source_frame_number @@ -476,7 +476,7 @@ def reset_zoom_element(): st.session_state['rotation_angle_input'] = 0 st.session_state['x_shift'] = 0 st.session_state['y_shift'] = 0 - st.experimental_rerun() + st.rerun() @@ -564,7 +564,7 @@ def inpaint_in_black_space_element(cropped_img, project_uuid, stage=WorkflowStag data_repo.update_specific_timing( st.session_state['current_frame_uuid'], source_image_id=img_file.uuid) st.session_state['precision_cropping_inpainted_image_uuid'] = "" - st.experimental_rerun() + st.rerun() elif stage == WorkflowStageType.STYLED.value: if st.button("Save + Promote Image"): @@ -575,7 +575,7 @@ def inpaint_in_black_space_element(cropped_img, project_uuid, stage=WorkflowStag promote_image_variant( st.session_state['current_frame_uuid'], number_of_image_variants - 1) st.session_state['precision_cropping_inpainted_image_uuid'] = "" - st.experimental_rerun() + st.rerun() # returns a PIL image object def rotate_image(location, degree): @@ -646,7 +646,7 @@ def change_frame_position_input(timing_uuid, src): value=timing.aux_frame_index + 1, step=1, key=f"new_position_{timing.aux_frame_index}_{src}") if 
st.button('Update Position',key=f"change_frame_position_{timing.aux_frame_index}_{src}"): change_frame_position(timing_uuid, new_position - 1) - st.experimental_rerun() + st.rerun() # if new_position != timing.aux_frame_index: # print(f"Changing frame position from {timing.aux_frame_index + 1} to {new_position}") # change_frame_position(timing_uuid, new_position - 1) @@ -699,7 +699,7 @@ def move_frame_back_button(timing_uuid, orientation): if st.button(arrow, key=f"move_frame_back_{timing_uuid}", help="Move frame back"): move_frame(direction, timing_uuid) - st.experimental_rerun() + st.rerun() @@ -718,13 +718,13 @@ def move_frame_forward_button(timing_uuid, orientation): if st.button(arrow, key=f"move_frame_forward_{timing_uuid}", help="Move frame forward"): move_frame(direction, timing_uuid) - st.experimental_rerun() + st.rerun() def delete_frame_button(timing_uuid): if st.button("🗑️", key=f"delete_frame_{timing_uuid}", help="Delete frame"): delete_frame(timing_uuid) - st.experimental_rerun() + st.rerun() def delete_frame(timing_uuid): data_repo = DataRepo() @@ -783,7 +783,7 @@ def replace_image_widget(timing_uuid, stage): data_repo.update_specific_timing(timing.uuid, source_image_id=selected_image.uuid) st.success("Replaced") time.sleep(1) - st.experimental_rerun() + st.rerun() else: number_of_image_variants = add_image_variant( @@ -792,7 +792,7 @@ def replace_image_widget(timing_uuid, stage): timing.uuid, number_of_image_variants - 1) st.success("Replaced") time.sleep(1) - st.experimental_rerun() + st.rerun() elif replace_with == "Uploaded Frame": if stage == "source": @@ -803,7 +803,7 @@ def replace_image_widget(timing_uuid, stage): timing = data_repo.get_timing_from_uuid(timing.uuid) if save_uploaded_image(uploaded_file, timing.project.uuid, timing.uuid, "source"): time.sleep(1.5) - st.experimental_rerun() + st.rerun() else: replacement_frame = st.file_uploader("Upload a replacement frame here", type=[ "png", "jpeg"], accept_multiple_files=False, 
key=f"replacement_frame_upload_{stage}") @@ -818,7 +818,7 @@ def replace_image_widget(timing_uuid, stage): timing.uuid, number_of_image_variants - 1) st.success("Replaced") time.sleep(1) - st.experimental_rerun() + st.rerun() def promote_image_variant(timing_uuid, variant_to_promote_frame_number: str): data_repo = DataRepo() diff --git a/ui_components/setup.py b/ui_components/setup.py index b850b2fc..0ebb1703 100644 --- a/ui_components/setup.py +++ b/ui_components/setup.py @@ -91,7 +91,7 @@ def setup_app_ui(): st.session_state["index_of_project_name"] = next((i for i, p in enumerate( project_list) if p.uuid == st.session_state["project_uuid"]), None) data_repo.update_app_setting(previous_project=st.session_state["project_uuid"]) - st.experimental_rerun() + st.rerun() if st.session_state["project_uuid"] == "": st.info( diff --git a/ui_components/widgets/animation_style_element.py b/ui_components/widgets/animation_style_element.py index 558e28a3..78755a4e 100644 --- a/ui_components/widgets/animation_style_element.py +++ b/ui_components/widgets/animation_style_element.py @@ -76,7 +76,7 @@ def animation_style_element(timing_uuid, project_uuid): with bottom3: if st.button(f"Delete Prompt {i+1}"): del st.session_state['travel_list'][i] - st.experimental_rerun() + st.rerun() # Update the item if it has been edited if new_prompt != item['prompt'] or new_frame_count != item['frame_count']: st.session_state['travel_list'][i] = {'prompt': new_prompt, 'frame_count': new_frame_count} @@ -96,5 +96,5 @@ def animation_style_element(timing_uuid, project_uuid): settings, variant_count ) - st.experimental_rerun() + st.rerun() \ No newline at end of file diff --git a/ui_components/widgets/attach_audio_element.py b/ui_components/widgets/attach_audio_element.py index dc66ca91..deb13664 100644 --- a/ui_components/widgets/attach_audio_element.py +++ b/ui_components/widgets/attach_audio_element.py @@ -16,7 +16,7 @@ def attach_audio_element(project_uuid, expanded): if st.button("Upload and 
attach new audio"): if uploaded_file: save_audio_file(uploaded_file, project_uuid) - st.experimental_rerun() + st.rerun() else: st.warning('No file selected') diff --git a/ui_components/widgets/cropping_element.py b/ui_components/widgets/cropping_element.py index 18522406..26ebcf06 100644 --- a/ui_components/widgets/cropping_element.py +++ b/ui_components/widgets/cropping_element.py @@ -74,7 +74,7 @@ def precision_cropping_element(stage, project_uuid): save_zoomed_image(output_image, st.session_state['current_frame_uuid'], stage, promote=True) st.success("Image saved successfully!") time.sleep(1) - st.experimental_rerun() + st.rerun() inpaint_in_black_space_element( output_image, project_settings.project.uuid, stage) @@ -111,7 +111,7 @@ def get_working_image(): if 'working_image' not in st.session_state or st.session_state['current_working_image_number'] != st.session_state['current_frame_index'] or st.session_state['current_stage'] != stage: get_working_image() - st.experimental_rerun() + st.rerun() options1, options2, option3, option4 = st.columns([3, 1, 1, 1]) with options1: @@ -126,7 +126,7 @@ def get_working_image(): st.session_state['working_image'] = st.session_state['working_image'].rotate( -st.session_state['degree'], resample=Image.BICUBIC, expand=True) st.session_state['degrees_rotated_to'] = st.session_state['degree'] - st.experimental_rerun() + st.rerun() with sub_options_2: st.write("") @@ -135,7 +135,7 @@ def get_working_image(): st.session_state['degree'] = 0 get_working_image() st.session_state['degrees_rotated_to'] = 0 - st.experimental_rerun() + st.rerun() project_settings: InternalProjectObject = data_repo.get_project_setting( timing.project.uuid) @@ -186,7 +186,7 @@ def get_working_image(): data_repo.update_specific_timing( st.session_state['current_frame_uuid'], source_image_id=cropped_image.uuid) time.sleep(1) - st.experimental_rerun() + st.rerun() with cropbtn2: st.warning("Warning: This will overwrite the original image") diff --git 
a/ui_components/widgets/drawing_element.py b/ui_components/widgets/drawing_element.py index e7661eff..32acad06 100644 --- a/ui_components/widgets/drawing_element.py +++ b/ui_components/widgets/drawing_element.py @@ -134,7 +134,7 @@ def drawing_element(timing_details,project_settings,project_uuid,stage=WorkflowS # data_repo.update_specific_timing(st.session_state['current_frame_uuid'], source_image_id=st.session_state['canny_image']) st.session_state['reset_canvas'] = True st.session_state['canny_image'] = None - st.experimental_rerun() + st.rerun() with canvas2: realtime_update = True @@ -168,7 +168,7 @@ def drawing_element(timing_details,project_settings,project_uuid,stage=WorkflowS st.session_state['reset_canvas'] = False canvas_result = st_canvas() time.sleep(0.1) - st.experimental_rerun() + st.rerun() if canvas_result is not None: if canvas_result.json_data is not None and not canvas_result.json_data.get('objects'): st.button("Save New Image", key="save_canvas", disabled=True, help="Draw something first") @@ -251,4 +251,4 @@ def drawing_element(timing_details,project_settings,project_uuid,stage=WorkflowS st.success("New Canny Image Saved") st.session_state['reset_canvas'] = True time.sleep(1) - st.experimental_rerun() \ No newline at end of file + st.rerun() \ No newline at end of file diff --git a/ui_components/widgets/frame_clip_generation_elements.py b/ui_components/widgets/frame_clip_generation_elements.py index 90936183..c7efb542 100644 --- a/ui_components/widgets/frame_clip_generation_elements.py +++ b/ui_components/widgets/frame_clip_generation_elements.py @@ -23,7 +23,7 @@ def current_individual_clip_element(timing_uuid): st.error("Low Resolution") if st.button("Generate Full Resolution Clip", key=f"generate_full_resolution_video_{idx}"): create_single_interpolated_clip(timing.uuid, 'full') - st.experimental_rerun() + st.rerun() else: st.success("Full Resolution") else: @@ -49,11 +49,11 @@ def current_individual_clip_element(timing_uuid): with gen1: if 
st.button("Generate Low-Resolution Clip", key=f"generate_preview_video_{idx}"): create_single_interpolated_clip(timing.uuid, 'preview') - st.experimental_rerun() + st.rerun() with gen2: if st.button("Generate Full Resolution Clip", key=f"generate_full_resolution_video_{idx}"): create_single_interpolated_clip(timing.uuid, 'full') - st.experimental_rerun() + st.rerun() def update_animation_style_element(timing_uuid, horizontal=True): @@ -74,7 +74,7 @@ def update_animation_style_element(timing_uuid, horizontal=True): if st.session_state[f"animation_style_{idx}"] != timing.animation_style: st.session_state[f"animation_style_index_{idx}"] = animation_styles.index(st.session_state[f"animation_style_{idx}"]) timing.animation_style = st.session_state[f"animation_style_{idx}"] - st.experimental_rerun() + st.rerun() def current_preview_video_element(timing_uuid): @@ -114,4 +114,4 @@ def current_preview_video_element(timing_uuid): timing.uuid, 1.0) data_repo.update_specific_timing( timing.uuid, preview_video_id=preview_video.uuid) - st.experimental_rerun() \ No newline at end of file + st.rerun() \ No newline at end of file diff --git a/ui_components/widgets/frame_selector.py b/ui_components/widgets/frame_selector.py index 2e3c4d9d..cc199c1a 100644 --- a/ui_components/widgets/frame_selector.py +++ b/ui_components/widgets/frame_selector.py @@ -31,7 +31,7 @@ def frame_selector_widget(): st.session_state['frame_styling_view_type_index'] = 0 st.session_state['frame_styling_view_type'] = "Individual View" - st.experimental_rerun() + st.rerun() with time2: single_frame_time_selector(st.session_state['current_frame_uuid'], 'navbar', shift_frames=False) @@ -55,4 +55,4 @@ def frame_selector_widget(): if st.button("Delete key frame"): delete_frame(st.session_state['current_frame_uuid']) - st.experimental_rerun() \ No newline at end of file + st.rerun() \ No newline at end of file diff --git a/ui_components/widgets/frame_switch_btn.py b/ui_components/widgets/frame_switch_btn.py index 
dda5c4f0..108218ab 100644 --- a/ui_components/widgets/frame_switch_btn.py +++ b/ui_components/widgets/frame_switch_btn.py @@ -22,7 +22,7 @@ def back_and_forward_buttons(): st.session_state['current_frame_index'] = st.session_state['current_frame_index'] - 2 st.session_state['prev_frame_index'] = st.session_state['current_frame_index'] st.session_state['current_frame_uuid'] = timing_details[st.session_state['current_frame_index'] - 1].uuid - st.experimental_rerun() + st.rerun() with smallbutton1: # if it's not the first image if display_idx != 1: @@ -30,7 +30,7 @@ def back_and_forward_buttons(): st.session_state['current_frame_index'] = st.session_state['current_frame_index'] - 1 st.session_state['prev_frame_index'] = st.session_state['current_frame_index'] st.session_state['current_frame_uuid'] = timing_details[st.session_state['current_frame_index'] - 1].uuid - st.experimental_rerun() + st.rerun() with smallbutton2: st.button(f"{display_idx} 📍", disabled=True) @@ -41,11 +41,11 @@ def back_and_forward_buttons(): st.session_state['current_frame_index'] = st.session_state['current_frame_index'] + 1 st.session_state['prev_frame_index'] = st.session_state['current_frame_index'] st.session_state['current_frame_uuid'] = timing_details[st.session_state['current_frame_index'] - 1].uuid - st.experimental_rerun() + st.rerun() with smallbutton4: if display_idx <= len(timing_details)-2: if st.button(f"{display_idx+2} ⏭️", key=f"Next Next Image for {display_idx}"): st.session_state['current_frame_index'] = st.session_state['current_frame_index'] + 2 st.session_state['prev_frame_index'] = st.session_state['current_frame_index'] st.session_state['current_frame_uuid'] = timing_details[st.session_state['current_frame_index'] - 1].uuid - st.experimental_rerun() + st.rerun() diff --git a/ui_components/widgets/frame_time_selector.py b/ui_components/widgets/frame_time_selector.py index cd4cbed0..1ff5f3c7 100644 --- a/ui_components/widgets/frame_time_selector.py +++ 
b/ui_components/widgets/frame_time_selector.py @@ -33,7 +33,7 @@ def update_frame_duration(timing_uuid, frame_duration, next_timing, shift_frames # updating clip_duration update_clip_duration_of_all_timing_frames(timing.project.uuid) - st.experimental_rerun() + st.rerun() def update_frame_time(timing_uuid, frame_time, shift_frames): data_repo = DataRepo() @@ -50,7 +50,7 @@ def update_frame_time(timing_uuid, frame_time, shift_frames): # updating clip_duration update_clip_duration_of_all_timing_frames(timing.project.uuid) - st.experimental_rerun() + st.rerun() def single_frame_time_duration_setter(timing_uuid, src, shift_frames=True): diff --git a/ui_components/widgets/image_carousal.py b/ui_components/widgets/image_carousal.py index ad1ec9d5..46b84b00 100644 --- a/ui_components/widgets/image_carousal.py +++ b/ui_components/widgets/image_carousal.py @@ -46,7 +46,7 @@ def display_image(timing_uuid, stage=None, clickable=False): # st.session_state['frame_styling_view_type_index'] = 0 st.session_state['frame_styling_view_type'] = "Individual View" st.session_state['counter'] += 1 - st.experimental_rerun() + st.rerun() elif clickable is False: st.image(image, use_column_width=True) diff --git a/ui_components/widgets/inpainting_element.py b/ui_components/widgets/inpainting_element.py index 7f7e8b48..d3cf65a2 100644 --- a/ui_components/widgets/inpainting_element.py +++ b/ui_components/widgets/inpainting_element.py @@ -75,7 +75,7 @@ def inpainting_element(timing_uuid): if st.session_state['index_of_type_of_mask_selection'] != mask_selection_options.index(type_of_mask_selection): st.session_state['index_of_type_of_mask_selection'] = mask_selection_options.index( type_of_mask_selection) - st.experimental_rerun() + st.rerun() if "which_layer" not in st.session_state: st.session_state['which_layer'] = "Background" @@ -159,7 +159,7 @@ def inpainting_element(timing_uuid): img2=image_file.location, starting_position=5, label1="Original", label2="Edited") if st.button("Reset 
Canvas"): st.session_state['edited_image'] = "" - st.experimental_rerun() + st.rerun() elif type_of_mask_selection == "Automated Background Selection" or type_of_mask_selection == "Automated Layer Selection" or type_of_mask_selection == "Re-Use Previous Mask" or type_of_mask_selection == "Invert Previous Mask": with main_col_1: @@ -189,7 +189,7 @@ def inpainting_element(timing_uuid): img2=image_file.location, starting_position=5, label1="Original", label2="Edited") if st.button("Reset Canvas"): st.session_state['edited_image'] = "" - st.experimental_rerun() + st.rerun() with main_col_1: @@ -205,7 +205,7 @@ def inpainting_element(timing_uuid): if st.session_state["index_of_type_of_mask_replacement"] != types_of_mask_replacement.index(st.session_state["type_of_mask_replacement"]): st.session_state["index_of_type_of_mask_replacement"] = types_of_mask_replacement.index( st.session_state["type_of_mask_replacement"]) - st.experimental_rerun() + st.rerun() if st.session_state["type_of_mask_replacement"] == "Replace With Image": prompt = "" @@ -223,7 +223,7 @@ def inpainting_element(timing_uuid): if st.session_state['index_of_source_of_image'] != sources_of_images.index(source_of_image): st.session_state['index_of_source_of_image'] = sources_of_images.index( source_of_image) - st.experimental_rerun() + st.rerun() if source_of_image == "Uploaded": btn1, btn2 = st.columns([1, 1]) @@ -239,7 +239,7 @@ def inpainting_element(timing_uuid): background_list.append( uploaded_file.name) time.sleep(1.5) - st.experimental_rerun() + st.rerun() with btn2: background_selection = st.selectbox( "Range background", background_list) @@ -284,7 +284,7 @@ def inpainting_element(timing_uuid): edited_image = execute_image_edit(type_of_mask_selection, st.session_state["type_of_mask_replacement"], background_image, editing_image, "", "", width, height, st.session_state['which_layer'], st.session_state['current_frame_uuid']) st.session_state['edited_image'] = edited_image.uuid - 
st.experimental_rerun() + st.rerun() with edit2: if st.session_state['edited_image'] != "": @@ -298,7 +298,7 @@ def inpainting_element(timing_uuid): promote_image_variant( st.session_state['current_frame_uuid'], number_of_image_variants - 1) st.session_state['edited_image'] = "" - st.experimental_rerun() + st.rerun() else: if st.button("Run Edit & Promote"): if st.session_state["type_of_mask_replacement"] == "Inpainting": @@ -321,4 +321,4 @@ def inpainting_element(timing_uuid): st.session_state['edited_image'] = "" st.success("Image promoted!") - st.experimental_rerun() \ No newline at end of file + st.rerun() \ No newline at end of file diff --git a/ui_components/widgets/list_view.py b/ui_components/widgets/list_view.py index a4a03ab5..b672f4a0 100644 --- a/ui_components/widgets/list_view.py +++ b/ui_components/widgets/list_view.py @@ -36,7 +36,7 @@ def page_toggle(num_pages, items_per_page, project_uuid, position): 1, num_pages), horizontal=True, index=st.session_state['index_of_current_page'] - 1, key=f"page_selection_radio_{position}") if st.session_state['current_page'] != st.session_state['index_of_current_page']: st.session_state['index_of_current_page'] = st.session_state['current_page'] - st.experimental_rerun() + st.rerun() start_index = (st.session_state['current_page'] - 1) * items_per_page end_index = min(start_index + items_per_page,len(timing_details)) @@ -74,15 +74,15 @@ def styling_list_view(start_index, end_index, shift_frames_setting, project_uuid with btn1: if st.button("Delete this keyframe", key=f'{i}'): delete_frame(timing_details[i].uuid) - st.experimental_rerun() + st.rerun() with btn2: if st.button("⬆️", key=f"Promote {display_number}"): move_frame("Up", timing_details[i].uuid) - st.experimental_rerun() + st.rerun() with btn3: if st.button("⬇️", key=f"Demote {display_number}"): move_frame("Down", timing_details[i].uuid) - st.experimental_rerun() + st.rerun() st.markdown("***") diff --git a/ui_components/widgets/prompt_finder.py 
b/ui_components/widgets/prompt_finder.py index 2930b143..80f5c39b 100644 --- a/ui_components/widgets/prompt_finder.py +++ b/ui_components/widgets/prompt_finder.py @@ -39,7 +39,7 @@ def prompt_finder_element(project_uuid): st.success("Prompt added successfully!") time.sleep(0.3) uploaded_file = "" - st.experimental_rerun() + st.rerun() if 'last_generated_prompt' in st.session_state and st.session_state['last_generated_prompt']: st.write("Generated prompt - ", st.session_state['last_generated_prompt']) diff --git a/ui_components/widgets/styling_element.py b/ui_components/widgets/styling_element.py index 09bf4675..17eb0225 100644 --- a/ui_components/widgets/styling_element.py +++ b/ui_components/widgets/styling_element.py @@ -75,7 +75,7 @@ def styling_element(timing_uuid, view_type="Single"): if stages.index(st.session_state['transformation_stage']) != st.session_state['index_of_which_stage_to_run_on']: st.session_state['index_of_which_stage_to_run_on'] = stages.index( st.session_state['transformation_stage']) - st.experimental_rerun() + st.rerun() if st.session_state['transformation_stage'] != ImageStage.NONE.value: model_list = data_repo.get_all_ai_model_list(model_type_list=[AIModelType.IMG2IMG.value], custom_trained=False) @@ -107,7 +107,7 @@ def styling_element(timing_uuid, view_type="Single"): model_list) if getattr(obj, 'name') == selected_model_name), None) if st.session_state['index_of_default_model'] != selected_model_index: st.session_state['index_of_default_model'] = selected_model_index - st.experimental_rerun() + st.rerun() current_model_name = data_repo.get_ai_model_from_uuid( st.session_state['model']).name @@ -129,7 +129,7 @@ def styling_element(timing_uuid, view_type="Single"): if st.session_state['index_of_controlnet_adapter_type'] != controlnet_adapter_types.index(st.session_state['adapter_type']): st.session_state['index_of_controlnet_adapter_type'] = controlnet_adapter_types.index( st.session_state['adapter_type']) - st.experimental_rerun() + 
st.rerun() st.session_state['custom_models'] = [] elif current_model_name == AIModelCategory.LORA.value: @@ -303,7 +303,7 @@ def styling_element(timing_uuid, view_type="Single"): if st.session_state['prompt'] != st.session_state[f'prompt_value_{append_to_item_name}']: st.session_state[f'prompt_value_{append_to_item_name}'] = st.session_state['prompt'] - st.experimental_rerun() + st.rerun() if view_type == "List": st.info( @@ -358,7 +358,7 @@ def styling_element(timing_uuid, view_type="Single"): if st.session_state['negative_prompt'] != st.session_state['negative_prompt_value']: st.session_state['negative_prompt_value'] = st.session_state['negative_prompt'] - st.experimental_rerun() + st.rerun() st.session_state['guidance_scale'] = st.number_input( f"Guidance scale", value=float(st.session_state['guidance_scale'])) @@ -435,5 +435,5 @@ def styling_element(timing_uuid, view_type="Single"): for _ in range(0, batch_number_of_variants): trigger_restyling_process(timing_details[i].uuid, st.session_state['model'], st.session_state['prompt'], st.session_state['strength'], st.session_state['negative_prompt'], st.session_state['guidance_scale'], st.session_state['seed'], st.session_state[ 'num_inference_steps'], st.session_state['transformation_stage'], st.session_state["promote_new_generation"], st.session_state['custom_models'], st.session_state['adapter_type'], st.session_state["use_new_settings"], st.session_state['low_threshold'], st.session_state['high_threshold']) - st.experimental_rerun() + st.rerun() diff --git a/ui_components/widgets/variant_comparison_element.py b/ui_components/widgets/variant_comparison_element.py index 37cd9ea8..28fd9356 100644 --- a/ui_components/widgets/variant_comparison_element.py +++ b/ui_components/widgets/variant_comparison_element.py @@ -79,4 +79,4 @@ def variant_comparison_element(timing_uuid, stage=CreativeProcessType.MOTION.val else: promote_image_variant(timing.uuid, which_variant - 1) time.sleep(0.5) - st.experimental_rerun() \ No 
newline at end of file + st.rerun() \ No newline at end of file diff --git a/utils/data_repo/api_repo.py b/utils/data_repo/api_repo.py index c7f87a2b..b9367578 100644 --- a/utils/data_repo/api_repo.py +++ b/utils/data_repo/api_repo.py @@ -79,7 +79,7 @@ def _setup_urls(self): def logout(self): delete_url_param(AUTH_TOKEN) - st.experimental_rerun() + st.rerun() ################### base http methods def _get_headers(self, content_type="application/json"): diff --git a/utils/data_repo/data_repo.py b/utils/data_repo/data_repo.py index 6c77e96d..21ff2d1c 100644 --- a/utils/data_repo/data_repo.py +++ b/utils/data_repo/data_repo.py @@ -1,7 +1,6 @@ # this repo serves as a middlerware between API backend and the frontend import threading from shared.constants import InternalFileType, InternalResponse -from backend.db_repo import DBRepo from shared.constants import SERVER, ServerType from ui_components.models import InferenceLogObject, InternalAIModelObject, InternalAppSettingObject, InternalBackupObject, InternalFrameTimingObject, InternalProjectObject, InternalFileObject, InternalSettingObject, InternalUserObject from utils.cache.cache_methods import cache_data @@ -25,6 +24,7 @@ def __new__(cls): def __init__(self): if not self._initialized: if SERVER == ServerType.DEVELOPMENT.value: + from backend.db_repo import DBRepo self.db_repo = DBRepo() else: self.db_repo = APIRepo() diff --git a/utils/st_memory.py b/utils/st_memory.py index 8f1df234..41b5f622 100644 --- a/utils/st_memory.py +++ b/utils/st_memory.py @@ -16,7 +16,7 @@ def radio(label, options, index=0, key=None, help=None, on_change=None, disabled if options.index(selection) != st.session_state[key]: st.session_state[key] = options.index(selection) - st.experimental_rerun() + st.rerun() return selection @@ -29,7 +29,7 @@ def selectbox(label, options, index=0, key=None, help=None, on_change=None, disa if options.index(selection) != st.session_state[key]: st.session_state[key] = options.index(selection) - 
st.experimental_rerun() + st.rerun() return selection @@ -43,7 +43,7 @@ def number_input(label, min_value=None, max_value=None, step=None, format=None, if selection != st.session_state[key]: st.session_state[key] = selection - st.experimental_rerun() + st.rerun() return selection @@ -56,7 +56,7 @@ def slider(label, min_value=None, max_value=None, value=None, step=None, format= if selection != st.session_state[key]: st.session_state[key] = selection - st.experimental_rerun() + st.rerun() return selection @@ -74,7 +74,7 @@ def select_slider(label, options=(), value=None, format_func=None, key=None, hel if getattr(project_settings, key, default_value): data_repo = DataRepo() data_repo.update_project_setting(project_settings.project.uuid, key=value) - st.experimental_rerun() + st.rerun() return selection @@ -88,7 +88,7 @@ def toggle(label, value=True,key=None, help=None, on_change=None, disabled=False if selection != st.session_state[key]: st.session_state[key] = selection - st.experimental_rerun() + st.rerun() return selection @@ -102,6 +102,6 @@ def menu(menu_title,options, icons=None, menu_icon=None, default_index=0, key=No if options.index(selection) != st.session_state[key]: st.session_state[key] = options.index(selection) - st.experimental_rerun() + st.rerun() return selection \ No newline at end of file From e057eb77dd721aea995186769a2d40aa57e11dff Mon Sep 17 00:00:00 2001 From: piyushK52 Date: Tue, 3 Oct 2023 00:14:01 +0530 Subject: [PATCH 035/164] wip: timed & preview video fixes --- .../components/frame_styling_page.py | 2 +- ui_components/methods/video_methods.py | 56 ++++++++++------- ui_components/models.py | 1 + .../widgets/animation_style_element.py | 6 +- .../widgets/frame_clip_generation_elements.py | 3 +- .../widgets/variant_comparison_element.py | 62 ++++++++++--------- utils/cache/cache_methods.py | 6 +- utils/media_processor/video.py | 8 ++- 8 files changed, 83 insertions(+), 61 deletions(-) diff --git 
a/ui_components/components/frame_styling_page.py b/ui_components/components/frame_styling_page.py index 2d6c6cf5..c170070b 100644 --- a/ui_components/components/frame_styling_page.py +++ b/ui_components/components/frame_styling_page.py @@ -78,7 +78,7 @@ def frame_styling_page(mainheader2, project_uuid: str): idx = st.session_state['current_frame_index'] - 1 - st.session_state['show_comparison'] = st_memory.radio("Show:", options=["Other Variants", "Preview Video in Context"], horizontal=True, project_settings=project_settings, key="show_comparison_radio_motion") + st.session_state['show_comparison'] = st_memory.radio("Show:", options=["Other Variants", "Preview Video in Context"], horizontal=True, key="show_comparison_radio_motion") if st.session_state['show_comparison'] == "Other Variants": variant_comparison_element(st.session_state['current_frame_uuid']) diff --git a/ui_components/methods/video_methods.py b/ui_components/methods/video_methods.py index 457db77e..3e0050b8 100644 --- a/ui_components/methods/video_methods.py +++ b/ui_components/methods/video_methods.py @@ -20,8 +20,8 @@ from utils.media_processor.video import VideoProcessor -# returns the timed_clip, which is the interpolated video with correct length -# interpolated_clip_uuid signals which clip to promote to timed clip +# NOTE: interpolated_clip_uuid signals which clip to promote to timed clip (this is the main variant) +# this function returns the preview_clip, which is the interpolated video with correct length def create_or_get_single_preview_video(timing_uuid, interpolated_clip_uuid=None): from ui_components.methods.file_methods import generate_temp_file, save_or_host_file_bytes from ui_components.methods.common_methods import get_audio_bytes_for_slice @@ -50,18 +50,26 @@ def create_or_get_single_preview_video(timing_uuid, interpolated_clip_uuid=None) data_repo.add_interpolated_clip(timing_uuid, interpolated_clip_id=video_fie.uuid) if not timing.timed_clip: - timing = 
data_repo.get_timing_from_uuid(timing_uuid) - interpolated_clip = data_repo.get_file_from_uuid(interpolated_clip_uuid) if interpolated_clip_uuid \ else timing.interpolated_clip_list[0] + output_video = update_speed_of_video_clip(interpolated_clip, timing_uuid) + data_repo.update_specific_timing(timing_uuid, timed_clip_id=output_video.uuid) + + if not timing.preview: + timing = data_repo.get_timing_from_uuid(timing_uuid) + timed_clip = timing.timed_clip + temp_video_file = None - if interpolated_clip.hosted_url: - temp_video_file = generate_temp_file(interpolated_clip.hosted_url, '.mp4') + if timed_clip.hosted_url and is_s3_image_url(timed_clip.hosted_url): + temp_video_file = generate_temp_file(timed_clip.hosted_url, '.mp4') - file_path = temp_video_file.name if temp_video_file else interpolated_clip.local_path + file_path = temp_video_file.name if temp_video_file else timed_clip.local_path clip = VideoFileClip(file_path) - + + if temp_video_file: + os.remove(temp_video_file.name) + number_text = TextClip(str(timing.aux_frame_index), fontsize=24, color='white') number_background = TextClip(" ", fontsize=24, color='black', bg_color='black', size=( @@ -72,36 +80,38 @@ def create_or_get_single_preview_video(timing_uuid, interpolated_clip_uuid=None) (number_background.w - number_text.w - 5, number_background.h - number_text.h - 5)).set_duration(clip.duration) clip_with_number = CompositeVideoClip([clip, number_background, number_text]) - clip_with_number.write_videofile(filename=file_path, codec='libx264', audio_codec='aac') + temp_output_file = tempfile.NamedTemporaryFile(delete=False, suffix=".mp4", mode='wb') + clip_with_number.write_videofile(filename=temp_output_file.name, codec='libx264', audio_codec='aac') - if temp_video_file: + if temp_output_file: video_bytes = None with open(file_path, 'rb') as f: video_bytes = f.read() - hosted_url = save_or_host_file_bytes(video_bytes, interpolated_clip.local_path) - if hosted_url: - 
data_repo.update_file(interpolated_clip.uuid, hosted_url=hosted_url) + preview_video = convert_bytes_to_file( + file_location_to_save="videos/" + str(timing.project.uuid) + "/assets/videos/0_raw/" + str(uuid.uuid4()) + ".png", + mime_type="video/mp4", + file_bytes=video_bytes, + project_uuid=timing.project.uuid, + inference_log_id=None + ) - os.remove(temp_video_file.name) + data_repo.update_specific_timing(timing_uuid, preview_id=preview_video.uuid) + os.remove(temp_output_file.name) - # timed_clip has the correct length (equal to the time difference between the current and the next frame) + # preview has the correct length (equal to the time difference between the current and the next frame) # which the interpolated video may or maynot have - clip_duration = calculate_desired_duration_of_individual_clip(timing_uuid) - data_repo.update_specific_timing(timing_uuid, clip_duration=clip_duration) - - timing: InternalFrameTimingObject = data_repo.get_timing_from_uuid(timing_uuid) - output_video = update_speed_of_video_clip(interpolated_clip, timing_uuid) - data_repo.update_specific_timing(timing_uuid, timed_clip_id=output_video.uuid) + # clip_duration = calculate_desired_duration_of_individual_clip(timing_uuid) + # data_repo.update_specific_timing(timing_uuid, clip_duration=clip_duration) # adding audio if the audio file is present if project_details.audio: audio_bytes = get_audio_bytes_for_slice(timing_uuid) - add_audio_to_video_slice(timing.timed_clip, audio_bytes) + add_audio_to_video_slice(timing.preview, audio_bytes) timing: InternalFrameTimingObject = data_repo.get_timing_from_uuid( timing_uuid) - return timing.timed_clip + return timing.preview # this includes all the animation styles [direct morphing, interpolation, image to video] def create_single_interpolated_clip(timing_uuid, quality, settings={}, variant_count=1): diff --git a/ui_components/models.py b/ui_components/models.py index 6ef2f1a7..5f15c1ad 100644 --- a/ui_components/models.py +++ 
b/ui_components/models.py @@ -108,6 +108,7 @@ def __init__(self, **kwargs): if 'interpolated_clip_list' in kwargs and kwargs["interpolated_clip_list"] else [] self.timed_clip = InternalFileObject( **kwargs["timed_clip"]) if 'timed_clip' in kwargs and kwargs["timed_clip"] else None + self.preview_video = InternalFileObject(**kwargs['preview_video']) if 'preview_video' in kwargs and kwargs['preview_video'] else None self.mask = InternalFileObject( **kwargs["mask"]) if 'mask' in kwargs and kwargs["mask"] else None self.canny_image = InternalFileObject( diff --git a/ui_components/widgets/animation_style_element.py b/ui_components/widgets/animation_style_element.py index 78755a4e..0890fae9 100644 --- a/ui_components/widgets/animation_style_element.py +++ b/ui_components/widgets/animation_style_element.py @@ -1,7 +1,7 @@ import time import streamlit as st from typing import List -from shared.constants import AnimationToolType +from shared.constants import AnimationStyleType, AnimationToolType from ui_components.methods.video_methods import create_single_interpolated_clip from utils.data_repo.data_repo import DataRepo from utils.ml_processor.motion_module import AnimateDiffCheckpoint @@ -14,7 +14,7 @@ def animation_style_element(timing_uuid, project_uuid): current_animation_style = timing.animation_style variant_count = 1 - if current_animation_style == "Interpolation": + if current_animation_style == AnimationStyleType.INTERPOLATION.value: animation_tool = st.radio("Animation Tool:", options=AnimationToolType.value_list(), key="animation_tool", horizontal=True) video_resolution = st.radio("Video Resolution:", options=["Preview Resolution", "Full Resolution"], key="video_resolution", horizontal=True) @@ -46,7 +46,7 @@ def animation_style_element(timing_uuid, project_uuid): normalise_speed=normalise_speed ) - elif current_animation_style == "Image to Video": + elif current_animation_style == AnimationStyleType.IMAGE_TO_VIDEO.value: st.info("For image to video, you can 
select one or more prompts, and how many frames you want to generate for each prompt - it'll attempt to travel from one prompt to the next.") which_motion_module = st.selectbox("Which motion module would you like to use?", options=motion_modules, key="which_motion_module") diff --git a/ui_components/widgets/frame_clip_generation_elements.py b/ui_components/widgets/frame_clip_generation_elements.py index c7efb542..ea9d8ea0 100644 --- a/ui_components/widgets/frame_clip_generation_elements.py +++ b/ui_components/widgets/frame_clip_generation_elements.py @@ -110,8 +110,7 @@ def current_preview_video_element(timing_uuid): st.info("This allows you to preview the video with the surrounding clips attached.") if st.button("Generate New Preview Video", key=f"generate_preview_{idx}"): - preview_video = create_full_preview_video( - timing.uuid, 1.0) + preview_video = create_full_preview_video(timing.uuid, 1.0) data_repo.update_specific_timing( timing.uuid, preview_video_id=preview_video.uuid) st.rerun() \ No newline at end of file diff --git a/ui_components/widgets/variant_comparison_element.py b/ui_components/widgets/variant_comparison_element.py index 28fd9356..40af5807 100644 --- a/ui_components/widgets/variant_comparison_element.py +++ b/ui_components/widgets/variant_comparison_element.py @@ -17,6 +17,7 @@ def variant_comparison_element(timing_uuid, stage=CreativeProcessType.MOTION.val aboveimage1, aboveimage2, aboveimage3 = st.columns([1, 0.25, 0.75]) which_variant = 1 + number_of_variants = 0 with aboveimage1: st.info(f"Current variant = {timing.primary_variant_index + 1}") @@ -27,18 +28,19 @@ def variant_comparison_element(timing_uuid, stage=CreativeProcessType.MOTION.val with aboveimage3: number_of_variants = len(timing.interpolated_clip_list) if stage == CreativeProcessType.MOTION.value else len(variants) - if show_more_than_10_variants is True: - current_variant = timing.primary_interpolated_video_index if stage == CreativeProcessType.MOTION.value else int( - 
timing.primary_variant_index) - which_variant = st.radio(f'Main variant = {current_variant + 1}', range(1, - number_of_variants + 1), index=number_of_variants-1, horizontal=True, key=f"Main variant for {st.session_state['current_frame_index']}") - else: - last_ten_variants = range( - max(1, number_of_variants - 10), number_of_variants + 1) - current_variant = timing.primary_interpolated_video_index if stage == CreativeProcessType.MOTION.value else int( - timing.primary_variant_index) - which_variant = st.radio(f'Main variant = {current_variant + 1}', last_ten_variants, index=len( - last_ten_variants)-1, horizontal=True, key=f"Main variant for {st.session_state['current_frame_index']}") + if number_of_variants: + if show_more_than_10_variants is True: + current_variant = timing.primary_interpolated_video_index if stage == CreativeProcessType.MOTION.value else int( + timing.primary_variant_index) + which_variant = st.radio(f'Main variant = {current_variant + 1}', range(1, + number_of_variants + 1), index=number_of_variants-1, horizontal=True, key=f"Main variant for {st.session_state['current_frame_index']}") + else: + last_ten_variants = range( + max(1, number_of_variants - 10), number_of_variants + 1) + current_variant = timing.primary_interpolated_video_index if stage == CreativeProcessType.MOTION.value else int( + timing.primary_variant_index) + which_variant = st.radio(f'Main variant = {current_variant + 1}', last_ten_variants, index=len( + last_ten_variants)-1, horizontal=True, key=f"Main variant for {st.session_state['current_frame_index']}") with mainimages1: st.success("**Main variant**") @@ -52,16 +54,19 @@ def variant_comparison_element(timing_uuid, stage=CreativeProcessType.MOTION.val with mainimages2: if stage == CreativeProcessType.MOTION.value: - if not (timing.interpolated_clip_list and len(timing.interpolated_clip_list)): - st.error("No variant for this frame") + if number_of_variants: + if not (timing.interpolated_clip_list and 
len(timing.interpolated_clip_list)): + st.error("No variant for this frame") + + if which_variant - 1 == current_variant: + st.success("**Main variant**") + else: + st.info(f"**Variant #{which_variant}**") - if which_variant - 1 == current_variant: - st.success("**Main variant**") + st.video(timing.interpolated_clip_list[which_variant - 1].location, format='mp4', start_time=0) if \ + (timing.interpolated_clip_list and len(timing.interpolated_clip_list)) else st.error("No video present") else: - st.info(f"**Variant #{which_variant}**") - - st.video(timing.interpolated_clip_list[which_variant - 1].location, format='mp4', start_time=0) if \ - (timing.interpolated_clip_list and len(timing.interpolated_clip_list)) else st.error("No video present") + st.error("No variants found for this frame") else: if len(timing.alternative_images_list): if which_variant - 1 == current_variant: @@ -72,11 +77,12 @@ def variant_comparison_element(timing_uuid, stage=CreativeProcessType.MOTION.val st.image(variants[which_variant - 1].location, use_column_width=True) - if which_variant - 1 != current_variant: - if st.button(f"Promote Variant #{which_variant}", key=f"Promote Variant #{which_variant} for {st.session_state['current_frame_index']}", help="Promote this variant to the primary image"): - if stage == CreativeProcessType.MOTION.value: - promote_video_variant(timing.uuid, which_variant - 1) - else: - promote_image_variant(timing.uuid, which_variant - 1) - time.sleep(0.5) - st.rerun() \ No newline at end of file + if number_of_variants: + if which_variant - 1 != current_variant: + if st.button(f"Promote Variant #{which_variant}", key=f"Promote Variant #{which_variant} for {st.session_state['current_frame_index']}", help="Promote this variant to the primary image"): + if stage == CreativeProcessType.MOTION.value: + promote_video_variant(timing.uuid, which_variant - 1) + else: + promote_image_variant(timing.uuid, which_variant - 1) + time.sleep(0.5) + st.rerun() \ No newline at end of 
file diff --git a/utils/cache/cache_methods.py b/utils/cache/cache_methods.py index 52fc0045..9473927e 100644 --- a/utils/cache/cache_methods.py +++ b/utils/cache/cache_methods.py @@ -84,7 +84,8 @@ def _cache_get_ai_model_from_uuid(self, *args, **kwargs): original_func = getattr(cls, '_original_get_ai_model_from_uuid') model = original_func(self, *args, **kwargs) - StCache.add(model, CacheKey.AI_MODEL.value) + if model: + StCache.add(model, CacheKey.AI_MODEL.value) return model @@ -100,7 +101,8 @@ def _cache_get_ai_model_from_name(self, *args, **kwargs): original_func = getattr(cls, '_original_get_ai_model_from_name') model = original_func(self, *args, **kwargs) - StCache.add(model, CacheKey.AI_MODEL.value) + if model: + StCache.add(model, CacheKey.AI_MODEL.value) return model diff --git a/utils/media_processor/video.py b/utils/media_processor/video.py index 5724afd0..4ff20e68 100644 --- a/utils/media_processor/video.py +++ b/utils/media_processor/video.py @@ -14,9 +14,13 @@ def update_video_speed(video_location, animation_style, desired_duration): @staticmethod def update_video_bytes_speed(video_bytes, animation_style, desired_duration): - video_io = BytesIO(video_bytes) - clip = VideoFileClip(video_io) + # video_io = BytesIO(video_bytes) + temp_file = tempfile.NamedTemporaryFile(delete=False, suffix=".png", mode='wb') + with open(temp_file.name, 'wb') as out_file: + out_file.write(video_bytes) + clip = VideoFileClip(temp_file.name) + os.remove(temp_file.name) return VideoProcessor.update_clip_speed(clip, animation_style, desired_duration) @staticmethod From 1b84f65c6984a642fb70513da8e22a8a1e2e7fa7 Mon Sep 17 00:00:00 2001 From: piyushK52 Date: Tue, 3 Oct 2023 14:31:19 +0530 Subject: [PATCH 036/164] wip: video gen fixes --- backend/models.py | 7 +- backend/serializers/dto.py | 1 - ui_components/methods/common_methods.py | 22 ++--- ui_components/methods/video_methods.py | 119 ++++++++++-------------- ui_components/models.py | 2 - 5 files changed, 64 insertions(+), 
87 deletions(-) diff --git a/backend/models.py b/backend/models.py index b319f01e..027729a0 100644 --- a/backend/models.py +++ b/backend/models.py @@ -175,6 +175,7 @@ def __init__(self, *args, **kwargs): super(Timing, self).__init__(*args, **kwargs) self.old_is_disabled = self.is_disabled self.old_aux_frame_index = self.aux_frame_index + self.old_timed_clip = self.timed_clip def save(self, *args, **kwargs): # TODO: updating details of every frame this way can be slow - implement a better strategy @@ -233,7 +234,11 @@ def save(self, *args, **kwargs): self.interpolated_video_id = None self.timed_clip_id = None - + + # if timed_clip is deleted then preview_video will also be deleted + if self.old_timed_clip != self.timed_clip and self.old_timed_clip and not self.timed_clip: + self.preview_video = None + super().save(*args, **kwargs) def add_interpolated_clip_list(self, clip_uuid_list): diff --git a/backend/serializers/dto.py b/backend/serializers/dto.py index 1ae515ad..6120fb36 100644 --- a/backend/serializers/dto.py +++ b/backend/serializers/dto.py @@ -119,7 +119,6 @@ class Meta: ) def get_interpolated_clip_list(self, obj): - res = [] id_list = json.loads(obj.interpolated_clip_list) if obj.interpolated_clip_list else [] file_list = InternalFileObject.objects.filter(uuid__in=id_list, is_disabled=False).all() return [InternalFileDto(file).data for file in file_list] diff --git a/ui_components/methods/common_methods.py b/ui_components/methods/common_methods.py index 45aa7be3..68b20b3b 100644 --- a/ui_components/methods/common_methods.py +++ b/ui_components/methods/common_methods.py @@ -64,7 +64,8 @@ def compare_to_previous_and_next_frame(project_uuid, timing_details): prev_frame_timing = data_repo.get_prev_timing(st.session_state['current_frame_uuid']) create_or_get_single_preview_video(prev_frame_timing.uuid) prev_frame_timing = data_repo.get_timing_from_uuid(prev_frame_timing.uuid) - st.video(prev_frame_timing.timed_clip.location) + if prev_frame_timing.preview_video: 
+ st.video(prev_frame_timing.preview_video.location) with mainimages2: st.success(f"Current image:") @@ -78,8 +79,7 @@ def compare_to_previous_and_next_frame(project_uuid, timing_details): display_image(timing_uuid=next_image.uuid, stage=WorkflowStageType.STYLED.value, clickable=False) if st.button(f"Preview Interpolation From #{st.session_state['current_frame_index']} to #{st.session_state['current_frame_index']+1}", key=f"Preview Interpolation From #{st.session_state['current_frame_index']} to #{st.session_state['current_frame_index']+1}", use_container_width=True): - create_or_get_single_preview_video( - st.session_state['current_frame_uuid']) + create_or_get_single_preview_video(st.session_state['current_frame_uuid']) current_frame = data_repo.get_timing_from_uuid(st.session_state['current_frame_uuid']) st.video(current_frame.timed_clip.location) @@ -702,13 +702,6 @@ def move_frame_back_button(timing_uuid, orientation): st.rerun() - - - - - - - def move_frame_forward_button(timing_uuid, orientation): direction = "Down" if orientation == "side-to-side": @@ -735,10 +728,10 @@ def delete_frame(timing_uuid): if next_timing: data_repo.update_specific_timing( - next_timing.uuid, interpolated_video_id=None) - - data_repo.update_specific_timing( - next_timing.uuid, timed_clip_id=None) + next_timing.uuid, + interpolated_clip_list=None, + preview_video_id=None + ) # If the deleted frame is the first one, set the time of the next frame to 0.00 if timing.aux_frame_index == 0 and next_timing: @@ -853,6 +846,7 @@ def promote_image_variant(timing_uuid, variant_to_promote_frame_number: str): if frame_idx < len(timing_details): data_repo.update_specific_timing(timing.uuid, timed_clip_id=None) +# updates the clip duration of the variant_to_promote and sets it as the timed_clip def promote_video_variant(timing_uuid, variant_to_promote_frame_number: str): data_repo = DataRepo() timing = data_repo.get_timing_from_uuid(timing_uuid) diff --git a/ui_components/methods/video_methods.py 
b/ui_components/methods/video_methods.py index 3e0050b8..b827ae8f 100644 --- a/ui_components/methods/video_methods.py +++ b/ui_components/methods/video_methods.py @@ -21,7 +21,7 @@ # NOTE: interpolated_clip_uuid signals which clip to promote to timed clip (this is the main variant) -# this function returns the preview_clip, which is the interpolated video with correct length +# this function returns the 'single' preview_clip, which is basically timed_clip with the frame number def create_or_get_single_preview_video(timing_uuid, interpolated_clip_uuid=None): from ui_components.methods.file_methods import generate_temp_file, save_or_host_file_bytes from ui_components.methods.common_methods import get_audio_bytes_for_slice @@ -96,7 +96,7 @@ def create_or_get_single_preview_video(timing_uuid, interpolated_clip_uuid=None) inference_log_id=None ) - data_repo.update_specific_timing(timing_uuid, preview_id=preview_video.uuid) + data_repo.update_specific_timing(timing_uuid, preview_video_id=preview_video.uuid) os.remove(temp_output_file.name) # preview has the correct length (equal to the time difference between the current and the next frame) @@ -151,8 +151,9 @@ def create_single_interpolated_clip(timing_uuid, quality, settings={}, variant_c ) data_repo.add_interpolated_clip(timing_uuid, interpolated_clip_id=video.uuid) - output_video = update_speed_of_video_clip(video, timing_uuid) - data_repo.update_specific_timing(timing_uuid, timed_clip_id=output_video.uuid) + if not timing.timed_clip: + output_video = update_speed_of_video_clip(video, timing_uuid) + data_repo.update_specific_timing(timing_uuid, timed_clip_id=output_video.uuid) output_video_list.append(output_video) @@ -340,7 +341,7 @@ def add_audio_to_video_slice(video_file, audio_bytes): os.rename("output_with_audio.mp4", video_location) -# final video rendering of all the frames involved +# final video rendering of all the frames involved (it bascially combines all the timed clips) def render_video(final_video_name, 
project_uuid, quality, file_tag=InternalFileTag.GENERATED_VIDEO.value): from ui_components.methods.common_methods import update_clip_duration_of_all_timing_frames from ui_components.methods.file_methods import convert_bytes_to_file, generate_temp_file @@ -354,51 +355,64 @@ def render_video(final_video_name, project_uuid, quality, file_tag=InternalFileT total_number_of_videos = len(timing_details) - 1 + # creating timed clip for every frame for i in range(0, total_number_of_videos): index_of_current_item = i current_timing: InternalFrameTimingObject = data_repo.get_timing_from_frame_number( project_uuid, i) timing = timing_details[i] + + # updating the interpolation steps if quality == VideoQuality.HIGH.value: - data_repo.update_specific_timing( - current_timing.uuid, timed_clip_id=None) + # data_repo.update_specific_timing(current_timing.uuid, timed_clip_id=None) interpolation_steps = VideoInterpolator.calculate_dynamic_interpolations_steps( timing_details[index_of_current_item].clip_duration) - if not timing.interpolation_steps or timing.interpolation_steps < interpolation_steps: - data_repo.update_specific_timing( - current_timing.uuid, interpolation_steps=interpolation_steps, interpolated_clip_id=None) + timing.interpolation_steps = interpolation_steps else: if not timing.interpolation_steps or timing.interpolation_steps < 3: data_repo.update_specific_timing( current_timing.uuid, interpolation_steps=3) - if not timing.interpolated_clip: - next_timing = data_repo.get_next_timing(current_timing.uuid) - settings = { - "animation_tool": current_timing.animation_tool, - "interpolation_steps": current_timing.interpolation_steps - } - - res = VideoInterpolator.create_interpolated_clip( - img_location_list=[current_timing.source_image.location, next_timing.source_image.location], - settings=settings, - interpolation_steps=current_timing.interpolation_steps - ) + # creating timed clips if not already present + if not timing.timed_clip: + video_clip = None + + # creating 
an interpolated clip if not already present + if not len(timing.interpolated_clip_list): + next_timing = data_repo.get_next_timing(current_timing.uuid) + settings = { + "animation_tool": current_timing.animation_tool, + "interpolation_steps": current_timing.interpolation_steps + } + + res = VideoInterpolator.create_interpolated_clip( + img_location_list=[current_timing.source_image.location, next_timing.source_image.location], + animation_style=current_timing.animation_style, + settings=settings, + interpolation_steps=current_timing.interpolation_steps + ) + + video_bytes, log = res[0] + + file_location = "videos/" + current_timing.project.name + "/assets/videos/0_raw/" + str(uuid.uuid4()) + ".mp4" + video_file = convert_bytes_to_file( + file_location_to_save=file_location, + mime_type="video/mp4", + file_bytes=video_bytes, + project_uuid=current_timing.project.uuid, + inference_log_id=log.uuid + ) + + data_repo.add_interpolated_clip( + current_timing.uuid, interpolated_clip_id=video_file.uuid) + else: + video_file = timing.interpolated_clip_list[0] + + # add timed clip + output_video = update_speed_of_video_clip(video_file, current_timing.uuid) + data_repo.update_specific_timing(current_timing.uuid, timed_clip_id=output_video.uuid) - video_bytes, log = res[0] - - file_location = "videos/" + current_timing.project.name + "/assets/videos/0_raw/" + str(uuid.uuid4()) + ".mp4" - video_file = convert_bytes_to_file( - file_location_to_save=file_location, - mime_type="video/mp4", - file_bytes=video_bytes, - project_uuid=current_timing.project.uuid, - inference_log_id=log.uuid - ) - - data_repo.update_specific_timing( - current_timing.uuid, interpolated_clip_id=video_file.uuid) project_settings: InternalSettingObject = data_repo.get_project_setting( timing.project.uuid) @@ -406,46 +420,13 @@ def render_video(final_video_name, project_uuid, quality, file_tag=InternalFileT timing.project.uuid) total_number_of_videos = len(timing_details) - 2 - for i in timing_details: - 
index_of_current_item = timing_details.index(i) - timing = timing_details[index_of_current_item] - current_timing: InternalFrameTimingObject = data_repo.get_timing_from_frame_number( - timing.project.uuid, index_of_current_item) - if index_of_current_item <= total_number_of_videos: - if not current_timing.timed_clip: - desired_duration = current_timing.clip_duration - location_of_input_video_file = current_timing.interpolated_clip - - output_video = update_speed_of_video_clip( - location_of_input_video_file, timing.uuid) - - if quality == VideoQuality.PREVIEW.value: - print("") - ''' - clip = VideoFileClip(location_of_output_video) - - number_text = TextClip(str(index_of_current_item), fontsize=24, color='white') - number_background = TextClip(" ", fontsize=24, color='black', bg_color='black', size=(number_text.w + 10, number_text.h + 10)) - number_background = number_background.set_position(('right', 'bottom')).set_duration(clip.duration) - number_text = number_text.set_position((number_background.w - number_text.w - 5, number_background.h - number_text.h - 5)).set_duration(clip.duration) - - clip_with_number = CompositeVideoClip([clip, number_background, number_text]) - - # remove existing preview video - os.remove(location_of_output_video) - clip_with_number.write_videofile(location_of_output_video, codec='libx264', bitrate='3000k') - ''' - - data_repo.update_specific_timing( - current_timing.uuid, timed_clip_id=output_video.uuid) - video_list = [] temp_file_list = [] timing_details: List[InternalFrameTimingObject] = data_repo.get_timing_list_from_project( timing.project.uuid) - # TODO: CORRECT-CODE + # joining all the timed clips for i in timing_details: index_of_current_item = timing_details.index(i) current_timing: InternalFrameTimingObject = data_repo.get_timing_from_frame_number( diff --git a/ui_components/models.py b/ui_components/models.py index 5f15c1ad..9b056484 100644 --- a/ui_components/models.py +++ b/ui_components/models.py @@ -113,8 +113,6 @@ def 
__init__(self, **kwargs): **kwargs["mask"]) if 'mask' in kwargs and kwargs["mask"] else None self.canny_image = InternalFileObject( **kwargs["canny_image"]) if 'canny_image' in kwargs and kwargs["canny_image"] else None - self.preview_video = InternalFileObject( - **kwargs["preview_video"]) if 'preview_video' in kwargs and kwargs["preview_video"] else None self.primary_image = InternalFileObject( **kwargs["primary_image"]) if 'primary_image' in kwargs and kwargs["primary_image"] else None self.frame_time = kwargs['frame_time'] if 'frame_time' in kwargs else None From cf5691b104b703c872b0e911f8faa0b644a97749 Mon Sep 17 00:00:00 2001 From: piyushK52 Date: Tue, 3 Oct 2023 16:43:55 +0530 Subject: [PATCH 037/164] video generation fixed --- app.py | 10 +--- backend/models.py | 6 +- ui_components/methods/video_methods.py | 82 +++++++++++++------------- 3 files changed, 47 insertions(+), 51 deletions(-) diff --git a/app.py b/app.py index 39320930..967e1887 100644 --- a/app.py +++ b/app.py @@ -12,13 +12,9 @@ from utils.third_party_auth.google.google_auth import get_google_auth_url -if 'django_init' in st.session_state and st.session_state['django_init']: - print("************ django initialized ************") - # loading the django app - os.environ.setdefault("DJANGO_SETTINGS_MODULE", "django_settings") - # Initialize Django - django.setup() - st.session_state['django_init'] = True +os.environ.setdefault("DJANGO_SETTINGS_MODULE", "django_settings") +django.setup() +st.session_state['django_init'] = True from banodoco_settings import project_init from ui_components.models import InternalAppSettingObject diff --git a/backend/models.py b/backend/models.py index 027729a0..1beae25c 100644 --- a/backend/models.py +++ b/backend/models.py @@ -235,10 +235,10 @@ def save(self, *args, **kwargs): self.interpolated_video_id = None self.timed_clip_id = None - # if timed_clip is deleted then preview_video will also be deleted - if self.old_timed_clip != self.timed_clip and 
self.old_timed_clip and not self.timed_clip: + # if timed_clip is deleted/changed then preview_video will be deleted + if self.old_timed_clip and (not self.timed_clip or self.old_timed_clip != self.timed_clip): self.preview_video = None - + super().save(*args, **kwargs) def add_interpolated_clip_list(self, clip_uuid_list): diff --git a/ui_components/methods/video_methods.py b/ui_components/methods/video_methods.py index b827ae8f..047d307d 100644 --- a/ui_components/methods/video_methods.py +++ b/ui_components/methods/video_methods.py @@ -56,7 +56,7 @@ def create_or_get_single_preview_video(timing_uuid, interpolated_clip_uuid=None) output_video = update_speed_of_video_clip(interpolated_clip, timing_uuid) data_repo.update_specific_timing(timing_uuid, timed_clip_id=output_video.uuid) - if not timing.preview: + if not timing.preview_video: timing = data_repo.get_timing_from_uuid(timing_uuid) timed_clip = timing.timed_clip @@ -107,57 +107,57 @@ def create_or_get_single_preview_video(timing_uuid, interpolated_clip_uuid=None) # adding audio if the audio file is present if project_details.audio: audio_bytes = get_audio_bytes_for_slice(timing_uuid) - add_audio_to_video_slice(timing.preview, audio_bytes) + add_audio_to_video_slice(timing.preview_video, audio_bytes) timing: InternalFrameTimingObject = data_repo.get_timing_from_uuid( timing_uuid) - return timing.preview + return timing.preview_video # this includes all the animation styles [direct morphing, interpolation, image to video] def create_single_interpolated_clip(timing_uuid, quality, settings={}, variant_count=1): - data_repo = DataRepo() - timing: InternalFrameTimingObject = data_repo.get_timing_from_uuid(timing_uuid) - next_timing: InternalFrameTimingObject = data_repo.get_next_timing(timing_uuid) - - if quality == 'full': - interpolation_steps = VideoInterpolator.calculate_dynamic_interpolations_steps(timing.clip_duration) - elif quality == 'preview': - interpolation_steps = 3 + data_repo = DataRepo() + timing: 
InternalFrameTimingObject = data_repo.get_timing_from_uuid(timing_uuid) + next_timing: InternalFrameTimingObject = data_repo.get_next_timing(timing_uuid) + + if quality == 'full': + interpolation_steps = VideoInterpolator.calculate_dynamic_interpolations_steps(timing.clip_duration) + elif quality == 'preview': + interpolation_steps = 3 + + timing.interpolated_steps = interpolation_steps + img_list = [timing.primary_image.location, next_timing.primary_image.location] + settings.update(interpolation_steps=timing.interpolation_steps) + + # res is an array of tuples (video_bytes, log) + res = VideoInterpolator.create_interpolated_clip( + img_list, + timing.animation_style, + settings, + variant_count + ) - timing.interpolated_steps = interpolation_steps - img_list = [timing.source_image.location, next_timing.source_image.location] - settings.update(interpolation_steps=timing.interpolation_steps) - - # res is an array of tuples (video_bytes, log) - res = VideoInterpolator.create_interpolated_clip( - img_list, - timing.animation_style, - settings, - variant_count + output_video_list = [] + for (video_bytes, log) in res: + if 'normalise_speed' in settings and settings['normalise_speed']: + video_bytes = VideoProcessor.update_video_bytes_speed(video_bytes, timing.animation_style, timing.clip_duration) + + video_location = "videos/" + str(timing.project.uuid) + "/assets/videos/0_raw/" + str(uuid.uuid4()) + ".mp4" + video = convert_bytes_to_file( + file_location_to_save=video_location, + mime_type="video/mp4", + file_bytes=video_bytes, + project_uuid=timing.project.uuid, + inference_log_id=log.uuid ) - output_video_list = [] - for (video_bytes, log) in res: - if 'normalise_speed' in settings and settings['normalise_speed']: - video_bytes = VideoProcessor.update_video_bytes_speed(video_bytes, timing.animation_style, timing.clip_duration) - - video_location = "videos/" + str(timing.project.uuid) + "/assets/videos/0_raw/" + str(uuid.uuid4()) + ".mp4" - video = 
convert_bytes_to_file( - file_location_to_save=video_location, - mime_type="video/mp4", - file_bytes=video_bytes, - project_uuid=timing.project.uuid, - inference_log_id=log.uuid - ) - - data_repo.add_interpolated_clip(timing_uuid, interpolated_clip_id=video.uuid) - if not timing.timed_clip: - output_video = update_speed_of_video_clip(video, timing_uuid) - data_repo.update_specific_timing(timing_uuid, timed_clip_id=output_video.uuid) + data_repo.add_interpolated_clip(timing_uuid, interpolated_clip_id=video.uuid) + if not timing.timed_clip: + output_video = update_speed_of_video_clip(video, timing_uuid) + data_repo.update_specific_timing(timing_uuid, timed_clip_id=output_video.uuid) - output_video_list.append(output_video) + output_video_list.append(video) - return output_video_list + return output_video_list # preview_clips have frame numbers on them. Preview clip is generated from index-2 to index+2 frames From be4a6193e4278ab8d0c87bb889ff920db0058bd8 Mon Sep 17 00:00:00 2001 From: piyushK52 Date: Wed, 4 Oct 2023 05:30:18 +0530 Subject: [PATCH 038/164] timing and duration update fix --- backend/models.py | 14 +++-- .../components/frame_styling_page.py | 4 +- ui_components/methods/common_methods.py | 40 ++------------ ui_components/widgets/frame_time_selector.py | 54 +++++++------------ ui_components/widgets/timeline_view.py | 31 ++++++----- utils/common_utils.py | 2 + 6 files changed, 52 insertions(+), 93 deletions(-) diff --git a/backend/models.py b/backend/models.py index 1beae25c..59d52802 100644 --- a/backend/models.py +++ b/backend/models.py @@ -176,13 +176,16 @@ def __init__(self, *args, **kwargs): self.old_is_disabled = self.is_disabled self.old_aux_frame_index = self.aux_frame_index self.old_timed_clip = self.timed_clip + self.old_frame_time = self.frame_time def save(self, *args, **kwargs): # TODO: updating details of every frame this way can be slow - implement a better strategy + + # ------ handling aux_frame_index ------ # if the frame is being deleted 
(disabled) if self.old_is_disabled != self.is_disabled and self.is_disabled: timing_list = Timing.objects.filter(project_id=self.project_id, \ - aux_frame_index__gte=self.aux_frame_index, is_disabled=False).order_by('frame_number') + aux_frame_index__gte=self.aux_frame_index, is_disabled=False).order_by('aux_frame_index') # shifting aux_frame_index of all frames after this frame one backwards if self.is_disabled: @@ -203,12 +206,12 @@ def save(self, *args, **kwargs): if self.aux_frame_index >= self.old_aux_frame_index: timings_to_move = Timing.objects.filter(project_id=self.project_id, aux_frame_index__gt=self.old_aux_frame_index, \ aux_frame_index__lte=self.aux_frame_index, is_disabled=False) - frame_time_list = [self.frame_time] + frame_time_list = [int(self.frame_time * 100) / 100] for t in timings_to_move: frame_time_list.append(t.frame_time) # updating frame time for idx, t in enumerate(timings_to_move): - Timing.objects.filter(uuid=t.uuid, is_disabled=False).update(frame_time=frame_time_list[idx]) + Timing.objects.filter(uuid=t.uuid, is_disabled=False).update(frame_time=int(frame_time_list[idx] * 100) / 100) self.frame_time = frame_time_list[-1] # moving the frames between old and new index one step backwards @@ -223,10 +226,10 @@ def save(self, *args, **kwargs): # updating frame time frame_time_list.reverse() idx = 0 - self.frame_time = frame_time_list[idx] + self.frame_time = int(frame_time_list[idx] * 100) / 100 idx += 1 for t in timings_to_move: - Timing.objects.filter(uuid=t.uuid, is_disabled=False).update(frame_time=frame_time_list[idx]) + Timing.objects.filter(uuid=t.uuid, is_disabled=False).update(frame_time=int(frame_time_list[idx] * 100) / 100) idx += 1 # moving frames timings_to_move.update(aux_frame_index=F('aux_frame_index') + 1) @@ -235,6 +238,7 @@ def save(self, *args, **kwargs): self.interpolated_video_id = None self.timed_clip_id = None + # ------ handling timed_clip ------ # if timed_clip is deleted/changed then preview_video will be deleted 
if self.old_timed_clip and (not self.timed_clip or self.old_timed_clip != self.timed_clip): self.preview_video = None diff --git a/ui_components/components/frame_styling_page.py b/ui_components/components/frame_styling_page.py index c170070b..238e1f40 100644 --- a/ui_components/components/frame_styling_page.py +++ b/ui_components/components/frame_styling_page.py @@ -236,9 +236,9 @@ def frame_styling_page(mainheader2, project_uuid: str): if st.session_state['page'] == "Styling": - timeline_view(shift_frames_setting, project_uuid,"Styling",header_col_3,header_col_4) + timeline_view(shift_frames_setting, project_uuid, "Styling", header_col_3, header_col_4) elif st.session_state['page'] == "Motion": - timeline_view(shift_frames_setting, project_uuid,"Motion",header_col_3,header_col_4) + timeline_view(shift_frames_setting, project_uuid, "Motion", header_col_3, header_col_4) diff --git a/ui_components/methods/common_methods.py b/ui_components/methods/common_methods.py index 68b20b3b..361eeb90 100644 --- a/ui_components/methods/common_methods.py +++ b/ui_components/methods/common_methods.py @@ -30,7 +30,7 @@ from streamlit_image_comparison import image_comparison from ui_components.models import InternalFileObject - +from datetime import datetime from typing import Union def compare_to_source_frame(timing_details): @@ -591,36 +591,6 @@ def rotate_image(location, degree): rotated_image = image.rotate(-degree, resample=Image.BICUBIC, expand=False) return rotated_image - -def change_frame_position(timing_uuid, new_position): - data_repo = DataRepo() - timing: InternalFrameTimingObject = data_repo.get_timing_from_uuid(timing_uuid) - - timing_list = data_repo.get_timing_list_from_project(project_uuid=timing.project.uuid) - - # Check if the new position is within the valid range - if new_position < 0 or new_position >= len(timing_list): - print(f"Invalid position: {new_position}") - st.error("Invalid position") - time.sleep(1) - return - - print(f"Updating timing {timing.uuid} 
to new position {new_position}") - data_repo.update_specific_timing(timing.uuid, aux_frame_index=new_position) - - # Shift the other frames - if new_position > timing.aux_frame_index: - for i in range(timing.aux_frame_index + 1, new_position + 1): - print(f"Shifting timing {timing_list[i].uuid} to position {i-1}") - data_repo.update_specific_timing(timing_list[i].uuid, aux_frame_index=i-1) - else: - for i in range(new_position, timing.aux_frame_index): - print(f"Shifting timing {timing_list[i].uuid} to position {i+1}") - data_repo.update_specific_timing(timing_list[i].uuid, aux_frame_index=i+1) - - # Update the clip duration of all timing frames - print("Updating timings in order") - update_timings_in_order(timing.project.uuid) def update_timings_in_order(project_uuid): data_repo = DataRepo() @@ -644,14 +614,10 @@ def change_frame_position_input(timing_uuid, src): new_position = st.number_input("Move to new position:", min_value=min_value, max_value=max_value, value=timing.aux_frame_index + 1, step=1, key=f"new_position_{timing.aux_frame_index}_{src}") + if st.button('Update Position',key=f"change_frame_position_{timing.aux_frame_index}_{src}"): - change_frame_position(timing_uuid, new_position - 1) + data_repo.update_specific_timing(timing.uuid, aux_frame_index=new_position - 1) st.rerun() - # if new_position != timing.aux_frame_index: - # print(f"Changing frame position from {timing.aux_frame_index + 1} to {new_position}") - # change_frame_position(timing_uuid, new_position - 1) - -from datetime import datetime def move_frame(direction, timing_uuid): diff --git a/ui_components/widgets/frame_time_selector.py b/ui_components/widgets/frame_time_selector.py index 1ff5f3c7..20f8aad9 100644 --- a/ui_components/widgets/frame_time_selector.py +++ b/ui_components/widgets/frame_time_selector.py @@ -1,6 +1,7 @@ from typing import List from ui_components.methods.common_methods import update_clip_duration_of_all_timing_frames from ui_components.models import 
InternalFrameTimingObject +from utils.common_utils import truncate_decimal from utils.data_repo.data_repo import DataRepo import streamlit as st @@ -16,40 +17,21 @@ def shift_subsequent_frames(timing, time_delta): new_frame_time = frame.frame_time + time_delta data_repo.update_specific_timing(frame.uuid, frame_time=new_frame_time, timed_clip_id=None) + def update_frame_duration(timing_uuid, frame_duration, next_timing, shift_frames): data_repo = DataRepo() timing: InternalFrameTimingObject = data_repo.get_timing_from_uuid(timing_uuid) if next_timing: # Calculate time_delta before updating next_timing.frame_time - time_delta = frame_duration - (next_timing.frame_time - timing.frame_time) - + time_delta = frame_duration - truncate_decimal(next_timing.frame_time - timing.frame_time) next_timing.frame_time = timing.frame_time + frame_duration data_repo.update_specific_timing(next_timing.uuid, frame_time=next_timing.frame_time, timed_clip_id=None) - if shift_frames: shift_subsequent_frames(timing, time_delta) # updating clip_duration update_clip_duration_of_all_timing_frames(timing.project.uuid) - - st.rerun() - -def update_frame_time(timing_uuid, frame_time, shift_frames): - data_repo = DataRepo() - timing: InternalFrameTimingObject = data_repo.get_timing_from_uuid(timing_uuid) - - data_repo.update_specific_timing(timing_uuid, frame_time=frame_time, timed_clip_id=None) - - if shift_frames: - next_timing = data_repo.get_next_timing(timing_uuid) - if next_timing is not None: - time_delta = (frame_time + timing.clip_duration) - next_timing.frame_time - shift_subsequent_frames(timing, time_delta) - - # updating clip_duration - update_clip_duration_of_all_timing_frames(timing.project.uuid) - st.rerun() @@ -61,7 +43,7 @@ def single_frame_time_duration_setter(timing_uuid, src, shift_frames=True): # Calculate clip_duration if next_timing: - clip_duration = next_timing.frame_time - timing.frame_time + clip_duration = max(truncate_decimal(next_timing.frame_time - 
timing.frame_time), 0) else: clip_duration = 0.0 # or some default value @@ -70,9 +52,10 @@ def single_frame_time_duration_setter(timing_uuid, src, shift_frames=True): disable_duration_input = False if next_timing else True help_text = None if shift_frames else "You currently won't shift subsequent frames - to do this, go to the List View and turn on Shift Frames." frame_duration = st.number_input("Duration:", min_value=0.0, max_value=max_value, - value=clip_duration, step=0.1, key=f"frame_duration_{timing.aux_frame_index}_{src}", + value=clip_duration, step=0.1, key=f"frame_duration_{timing.uuid}_{src}", disabled=disable_duration_input, help=help_text) + frame_duration = truncate_decimal(frame_duration) if frame_duration != clip_duration: update_frame_duration(timing_uuid, frame_duration, next_timing, shift_frames) @@ -80,18 +63,10 @@ def single_frame_time_duration_setter(timing_uuid, src, shift_frames=True): def single_frame_time_selector(timing_uuid, src, shift_frames=True): data_repo = DataRepo() - - timing: InternalFrameTimingObject = data_repo.get_timing_from_uuid( - timing_uuid) - - timing_list: List[InternalFrameTimingObject] = data_repo.get_timing_list_from_project(timing.project.uuid) - prev_timing = None - if timing.aux_frame_index > 0: - prev_timing_uuid = timing_list[timing.aux_frame_index - 1].uuid - prev_timing = data_repo.get_timing_from_uuid(prev_timing_uuid) + timing: InternalFrameTimingObject = data_repo.get_timing_from_uuid(timing_uuid) + prev_timing = data_repo.get_prev_timing(timing_uuid) min_value = prev_timing.frame_time if prev_timing else 0.0 - disabled_time_change = True if timing.aux_frame_index == 0 else False next_timing = data_repo.get_next_timing(timing_uuid) @@ -100,11 +75,20 @@ def single_frame_time_selector(timing_uuid, src, shift_frames=True): else: max_value = timing.frame_time + 100 # Allow up to 100 seconds more if it's the last item + adjusted_value = max(min_value + 0.01, timing.frame_time) # hackish solution to avoid 
timing.frame_time < min_value while streamlit is refreshing help_text = None if shift_frames else "You currently won't shift subsequent frames - to do this, go to the List View and turn on Shift Frames." frame_time = st.number_input("Time:", min_value=min_value, max_value=max_value, - value=timing.frame_time, step=0.1, key=f"frame_time_{timing.aux_frame_index}_{src}",disabled=disabled_time_change, help=help_text) + value=adjusted_value, step=0.1, key=f"frame_time_{timing.uuid}_{src}",disabled=disabled_time_change, help=help_text) + + frame_time = int(frame_time * 100) / 100.0 if frame_time != timing.frame_time: - update_frame_time(timing_uuid, frame_time, shift_frames) + data_repo.update_specific_timing(timing_uuid, frame_time=frame_time, timed_clip_id=None) + + # if this time is going to be greater than the next frame's time, shift all subsequent frames + if next_timing and frame_time >= next_timing.frame_time: + shift_subsequent_frames(timing, timing.frame_time - frame_time) + update_clip_duration_of_all_timing_frames(timing.project.uuid) + st.rerun() diff --git a/ui_components/widgets/timeline_view.py b/ui_components/widgets/timeline_view.py index 225118a4..df493f44 100644 --- a/ui_components/widgets/timeline_view.py +++ b/ui_components/widgets/timeline_view.py @@ -6,10 +6,10 @@ from utils.data_repo.data_repo import DataRepo from ui_components.widgets.frame_clip_generation_elements import update_animation_style_element from ui_components.constants import WorkflowStageType -from ui_components.models import InternalFrameTimingObject from utils import st_memory -def timeline_view_buttons(i, j, timing_details, shift_frames_setting, time_setter_toggle, animation_style_selector_toggle, duration_setter_toggle, move_frames_toggle, delete_frames_toggle, change_frame_position_toggle): +def timeline_view_buttons(i, j, timing_details, shift_frames_setting, args): + time_setter_toggle, animation_style_selector_toggle, duration_setter_toggle, move_frames_toggle, 
delete_frames_toggle, change_frame_position_toggle = args if time_setter_toggle: single_frame_time_selector(timing_details[i + j].uuid, 'motion', shift_frames=shift_frames_setting) @@ -17,53 +17,56 @@ def timeline_view_buttons(i, j, timing_details, shift_frames_setting, time_sette single_frame_time_duration_setter(timing_details[i + j].uuid, 'motion', shift_frames=shift_frames_setting) if animation_style_selector_toggle: update_animation_style_element(timing_details[i + j].uuid) + btn1, btn2, btn3 = st.columns([1, 1, 1]) - if move_frames_toggle: - + + if move_frames_toggle: with btn1: move_frame_back_button(timing_details[i + j].uuid, "side-to-side") with btn2: move_frame_forward_button(timing_details[i + j].uuid, "side-to-side") + if delete_frames_toggle: with btn3: delete_frame_button(timing_details[i + j].uuid) + if change_frame_position_toggle: change_frame_position_input(timing_details[i + j].uuid, "side-to-side") if time_setter_toggle or duration_setter_toggle or animation_style_selector_toggle or move_frames_toggle or delete_frames_toggle or change_frame_position_toggle: st.caption("--") + jump_to_single_frame_view_button(i + j + 1, timing_details) -def timeline_view(shift_frames_setting, project_uuid, stage,header_col_2,header_col_3): - +def timeline_view(shift_frames_setting, project_uuid, stage, header_col_2, header_col_3): st.markdown("---") data_repo = DataRepo() - timing = data_repo.get_timing_list_from_project(project_uuid)[0] timing_details = data_repo.get_timing_list_from_project(project_uuid) + with header_col_3: items_per_row = st_memory.slider("How many frames per row?", min_value=1, max_value=10, value=5, step=1, key="items_per_row_slider") with header_col_2: - time_setter_toggle, animation_style_selector_toggle, duration_setter_toggle, move_frames_toggle, delete_frames_toggle, change_frame_position_toggle = display_toggles() + args = display_toggles() - for i in range(0, len(timing_details), items_per_row): # Step of items_per_row for grid + 
total_count = len(timing_details) + for i in range(0, total_count, items_per_row): # Step of items_per_row for grid grid = st.columns(items_per_row) # Create items_per_row columns for grid for j in range(items_per_row): - if i + j < len(timing_details): # Check if index is within range + if i + j < total_count: # Check if index is within range with grid[j]: display_number = i + j + 1 if stage == 'Styling': display_image(timing_uuid=timing_details[i + j].uuid, stage=WorkflowStageType.STYLED.value, clickable=False) elif stage == 'Motion': - if timing.timed_clip: - st.video(timing.timed_clip.location) + if timing_details[i + j].timed_clip: + st.video(timing_details[i + j].timed_clip.location) else: st.error("No video found for this frame.") with st.expander(f'Frame #{display_number}', True): - timeline_view_buttons(i, j, timing_details, shift_frames_setting, time_setter_toggle, animation_style_selector_toggle, duration_setter_toggle, move_frames_toggle, delete_frames_toggle, change_frame_position_toggle) + timeline_view_buttons(i, j, timing_details, shift_frames_setting, args) def display_toggles(): - col1, col2, col3 = st.columns(3) with col1: diff --git a/utils/common_utils.py b/utils/common_utils.py index 35aa20bb..9defa7b8 100644 --- a/utils/common_utils.py +++ b/utils/common_utils.py @@ -59,6 +59,8 @@ def create_working_assets(project_uuid): if new_project: copy_sample_assets(project_uuid) +def truncate_decimal(num: float, n: int = 2) -> float: + return int(num * 10 ** n) / 10 ** n # fresh_fetch - bypasses the cache def get_current_user(fresh_fetch=False) -> InternalUserObject: From 7ddd0533a8a55466eb6092ead11d639e6adcf963 Mon Sep 17 00:00:00 2001 From: piyushK52 Date: Wed, 4 Oct 2023 12:18:06 +0530 Subject: [PATCH 039/164] duration and position shift fixed --- backend/models.py | 4 ++-- ui_components/methods/common_methods.py | 4 ++-- ui_components/widgets/frame_time_selector.py | 6 +++--- 3 files changed, 7 insertions(+), 7 deletions(-) diff --git 
a/backend/models.py b/backend/models.py index 59d52802..7e0d256b 100644 --- a/backend/models.py +++ b/backend/models.py @@ -205,7 +205,7 @@ def save(self, *args, **kwargs): elif self.old_aux_frame_index != self.aux_frame_index: if self.aux_frame_index >= self.old_aux_frame_index: timings_to_move = Timing.objects.filter(project_id=self.project_id, aux_frame_index__gt=self.old_aux_frame_index, \ - aux_frame_index__lte=self.aux_frame_index, is_disabled=False) + aux_frame_index__lte=self.aux_frame_index, is_disabled=False).order_by('aux_frame_index') frame_time_list = [int(self.frame_time * 100) / 100] for t in timings_to_move: frame_time_list.append(t.frame_time) @@ -218,7 +218,7 @@ def save(self, *args, **kwargs): timings_to_move.update(aux_frame_index=F('aux_frame_index') - 1) else: timings_to_move = Timing.objects.filter(project_id=self.project_id, aux_frame_index__gte=self.aux_frame_index, \ - aux_frame_index__lt=self.old_aux_frame_index, is_disabled=False) + aux_frame_index__lt=self.old_aux_frame_index, is_disabled=False).order_by('aux_frame_index') frame_time_list = [self.frame_time] for t in reversed(timings_to_move): diff --git a/ui_components/methods/common_methods.py b/ui_components/methods/common_methods.py index 361eeb90..fa273c3a 100644 --- a/ui_components/methods/common_methods.py +++ b/ui_components/methods/common_methods.py @@ -613,9 +613,9 @@ def change_frame_position_input(timing_uuid, src): max_value = len(timing_list) new_position = st.number_input("Move to new position:", min_value=min_value, max_value=max_value, - value=timing.aux_frame_index + 1, step=1, key=f"new_position_{timing.aux_frame_index}_{src}") + value=timing.aux_frame_index + 1, step=1, key=f"new_position_{timing.uuid}_{src}") - if st.button('Update Position',key=f"change_frame_position_{timing.aux_frame_index}_{src}"): + if st.button('Update Position',key=f"change_frame_position_{timing.uuid}_{src}"): data_repo.update_specific_timing(timing.uuid, aux_frame_index=new_position - 1) 
st.rerun() diff --git a/ui_components/widgets/frame_time_selector.py b/ui_components/widgets/frame_time_selector.py index 20f8aad9..02acd3c4 100644 --- a/ui_components/widgets/frame_time_selector.py +++ b/ui_components/widgets/frame_time_selector.py @@ -25,7 +25,7 @@ def update_frame_duration(timing_uuid, frame_duration, next_timing, shift_frames if next_timing: # Calculate time_delta before updating next_timing.frame_time time_delta = frame_duration - truncate_decimal(next_timing.frame_time - timing.frame_time) - next_timing.frame_time = timing.frame_time + frame_duration + next_timing.frame_time = truncate_decimal(timing.frame_time + frame_duration) data_repo.update_specific_timing(next_timing.uuid, frame_time=next_timing.frame_time, timed_clip_id=None) if shift_frames: shift_subsequent_frames(timing, time_delta) @@ -43,7 +43,7 @@ def single_frame_time_duration_setter(timing_uuid, src, shift_frames=True): # Calculate clip_duration if next_timing: - clip_duration = max(truncate_decimal(next_timing.frame_time - timing.frame_time), 0) + clip_duration = max(truncate_decimal(next_timing.frame_time - timing.frame_time), 0.0) else: clip_duration = 0.0 # or some default value @@ -75,7 +75,7 @@ def single_frame_time_selector(timing_uuid, src, shift_frames=True): else: max_value = timing.frame_time + 100 # Allow up to 100 seconds more if it's the last item - adjusted_value = max(min_value + 0.01, timing.frame_time) # hackish solution to avoid timing.frame_time < min_value while streamlit is refreshing + adjusted_value = max(min_value + 0.01, timing.frame_time) if timing.aux_frame_index != 0 else timing.frame_time # hackish solution to avoid timing.frame_time < min_value while streamlit is refreshing help_text = None if shift_frames else "You currently won't shift subsequent frames - to do this, go to the List View and turn on Shift Frames." 
frame_time = st.number_input("Time:", min_value=min_value, max_value=max_value, value=adjusted_value, step=0.1, key=f"frame_time_{timing.uuid}_{src}",disabled=disabled_time_change, help=help_text) From 591206992058fabf275f35ab976ef8399f2b6bc9 Mon Sep 17 00:00:00 2001 From: piyushK52 Date: Wed, 4 Oct 2023 15:54:54 +0530 Subject: [PATCH 040/164] floating point issue fixed --- backend/models.py | 22 ++++++++++++++++++- ui_components/methods/common_methods.py | 23 +++----------------- ui_components/widgets/frame_time_selector.py | 12 +++++----- 3 files changed, 30 insertions(+), 27 deletions(-) diff --git a/backend/models.py b/backend/models.py index 7e0d256b..e1f3313f 100644 --- a/backend/models.py +++ b/backend/models.py @@ -232,12 +232,32 @@ def save(self, *args, **kwargs): Timing.objects.filter(uuid=t.uuid, is_disabled=False).update(frame_time=int(frame_time_list[idx] * 100) / 100) idx += 1 # moving frames - timings_to_move.update(aux_frame_index=F('aux_frame_index') + 1) + timings_to_move.update(aux_frame_index=F('aux_frame_index') + 1, timed_clip=None, preview_video=None) self.interpolated_video_id = None self.timed_clip_id = None + # updating clip_duration + timing_list = Timing.objects.filter(project_id=self.project_id, is_disabled=False).order_by('aux_frame_index') + length_of_list = len(timing_list) + + for idx, timing_item in enumerate(timing_list): + # last frame + if idx == (length_of_list - 1): + time_of_frame = timing_item.frame_time + duration_of_static_time = 0.0 + end_duration_of_frame = float(time_of_frame) + float(duration_of_static_time) + total_duration_of_frame = float(end_duration_of_frame) - float(time_of_frame) + else: + time_of_frame = timing_item.frame_time + next_timing = timing_list[idx + 1] + time_of_next_frame = next_timing.frame_time + total_duration_of_frame = float(time_of_next_frame) - float(time_of_frame) + + Timing.objects.filter(uuid=timing_item.uuid, is_disabled=False).update(clip_duration=total_duration_of_frame) + + # ------ 
handling timed_clip ------ # if timed_clip is deleted/changed then preview_video will be deleted if self.old_timed_clip and (not self.timed_clip or self.old_timed_clip != self.timed_clip): diff --git a/ui_components/methods/common_methods.py b/ui_components/methods/common_methods.py index fa273c3a..ee3e3cfc 100644 --- a/ui_components/methods/common_methods.py +++ b/ui_components/methods/common_methods.py @@ -21,7 +21,7 @@ from ui_components.methods.ml_methods import create_depth_mask_image, inpainting, remove_background from ui_components.methods.video_methods import calculate_desired_duration_of_individual_clip, create_or_get_single_preview_video from ui_components.models import InternalAIModelObject, InternalFrameTimingObject, InternalSettingObject -from utils.common_utils import reset_styling_settings +from utils.common_utils import reset_styling_settings, truncate_decimal from utils.constants import ImageStage, MLQueryObject from utils.data_repo.data_repo import DataRepo from shared.constants import AnimationStyleType @@ -621,39 +621,25 @@ def change_frame_position_input(timing_uuid, src): def move_frame(direction, timing_uuid): - print(f"{datetime.now()} - Starting move_frame function") data_repo = DataRepo() timing: InternalFrameTimingObject = data_repo.get_timing_from_uuid( timing_uuid) - print(f"{datetime.now()} - Retrieved timing object") - if direction == "Up": - print(f"{datetime.now()} - Moving frame up") if timing.aux_frame_index == 0: - print(f"{datetime.now()} - This is the first frame") st.error("This is the first frame") time.sleep(1) return data_repo.update_specific_timing(timing.uuid, aux_frame_index=timing.aux_frame_index - 1) - print(f"{datetime.now()} - Updated timing object") - elif direction == "Down": - print(f"{datetime.now()} - Moving frame down") timing_list = data_repo.get_timing_list_from_project(project_uuid=timing.project.uuid) if timing.aux_frame_index == len(timing_list) - 1: - print(f"{datetime.now()} - This is the last frame") 
st.error("This is the last frame") time.sleep(1) return data_repo.update_specific_timing(timing.uuid, aux_frame_index=timing.aux_frame_index + 1) - print(f"{datetime.now()} - Updated timing object") - - print(f"{datetime.now()} - Updating clip duration of all timing frames") - update_clip_duration_of_all_timing_frames(timing.project.uuid) - print(f"{datetime.now()} - Finished move_frame function") def move_frame_back_button(timing_uuid, orientation): if orientation == "side-to-side": @@ -1084,12 +1070,9 @@ def update_clip_duration_of_all_timing_frames(project_uuid): total_duration_of_frame = float( time_of_next_frame) - float(time_of_frame) - duration_of_static_time = 0.0 + total_duration_of_frame = round(total_duration_of_frame) + data_repo.update_specific_timing(timing_item.uuid, clip_duration=total_duration_of_frame) - - - data_repo.update_specific_timing( - timing_item.uuid, clip_duration=total_duration_of_frame) def create_timings_row_at_frame_number(project_uuid, index_of_frame, frame_time=0.0): data_repo = DataRepo() diff --git a/ui_components/widgets/frame_time_selector.py b/ui_components/widgets/frame_time_selector.py index 02acd3c4..9397af28 100644 --- a/ui_components/widgets/frame_time_selector.py +++ b/ui_components/widgets/frame_time_selector.py @@ -24,8 +24,8 @@ def update_frame_duration(timing_uuid, frame_duration, next_timing, shift_frames if next_timing: # Calculate time_delta before updating next_timing.frame_time - time_delta = frame_duration - truncate_decimal(next_timing.frame_time - timing.frame_time) - next_timing.frame_time = truncate_decimal(timing.frame_time + frame_duration) + time_delta = frame_duration - round(next_timing.frame_time - timing.frame_time, 2) + next_timing.frame_time = round(timing.frame_time + frame_duration, 2) data_repo.update_specific_timing(next_timing.uuid, frame_time=next_timing.frame_time, timed_clip_id=None) if shift_frames: shift_subsequent_frames(timing, time_delta) @@ -43,7 +43,7 @@ def 
single_frame_time_duration_setter(timing_uuid, src, shift_frames=True): # Calculate clip_duration if next_timing: - clip_duration = max(truncate_decimal(next_timing.frame_time - timing.frame_time), 0.0) + clip_duration = max(round(next_timing.frame_time - timing.frame_time, 2), 0.0) else: clip_duration = 0.0 # or some default value @@ -55,7 +55,7 @@ def single_frame_time_duration_setter(timing_uuid, src, shift_frames=True): value=clip_duration, step=0.1, key=f"frame_duration_{timing.uuid}_{src}", disabled=disable_duration_input, help=help_text) - frame_duration = truncate_decimal(frame_duration) + frame_duration = round(frame_duration, 2) if frame_duration != clip_duration: update_frame_duration(timing_uuid, frame_duration, next_timing, shift_frames) @@ -75,10 +75,10 @@ def single_frame_time_selector(timing_uuid, src, shift_frames=True): else: max_value = timing.frame_time + 100 # Allow up to 100 seconds more if it's the last item - adjusted_value = max(min_value + 0.01, timing.frame_time) if timing.aux_frame_index != 0 else timing.frame_time # hackish solution to avoid timing.frame_time < min_value while streamlit is refreshing + # adjusted_value = max(min_value + 0.01, timing.frame_time) if timing.aux_frame_index != 0 else timing.frame_time # hackish solution to avoid timing.frame_time < min_value while streamlit is refreshing help_text = None if shift_frames else "You currently won't shift subsequent frames - to do this, go to the List View and turn on Shift Frames." 
frame_time = st.number_input("Time:", min_value=min_value, max_value=max_value, - value=adjusted_value, step=0.1, key=f"frame_time_{timing.uuid}_{src}",disabled=disabled_time_change, help=help_text) + value=timing.frame_time, step=0.1, key=f"frame_time_{timing.uuid}_{src}",disabled=disabled_time_change, help=help_text) frame_time = int(frame_time * 100) / 100.0 if frame_time != timing.frame_time: From ca8d7fbf1ff032386c8051a85bbc91588da01e66 Mon Sep 17 00:00:00 2001 From: piyushK52 Date: Wed, 4 Oct 2023 16:29:44 +0530 Subject: [PATCH 041/164] clip_duration update fixed --- ui_components/methods/common_methods.py | 2 +- ui_components/methods/video_methods.py | 4 ++++ ui_components/widgets/frame_time_selector.py | 6 +----- 3 files changed, 6 insertions(+), 6 deletions(-) diff --git a/ui_components/methods/common_methods.py b/ui_components/methods/common_methods.py index ee3e3cfc..fe700ae2 100644 --- a/ui_components/methods/common_methods.py +++ b/ui_components/methods/common_methods.py @@ -1070,7 +1070,7 @@ def update_clip_duration_of_all_timing_frames(project_uuid): total_duration_of_frame = float( time_of_next_frame) - float(time_of_frame) - total_duration_of_frame = round(total_duration_of_frame) + total_duration_of_frame = round(total_duration_of_frame, 2) data_repo.update_specific_timing(timing_item.uuid, clip_duration=total_duration_of_frame) diff --git a/ui_components/methods/video_methods.py b/ui_components/methods/video_methods.py index 047d307d..11cf1418 100644 --- a/ui_components/methods/video_methods.py +++ b/ui_components/methods/video_methods.py @@ -119,6 +119,10 @@ def create_single_interpolated_clip(timing_uuid, quality, settings={}, variant_c timing: InternalFrameTimingObject = data_repo.get_timing_from_uuid(timing_uuid) next_timing: InternalFrameTimingObject = data_repo.get_next_timing(timing_uuid) + if not next_timing: + st.error('This is the last image. 
Please add more images to create interpolated clip.') + return None + if quality == 'full': interpolation_steps = VideoInterpolator.calculate_dynamic_interpolations_steps(timing.clip_duration) elif quality == 'preview': diff --git a/ui_components/widgets/frame_time_selector.py b/ui_components/widgets/frame_time_selector.py index 9397af28..ac080b39 100644 --- a/ui_components/widgets/frame_time_selector.py +++ b/ui_components/widgets/frame_time_selector.py @@ -42,11 +42,7 @@ def single_frame_time_duration_setter(timing_uuid, src, shift_frames=True): next_timing = data_repo.get_next_timing(timing_uuid) # Calculate clip_duration - if next_timing: - clip_duration = max(round(next_timing.frame_time - timing.frame_time, 2), 0.0) - else: - clip_duration = 0.0 # or some default value - + clip_duration = max(float(round(timing.clip_duration, 2)), float(0.0)) max_value = 100.0 if shift_frames else clip_duration disable_duration_input = False if next_timing else True From dc1919d9d1b238ef0251d0fe52b98ecf3c929df8 Mon Sep 17 00:00:00 2001 From: piyushK52 Date: Wed, 4 Oct 2023 20:36:24 +0530 Subject: [PATCH 042/164] video generation fix --- banodoco_settings.py | 1 + ui_components/methods/file_methods.py | 2 +- ui_components/methods/video_methods.py | 12 +++++++----- 3 files changed, 9 insertions(+), 6 deletions(-) diff --git a/banodoco_settings.py b/banodoco_settings.py index 272feec9..55e3c8be 100644 --- a/banodoco_settings.py +++ b/banodoco_settings.py @@ -12,6 +12,7 @@ from ui_components.models import InternalAppSettingObject, InternalFrameTimingObject, InternalProjectObject, InternalUserObject from utils.common_utils import create_working_assets from utils.constants import ML_MODEL_LIST, ImageStage +from utils.data_repo.data_repo import DataRepo from utils.ml_processor.replicate.constants import REPLICATE_MODEL ENCRYPTION_KEY = 'J2684nBgNUYa_K0a6oBr5H8MpSRW0EJ52Qmq7jExE-w=' diff --git a/ui_components/methods/file_methods.py b/ui_components/methods/file_methods.py index 
044175bc..39b7fcce 100644 --- a/ui_components/methods/file_methods.py +++ b/ui_components/methods/file_methods.py @@ -142,7 +142,7 @@ def convert_bytes_to_file(file_location_to_save, mime_type, file_bytes, project_ hosted_url = save_or_host_file_bytes(file_bytes, file_location_to_save, "." + mime_type.split("/")[1]) file_data = { "name": str(uuid.uuid4()) + "." + mime_type.split("/")[1] if not filename else filename, - "type": InternalFileType.IMAGE.value, + "type": InternalFileType.VIDEO.value if 'video' in mime_type else (InternalFileType.AUDIO.value if 'audio' in mime_type else InternalFileType.IMAGE.value), "project_id": project_uuid, "inference_log_id": inference_log_id, "tag": tag diff --git a/ui_components/methods/video_methods.py b/ui_components/methods/video_methods.py index 11cf1418..a89bb382 100644 --- a/ui_components/methods/video_methods.py +++ b/ui_components/methods/video_methods.py @@ -2,6 +2,7 @@ import random import string import tempfile +import time from typing import List import ffmpeg import streamlit as st @@ -352,6 +353,10 @@ def render_video(final_video_name, project_uuid, quality, file_tag=InternalFileT data_repo = DataRepo() + if not final_video_name: + st.error("Please enter a video name") + return + timing_details: List[InternalFrameTimingObject] = data_repo.get_timing_list_from_project( project_uuid) @@ -417,11 +422,8 @@ def render_video(final_video_name, project_uuid, quality, file_tag=InternalFileT output_video = update_speed_of_video_clip(video_file, current_timing.uuid) data_repo.update_specific_timing(current_timing.uuid, timed_clip_id=output_video.uuid) - - project_settings: InternalSettingObject = data_repo.get_project_setting( - timing.project.uuid) - timing_details: List[InternalFrameTimingObject] = data_repo.get_timing_list_from_project( - timing.project.uuid) + project_settings: InternalSettingObject = data_repo.get_project_setting(project_uuid) + timing_details: List[InternalFrameTimingObject] = 
data_repo.get_timing_list_from_project(project_uuid) total_number_of_videos = len(timing_details) - 2 video_list = [] From 5699a9d104743feb5f994c435017583a0ff8874e Mon Sep 17 00:00:00 2001 From: piyushK52 Date: Thu, 5 Oct 2023 03:09:15 +0530 Subject: [PATCH 043/164] wip: batch generation fixes --- shared/constants.py | 3 + .../components/frame_styling_page.py | 7 +- ui_components/constants.py | 17 ++ ui_components/models.py | 3 +- ui_components/widgets/styling_element.py | 225 +++++++++--------- 5 files changed, 137 insertions(+), 118 deletions(-) diff --git a/shared/constants.py b/shared/constants.py index 08b23100..32dcb845 100644 --- a/shared/constants.py +++ b/shared/constants.py @@ -69,6 +69,9 @@ class AnimationToolType(ExtendedEnum): ANIMATEDIFF = 'Animatediff' G_FILM = "Google FiLM" +class ViewType(ExtendedEnum): + SINGLE = "Single" + LIST = "List" ##################### global constants ##################### SERVER = os.getenv('SERVER', ServerType.PRODUCTION.value) diff --git a/ui_components/components/frame_styling_page.py b/ui_components/components/frame_styling_page.py index 238e1f40..96adcd7b 100644 --- a/ui_components/components/frame_styling_page.py +++ b/ui_components/components/frame_styling_page.py @@ -1,4 +1,5 @@ import streamlit as st +from shared.constants import ViewType from ui_components.methods.common_methods import add_key_frame,compare_to_previous_and_next_frame,compare_to_source_frame,style_cloning_element @@ -124,7 +125,7 @@ def frame_styling_page(mainheader2, project_uuid: str): with st.expander("🛠️ Generate Variants + Prompt Settings", expanded=True): col1, col2 = st.columns([1, 1]) with col1: - styling_element(st.session_state['current_frame_uuid'], view_type="Single") + styling_element(st.session_state['current_frame_uuid'], view_type=ViewType.SINGLE.value) with col2: detail1, detail2 = st.columns([1, 1]) with detail1: @@ -216,7 +217,7 @@ def frame_styling_page(mainheader2, project_uuid: str): if st.session_state['page'] == "Styling": 
with st.sidebar: - styling_element(st.session_state['current_frame_uuid'], view_type="List") + styling_element(st.session_state['current_frame_uuid'], view_type=ViewType.LIST.value) styling_list_view(start_index, end_index, shift_frames_setting, project_uuid) @@ -232,7 +233,7 @@ def frame_styling_page(mainheader2, project_uuid: str): elif st.session_state['list_view_type'] == "Timeline View": with st.sidebar: - styling_element(st.session_state['current_frame_uuid'], view_type="List") + styling_element(st.session_state['current_frame_uuid'], view_type=ViewType.LIST.value) if st.session_state['page'] == "Styling": diff --git a/ui_components/constants.py b/ui_components/constants.py index 85d66b50..cd946500 100644 --- a/ui_components/constants.py +++ b/ui_components/constants.py @@ -34,6 +34,23 @@ class DefaultTimingStyleParams: animation_style = AnimationStyleType.INTERPOLATION.value model = None +class DefaultProjectSettingParams: + batch_prompt = "" + batch_negative_prompt = "bad image, worst quality" + batch_strength = 1 + batch_guidance_scale = 0.5 + batch_seed = 0 + batch_num_inference_steps = 25 + batch_low_threshold = 100 + batch_high_threshold = 200 + batch_adapter_type = None + batch_interpolation_steps = 3 + batch_transformation_stage = ImageStage.SOURCE_IMAGE.value + batch_custom_model_id_list = [] + batch_animation_tool = AnimationToolType.G_FILM.value + batch_animation_style = AnimationStyleType.INTERPOLATION.value + batch_model = None + # TODO: make proper paths for every file CROPPED_IMG_LOCAL_PATH = "videos/temp/cropped.png" diff --git a/ui_components/models.py b/ui_components/models.py index 9b056484..d565f0c4 100644 --- a/ui_components/models.py +++ b/ui_components/models.py @@ -3,7 +3,7 @@ import json from shared.constants import AnimationStyleType, AnimationToolType -from ui_components.constants import TEMP_MASK_FILE, DefaultTimingStyleParams +from ui_components.constants import TEMP_MASK_FILE, DefaultProjectSettingParams, DefaultTimingStyleParams 
from utils.common_decorators import session_state_attributes from utils.constants import MLQueryObject @@ -187,6 +187,7 @@ def __init__(self, **kwargs): self.replicate_key = kwargs['replicate_key'] if 'replicate_key' in kwargs and kwargs['replicate_key'] else "" +@session_state_attributes(DefaultProjectSettingParams) class InternalSettingObject: def __init__(self, **kwargs): self.uuid = kwargs['uuid'] if 'uuid' in kwargs else None diff --git a/ui_components/widgets/styling_element.py b/ui_components/widgets/styling_element.py index 17eb0225..cbf16df1 100644 --- a/ui_components/widgets/styling_element.py +++ b/ui_components/widgets/styling_element.py @@ -3,81 +3,71 @@ import uuid from typing import List -from shared.constants import AIModelCategory, AIModelType +from shared.constants import AIModelCategory, AIModelType, ViewType from ui_components.methods.ml_methods import trigger_restyling_process from ui_components.models import InternalAIModelObject, InternalFrameTimingObject, InternalSettingObject from utils.constants import ImageStage from utils.data_repo.data_repo import DataRepo -def styling_element(timing_uuid, view_type="Single"): +def styling_element(timing_uuid, view_type=ViewType.SINGLE.value): data_repo = DataRepo() timing: InternalFrameTimingObject = data_repo.get_timing_from_uuid( timing_uuid) timing_details: List[InternalFrameTimingObject] = data_repo.get_timing_list_from_project( timing.project.uuid) - project_settings: InternalSettingObject = data_repo.get_project_setting( - timing.project.uuid) + project_settings: InternalSettingObject = data_repo.get_project_setting(timing.project.uuid) + # -------------------- Transfomation Stage -------------------- # stages = ImageStage.value_list() + if view_type == ViewType.SINGLE.value: + append_to_item_name = f"{timing_uuid}" + elif view_type == ViewType.LIST.value: + append_to_item_name = str(timing.project.uuid) + st.markdown("## Batch queries") - if project_settings.default_stage != "": - if 
'index_of_which_stage_to_run_on' not in st.session_state: - st.session_state['transformation_stage'] = project_settings.default_stage - st.session_state['index_of_which_stage_to_run_on'] = stages.index( - st.session_state['transformation_stage']) + if project_settings.default_stage: + if f'index_of_which_stage_to_run_on_{append_to_item_name}' not in st.session_state: + st.session_state["transformation_stage"] = project_settings.default_stage + st.session_state[f'index_of_which_stage_to_run_on_{append_to_item_name}'] = stages.index( + st.session_state["transformation_stage"]) else: - st.session_state['index_of_which_stage_to_run_on'] = 0 + st.session_state["transformation_stage"] = ImageStage.SOURCE_IMAGE.value + st.session_state[f'index_of_which_stage_to_run_on_{append_to_item_name}'] = 0 - if view_type == "Single": - append_to_item_name = f"{st.session_state['current_frame_uuid']}" - elif view_type == "List": - append_to_item_name = "bulk" - st.markdown("## Batch queries") - - if view_type == "Single": - if timing.transformation_stage: - if f'index_of_which_stage_to_run_on_{append_to_item_name}' not in st.session_state: - st.session_state['transformation_stage'] = timing.transformation_stage - st.session_state[f'index_of_which_stage_to_run_on_{append_to_item_name}'] = stages.index( - st.session_state['transformation_stage']) - else: - st.session_state[f'index_of_which_stage_to_run_on_{append_to_item_name}'] = 0 - - elif view_type == "List": - if project_settings.default_stage != "": - if f'index_of_which_stage_to_run_on_{append_to_item_name}' not in st.session_state: - st.session_state['transformation_stage'] = project_settings.default_stage - st.session_state[f'index_of_which_stage_to_run_on_{append_to_item_name}'] = stages.index( - st.session_state['transformation_stage']) - else: - st.session_state[f'index_of_which_stage_to_run_on_{append_to_item_name}'] = 0 stages1, stages2 = st.columns([1, 1]) with stages1: - st.session_state['transformation_stage'] = 
st.radio("What stage of images would you like to run styling on?", options=stages, horizontal=True, key=f"image_stage_selector_{append_to_item_name}", - index=st.session_state[f'index_of_which_stage_to_run_on_{append_to_item_name}'], help="Extracted frames means the original frames from the video.") + st.session_state["transformation_stage"] = st.radio( + "What stage of images would you like to run styling on?", + options=stages, + horizontal=True, + key=f"image_stage_selector_{append_to_item_name}", + index=st.session_state[f'index_of_which_stage_to_run_on_{append_to_item_name}'], + help="Extracted frames means the original frames from the video." + ) + with stages2: image = None - if st.session_state['transformation_stage'] == ImageStage.SOURCE_IMAGE.value: + if st.session_state["transformation_stage"] == ImageStage.SOURCE_IMAGE.value: source_img = timing_details[st.session_state['current_frame_index'] - 1].source_image image = source_img.location if source_img else "" - elif st.session_state['transformation_stage'] == ImageStage.MAIN_VARIANT.value: + elif st.session_state["transformation_stage"] == ImageStage.MAIN_VARIANT.value: image = timing_details[st.session_state['current_frame_index'] - 1].primary_image_location if image: - st.image(image, use_column_width=True, - caption=f"Image {st.session_state['current_frame_index']}") - elif not image and st.session_state['transformation_stage'] in [ImageStage.SOURCE_IMAGE.value, ImageStage.MAIN_VARIANT.value]: - st.error( - f"No {st.session_state['transformation_stage']} image found for this variant") + st.image(image, use_column_width=True, caption=f"Image {st.session_state['current_frame_index']}") + elif not image and st.session_state["transformation_stage"] in [ImageStage.SOURCE_IMAGE.value, ImageStage.MAIN_VARIANT.value]: + st.error(f"No {st.session_state['transformation_stage']} image found for this variant") - if stages.index(st.session_state['transformation_stage']) != 
st.session_state['index_of_which_stage_to_run_on']: + if stages.index(st.session_state["transformation_stage"]) != st.session_state[f'index_of_which_stage_to_run_on_{append_to_item_name}']: st.session_state['index_of_which_stage_to_run_on'] = stages.index( - st.session_state['transformation_stage']) + st.session_state["transformation_stage"]) st.rerun() - if st.session_state['transformation_stage'] != ImageStage.NONE.value: + + # -------------------- Model Selection -------------------- # + if st.session_state["transformation_stage"] != ImageStage.NONE.value: model_list = data_repo.get_all_ai_model_list(model_type_list=[AIModelType.IMG2IMG.value], custom_trained=False) else: model_list = data_repo.get_all_ai_model_list(model_type_list=[AIModelType.TXT2IMG.value], custom_trained=False) @@ -112,7 +102,7 @@ def styling_element(timing_uuid, view_type="Single"): current_model_name = data_repo.get_ai_model_from_uuid( st.session_state['model']).name - # NOTE: there is a check when creating custom models that no two model can have the same name + # -------------------- Model Params (e.g. 
adapter type for controlnet) -------------------- # if current_model_name == AIModelCategory.CONTROLNET.value: controlnet_adapter_types = [ "scribble", "normal", "canny", "hed", "seg", "hough", "depth2img", "pose"] @@ -132,6 +122,44 @@ def styling_element(timing_uuid, view_type="Single"): st.rerun() st.session_state['custom_models'] = [] + if not ( 'adapter_type' in st.session_state and st.session_state['adapter_type']): + st.session_state['adapter_type'] = 'N' + + # setting default values of low and high threshold + if st.session_state['adapter_type'] in ["canny", "pose"]: + canny1, canny2 = st.columns(2) + if view_type == ViewType.LIST.value: + if project_settings.default_low_threshold != "": + low_threshold_value = project_settings.default_low_threshold + else: + low_threshold_value = 50 + + if project_settings.default_high_threshold != "": + high_threshold_value = project_settings.default_high_threshold + else: + high_threshold_value = 150 + + elif view_type == ViewType.SINGLE.value: + if timing.low_threshold != "": + low_threshold_value = timing.low_threshold + else: + low_threshold_value = 50 + + if timing.high_threshold != "": + high_threshold_value = timing.high_threshold + else: + high_threshold_value = 150 + + with canny1: + st.session_state['low_threshold'] = st.slider( + 'Low Threshold', 0, 255, value=int(low_threshold_value)) + with canny2: + st.session_state['high_threshold'] = st.slider( + 'High Threshold', 0, 255, value=int(high_threshold_value)) + else: + st.session_state['low_threshold'] = 0 + st.session_state['high_threshold'] = 0 + elif current_model_name == AIModelCategory.LORA.value: if not ('index_of_lora_model_1' in st.session_state and st.session_state['index_of_lora_model_1']): st.session_state['index_of_lora_model_1'] = 0 @@ -234,48 +262,9 @@ def styling_element(timing_uuid, view_type="Single"): st.session_state['custom_models'] = [] st.session_state['adapter_type'] = "N" - if not ( 'adapter_type' in st.session_state and 
st.session_state['adapter_type']): - st.session_state['adapter_type'] = 'N' - - if st.session_state['adapter_type'] in ["canny", "pose"]: - - canny1, canny2 = st.columns(2) - - if view_type == "List": - - if project_settings.default_low_threshold != "": - low_threshold_value = project_settings.default_low_threshold - else: - low_threshold_value = 50 - - if project_settings.default_high_threshold != "": - high_threshold_value = project_settings.default_high_threshold - else: - high_threshold_value = 150 - - elif view_type == "Single": - - if timing.low_threshold != "": - low_threshold_value = timing.low_threshold - else: - low_threshold_value = 50 - - if timing.high_threshold != "": - high_threshold_value = timing.high_threshold - else: - high_threshold_value = 150 - - with canny1: - st.session_state['low_threshold'] = st.slider( - 'Low Threshold', 0, 255, value=int(low_threshold_value)) - with canny2: - st.session_state['high_threshold'] = st.slider( - 'High Threshold', 0, 255, value=int(high_threshold_value)) - else: - st.session_state['low_threshold'] = 0 - st.session_state['high_threshold'] = 0 - + # -------------------- Prompt -------------------- # if st.session_state['model'] == "StyleGAN-NADA": + # only certain words are available in case of stylegan-nada st.warning("StyleGAN-NADA is a custom model that uses StyleGAN to generate a consistent character and style transformation. 
It only works for square images.") st.session_state['prompt'] = st.selectbox("What style would you like to apply to the character?", ['base', 'mona_lisa', 'modigliani', 'cubism', 'elf', 'sketch_hq', 'thomas', 'thanos', 'simpson', 'witcher', 'edvard_munch', 'ukiyoe', 'botero', 'shrek', 'joker', 'pixar', 'zombie', 'werewolf', 'groot', 'ssj', 'rick_morty_cartoon', 'anime', 'white_walker', 'zuckerberg', 'disney_princess', 'all', 'list']) @@ -285,29 +274,23 @@ def styling_element(timing_uuid, view_type="Single"): st.session_state['num_inference_steps'] = int(50) else: - if view_type == "List": - if timing.prompt != "": + if not (f'prompt_value_{append_to_item_name}' in st.session_state and st.session_state[f'prompt_value_{append_to_item_name}']): + if timing.prompt: st.session_state[f'prompt_value_{append_to_item_name}'] = timing.prompt else: st.session_state[f'prompt_value_{append_to_item_name}'] = "" - elif view_type == "Single": - if not (f'prompt_value_{append_to_item_name}' in st.session_state and st.session_state[f'prompt_value_{append_to_item_name}']): - if timing.prompt != "": - st.session_state[f'prompt_value_{append_to_item_name}'] = timing.prompt - else: - st.session_state[f'prompt_value_{append_to_item_name}'] = "" - - st.session_state['prompt'] = st.text_area( - f"Prompt", label_visibility="visible", value=st.session_state[f'prompt_value_{append_to_item_name}'], height=150) + st.session_state['prompt'] = st.text_area(f"Prompt", label_visibility="visible", + value=st.session_state[f'prompt_value_{append_to_item_name}'], height=150) if st.session_state['prompt'] != st.session_state[f'prompt_value_{append_to_item_name}']: st.session_state[f'prompt_value_{append_to_item_name}'] = st.session_state['prompt'] st.rerun() - if view_type == "List": + if view_type == ViewType.LIST.value: st.info( "You can include the following tags in the prompt to vary the prompt dynamically: [expression], [location], [mouth], and [looking]") + if st.session_state['model'] == 
AIModelCategory.DREAMBOOTH.value: model_details: InternalAIModelObject = data_repo.get_ai_model_from_uuid( st.session_state['dreambooth_model_uuid']) @@ -324,13 +307,13 @@ def styling_element(timing_uuid, view_type="Single"): if st.session_state['model'] == AIModelCategory.PIX_2_PIX.value: st.info("In our experience, setting the seed to 87870, and the guidance scale to 7.5 gets consistently good results. You can set this in advanced settings.") - if view_type == "List": - if project_settings.default_strength != "": + if view_type == ViewType.LIST.value: + if project_settings.default_strength: st.session_state['strength'] = project_settings.default_strength else: st.session_state['strength'] = 0.5 - elif view_type == "Single": + elif view_type == ViewType.SINGLE.value: if timing.strength: st.session_state['strength'] = timing.strength else: @@ -339,12 +322,12 @@ def styling_element(timing_uuid, view_type="Single"): st.session_state['strength'] = st.slider(f"Strength", value=float( st.session_state['strength']), min_value=0.0, max_value=1.0, step=0.01) - if view_type == "List": - if project_settings.default_guidance_scale != "": + if view_type == ViewType.LIST.value: + if project_settings.default_guidance_scale: st.session_state['guidance_scale'] = project_settings.default_guidance_scale else: st.session_state['guidance_scale'] = 7.5 - elif view_type == "Single": + elif view_type == ViewType.SINGLE.value: if timing.guidance_scale != "": st.session_state['guidance_scale'] = timing.guidance_scale else: @@ -363,13 +346,13 @@ def styling_element(timing_uuid, view_type="Single"): st.session_state['guidance_scale'] = st.number_input( f"Guidance scale", value=float(st.session_state['guidance_scale'])) - if view_type == "List": + if view_type == ViewType.LIST.value: if project_settings.default_seed != "": st.session_state['seed'] = project_settings.default_seed else: st.session_state['seed'] = 0 - elif view_type == "Single": + elif view_type == ViewType.SINGLE.value: if 
timing.seed != "": st.session_state['seed'] = timing.seed else: @@ -378,12 +361,12 @@ def styling_element(timing_uuid, view_type="Single"): st.session_state['seed'] = st.number_input( f"Seed", value=int(st.session_state['seed'])) - if view_type == "List": + if view_type == ViewType.LIST.value: if project_settings.default_num_inference_steps: st.session_state['num_inference_steps'] = project_settings.default_num_inference_steps else: st.session_state['num_inference_steps'] = 50 - elif view_type == "Single": + elif view_type == ViewType.SINGLE.value: if timing.num_inference_steps: st.session_state['num_inference_steps'] = timing.num_inference_steps else: @@ -391,11 +374,9 @@ def styling_element(timing_uuid, view_type="Single"): st.session_state['num_inference_steps'] = st.number_input( f"Inference steps", value=int(st.session_state['num_inference_steps'])) - st.session_state["promote_new_generation"] = st.checkbox( - "Promote new generation to main variant", key="promote_new_generation_to_main_variant_1") st.session_state["use_new_settings"] = True - if view_type == "List": + if view_type == ViewType.LIST.value: batch_run_range = st.slider( "Select range:", 1, 1, (1, len(timing_details))) first_batch_run_value = batch_run_range[0] - 1 @@ -433,7 +414,23 @@ def styling_element(timing_uuid, view_type="Single"): for i in range(first_batch_run_value, last_batch_run_value+1): for _ in range(0, batch_number_of_variants): - trigger_restyling_process(timing_details[i].uuid, st.session_state['model'], st.session_state['prompt'], st.session_state['strength'], st.session_state['negative_prompt'], st.session_state['guidance_scale'], st.session_state['seed'], st.session_state[ - 'num_inference_steps'], st.session_state['transformation_stage'], st.session_state["promote_new_generation"], st.session_state['custom_models'], st.session_state['adapter_type'], st.session_state["use_new_settings"], st.session_state['low_threshold'], st.session_state['high_threshold']) + 
trigger_restyling_process( + timing_uuid=timing_details[i].uuid, + model_uuid=st.session_state['model'], + prompt=st.session_state['prompt'], + strength=st.session_state['strength'], + negative_prompt=st.session_state['negative_prompt'], + guidance_scale=st.session_state['guidance_scale'], + seed=st.session_state['seed'], + num_inference_steps=st.session_state['num_inference_steps'], + transformation_stage=st.session_state['transformation_stage'], + promote_new_generation=st.session_state["promote_new_generation"], + custom_models=st.session_state['custom_models'], + adapter_type=st.session_state['adapter_type'], + update_inference_settings=True, + low_threshold=st.session_state['low_threshold'], + high_threshold=st.session_state['high_threshold'], + canny_image=st.session_state['canny_image'] if 'canny_image' in st.session_state else None, + ) st.rerun() From 07452a71c777da3737728e1eadc29fede28f64fe Mon Sep 17 00:00:00 2001 From: piyushK52 Date: Thu, 5 Oct 2023 03:25:34 +0530 Subject: [PATCH 044/164] minor session_state bug fix --- ui_components/widgets/styling_element.py | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/ui_components/widgets/styling_element.py b/ui_components/widgets/styling_element.py index cbf16df1..3ee6452b 100644 --- a/ui_components/widgets/styling_element.py +++ b/ui_components/widgets/styling_element.py @@ -61,8 +61,7 @@ def styling_element(timing_uuid, view_type=ViewType.SINGLE.value): st.error(f"No {st.session_state['transformation_stage']} image found for this variant") if stages.index(st.session_state["transformation_stage"]) != st.session_state[f'index_of_which_stage_to_run_on_{append_to_item_name}']: - st.session_state['index_of_which_stage_to_run_on'] = stages.index( - st.session_state["transformation_stage"]) + st.session_state[f'index_of_which_stage_to_run_on_{append_to_item_name}'] = stages.index(st.session_state["transformation_stage"]) st.rerun() @@ -405,7 +404,6 @@ def styling_element(timing_uuid, 
view_type=ViewType.SINGLE.value): st.write("") st.write("") if st.button(f'Batch restyle') or st.session_state['restyle_button'] == 'yes': - if st.session_state['restyle_button'] == 'yes': range_start = int(st.session_state['item_to_restyle']) range_end = range_start + 1 @@ -432,5 +430,6 @@ def styling_element(timing_uuid, view_type=ViewType.SINGLE.value): high_threshold=st.session_state['high_threshold'], canny_image=st.session_state['canny_image'] if 'canny_image' in st.session_state else None, ) + st.rerun() From 820012f220f4459eb47283724c2bdfa3c894208a Mon Sep 17 00:00:00 2001 From: piyushK52 Date: Thu, 5 Oct 2023 13:03:40 +0530 Subject: [PATCH 045/164] clip interrogator fix --- ui_components/methods/ml_methods.py | 1 + ui_components/widgets/prompt_finder.py | 38 ++--------------------- utils/ml_processor/replicate/replicate.py | 7 ++++- 3 files changed, 10 insertions(+), 36 deletions(-) diff --git a/ui_components/methods/ml_methods.py b/ui_components/methods/ml_methods.py index 1ee739a7..b0d20659 100644 --- a/ui_components/methods/ml_methods.py +++ b/ui_components/methods/ml_methods.py @@ -238,6 +238,7 @@ def prompt_model_blip2(input_image, query): return output def facial_expression_recognition(input_image): + input_image = input_image.location if not input_image.startswith("http"): input_image = open(input_image, "rb") diff --git a/ui_components/widgets/prompt_finder.py b/ui_components/widgets/prompt_finder.py index 80f5c39b..6ada0d60 100644 --- a/ui_components/widgets/prompt_finder.py +++ b/ui_components/widgets/prompt_finder.py @@ -17,6 +17,8 @@ def prompt_finder_element(project_uuid): "Best", "Fast"], key="best_or_fast", help="This is to know whether we should optimize for best quality or fastest speed. 
Best quality is usually best if you're in doubt", horizontal=True).lower() if st.button("Get prompts"): if not uploaded_file: + st.error("Please upload a file first") + time.sleep(0.3) return uploaded_file_path = f"videos/{project_uuid}/assets/resources/prompt_images/{uploaded_file.name}" @@ -25,17 +27,7 @@ def prompt_finder_element(project_uuid): uploaded_file_path = hosted_url or uploaded_file_path prompt = prompt_clip_interrogator(uploaded_file_path, which_model, best_or_fast) - - # if not os.path.exists(f"videos/{project_uuid}/prompts.csv"): - # with open(f"videos/{project_uuid}/prompts.csv", "w") as f: - # f.write("prompt,example_image,which_model\n") - # add the prompt to prompts.csv - # with open(f"videos/{project_uuid}/prompts.csv", "a") as f: - # f.write( - # f'"{prompt}",videos/{project_uuid}/assets/resources/prompt_images/{uploaded_file.name},{which_model}\n') - st.session_state["last_generated_prompt"] = prompt - st.success("Prompt added successfully!") time.sleep(0.3) uploaded_file = "" @@ -43,28 +35,4 @@ def prompt_finder_element(project_uuid): if 'last_generated_prompt' in st.session_state and st.session_state['last_generated_prompt']: st.write("Generated prompt - ", st.session_state['last_generated_prompt']) - - # list all the prompts in prompts.csv - # if os.path.exists(f"videos/{project_uuid}/prompts.csv"): - - # df = pd.read_csv(f"videos/{project_uuid}/prompts.csv", na_filter=False) - # prompts = df.to_dict('records') - - # prompts.reverse() - - # col1, col2 = st.columns([1.5, 1]) - # with col1: - # st.markdown("### Prompt") - # with col2: - # st.markdown("### Example Image") - # with open(f"videos/{project_uuid}/prompts.csv", "r") as f: - # for i in prompts: - # index_of_current_item = prompts.index(i) - # col1, col2 = st.columns([1.5, 1]) - # with col1: - # st.write(prompts[index_of_current_item]["prompt"]) - # with col2: - # st.image(prompts[index_of_current_item] - # ["example_image"], use_column_width=True) - # st.markdown("***") - + 
st.session_state["last_generated_prompt"] = "" \ No newline at end of file diff --git a/utils/ml_processor/replicate/replicate.py b/utils/ml_processor/replicate/replicate.py index ab9e8b64..e8aa4858 100644 --- a/utils/ml_processor/replicate/replicate.py +++ b/utils/ml_processor/replicate/replicate.py @@ -67,7 +67,12 @@ def predict_model_output(self, model: ReplicateModel, **kwargs): log = log_model_inference(model, end_time - start_time, **kwargs) self._update_usage_credits(end_time - start_time) - return [output[-1]], log + if model == REPLICATE_MODEL.clip_interrogator: + output = output # adding this for organisation purpose + else: + output = [output[-1]] + + return output, log @check_user_credits def predict_model_output_async(self, model: ReplicateModel, **kwargs): From 35aedead33f7a44387a0bbc4b6d61b49f550a941 Mon Sep 17 00:00:00 2001 From: piyushK52 Date: Thu, 5 Oct 2023 16:59:38 +0530 Subject: [PATCH 046/164] user data cached + low credits warning added --- app.py | 4 +-- ui_components/setup.py | 12 ++++--- utils/cache/cache.py | 1 + utils/cache/cache_methods.py | 48 +++++++++++++++++++++++++++- utils/common_utils.py | 21 +++++------- utils/constants.py | 1 - utils/data_repo/api_repo.py | 2 +- utils/data_repo/data_repo.py | 2 +- utils/local_storage/local_storage.py | 5 --- 9 files changed, 67 insertions(+), 29 deletions(-) diff --git a/app.py b/app.py index 967e1887..e13ee5f1 100644 --- a/app.py +++ b/app.py @@ -7,7 +7,7 @@ from shared.constants import OFFLINE_MODE, SERVER, ServerType import sentry_sdk -from utils.constants import AUTH_TOKEN, LOGGED_USER +from utils.constants import AUTH_TOKEN from utils.local_storage.url_storage import delete_url_param, get_url_param, set_url_param from utils.third_party_auth.google.google_auth import get_google_auth_url @@ -59,9 +59,7 @@ def main(): data_repo = DataRepo() user, token, refresh_token = data_repo.google_user_login(**data) if user: - st.session_state[LOGGED_USER] = user.to_json() if user else None 
set_url_param(AUTH_TOKEN, str(token)) - # st.experimental_set_query_params(test='testing') st.rerun() else: delete_url_param(AUTH_TOKEN) diff --git a/ui_components/setup.py b/ui_components/setup.py index 0ebb1703..0dbf1ce4 100644 --- a/ui_components/setup.py +++ b/ui_components/setup.py @@ -3,6 +3,7 @@ import os import math from moviepy.editor import * +from shared.constants import SERVER, ServerType from ui_components.components.app_settings_page import app_settings_page from ui_components.components.custom_models_page import custom_models_page @@ -13,7 +14,7 @@ from streamlit_option_menu import option_menu from ui_components.constants import CreativeProcessType from ui_components.models import InternalAppSettingObject -from utils.common_utils import create_working_assets, get_current_user_uuid, reset_project_state +from utils.common_utils import create_working_assets, get_current_user, get_current_user_uuid, reset_project_state from utils import st_memory from utils.data_repo.data_repo import DataRepo @@ -26,15 +27,18 @@ def setup_app_ui(): app_settings: InternalAppSettingObject = data_repo.get_app_setting_from_uuid() - with st.sidebar: + if SERVER != ServerType.DEVELOPMENT.value: + current_user = get_current_user() + user_credits = current_user.total_credits if (current_user and current_user.total_credits > 0) else 0 + if user_credits < 0.5: + st.error(f"You have {user_credits} credits left - please go to App Settings to add more credits") + with st.sidebar: h1, h2 = st.columns([1, 3]) - with h1: st.markdown("# :red[ba]:green[no]:orange[do]:blue[co]") sections = ["Open Project", "App Settings", "New Project"] - with h2: st.write("") st.session_state["section"] = st_memory.menu( diff --git a/utils/cache/cache.py b/utils/cache/cache.py index 0f86b9e1..b7cdbada 100644 --- a/utils/cache/cache.py +++ b/utils/cache/cache.py @@ -8,6 +8,7 @@ class CacheKey(ExtendedEnum): APP_SECRET = "app_secret" PROJECT_SETTING = "project_setting" AI_MODEL = "ai_model" + LOGGED_USER = 
"logged_user" class StCache: diff --git a/utils/cache/cache_methods.py b/utils/cache/cache_methods.py index 9473927e..be6bcede 100644 --- a/utils/cache/cache_methods.py +++ b/utils/cache/cache_methods.py @@ -1,7 +1,7 @@ from shared.logging.logging import AppLogger from utils.cache.cache import CacheKey, StCache +import streamlit as st -from utils.enum import ExtendedEnum logger = AppLogger() @@ -378,4 +378,50 @@ def _cache_bulk_update_project_setting(self, *args, **kwargs): setattr(cls, '_original_bulk_update_project_setting', cls.bulk_update_project_setting) setattr(cls, "bulk_update_project_setting", _cache_bulk_update_project_setting) + def _cache_update_user(self, *args, **kwargs): + original_func = getattr(cls, '_original_update_user') + user = original_func(self, *args, **kwargs) + StCache.delete_all(user, CacheKey.LOGGED_USER.value) + + return user + + setattr(cls, '_original_update_user', cls.update_user) + setattr(cls, "update_user", _cache_update_user) + + def _cache_get_first_active_user(self, *args, **kwargs): + logged_user_list = StCache.get_all(CacheKey.LOGGED_USER.value) + if logged_user_list and len(logged_user_list): + return logged_user_list[0] + + original_func = getattr(cls, '_original_get_first_active_user') + user = original_func(self, *args, **kwargs) + StCache.delete_all(CacheKey.LOGGED_USER.value) + StCache.add(user, CacheKey.LOGGED_USER.value) + + return user + + setattr(cls, '_original_get_first_active_user', cls.get_first_active_user) + setattr(cls, "get_first_active_user", _cache_get_first_active_user) + + def _cache_create_user(self, **kwargs): + original_func = getattr(cls, '_original_create_user') + user = original_func(self, **kwargs) + StCache.update(user, CacheKey.LOGGED_USER.value) + + return user + + setattr(cls, '_original_create_user', cls.create_user) + setattr(cls, "create_user", _cache_create_user) + + def _cache_google_user_login(self, **kwargs): + original_func = getattr(cls, '_original_google_user_login') + user, token, 
refresh_token = original_func(self, **kwargs) + StCache.delete_all(CacheKey.LOGGED_USER.value) + StCache.add(user, CacheKey.LOGGED_USER.value) + + return user, token, refresh_token + + setattr(cls, '_original_google_user_login', cls.google_user_login) + setattr(cls, "google_user_login", _cache_google_user_login) + return cls \ No newline at end of file diff --git a/utils/common_utils.py b/utils/common_utils.py index 9defa7b8..fc586bdc 100644 --- a/utils/common_utils.py +++ b/utils/common_utils.py @@ -6,7 +6,6 @@ from shared.constants import SERVER, ServerType from ui_components.models import InternalUserObject from utils.cache.cache import StCache -from utils.constants import LOGGED_USER from utils.data_repo.data_repo import DataRepo def copy_sample_assets(project_uuid): @@ -62,24 +61,20 @@ def create_working_assets(project_uuid): def truncate_decimal(num: float, n: int = 2) -> float: return int(num * 10 ** n) / 10 ** n -# fresh_fetch - bypasses the cache -def get_current_user(fresh_fetch=False) -> InternalUserObject: - # changing the code to operate on streamlit state rather than local file - if not LOGGED_USER in st.session_state or fresh_fetch: - data_repo = DataRepo() - user = data_repo.get_first_active_user() - st.session_state[LOGGED_USER] = user.to_json() if user else None - - return json.loads(st.session_state[LOGGED_USER]) if LOGGED_USER in st.session_state else None + +def get_current_user() -> InternalUserObject: + data_repo = DataRepo() + user = data_repo.get_first_active_user() + return user def user_credits_available(): - current_user = get_current_user(fresh_fetch=True) + current_user = get_current_user() return True if (current_user and current_user['total_credits'] > 0) else False def get_current_user_uuid(): current_user = get_current_user() - if current_user and 'uuid' in current_user: - return current_user['uuid'] + if current_user: + return current_user.uuid else: return None diff --git a/utils/constants.py b/utils/constants.py index 
960ec8e4..e05ce946 100644 --- a/utils/constants.py +++ b/utils/constants.py @@ -6,7 +6,6 @@ import streamlit as st -LOGGED_USER = 'logged_user' AUTH_TOKEN = 'auth_details' class ImageStage(ExtendedEnum): diff --git a/utils/data_repo/api_repo.py b/utils/data_repo/api_repo.py index b9367578..49648440 100644 --- a/utils/data_repo/api_repo.py +++ b/utils/data_repo/api_repo.py @@ -8,7 +8,7 @@ from shared.constants import SERVER, InternalFileType, InternalResponse, ServerType from utils.common_decorators import log_time -from utils.constants import AUTH_TOKEN, AUTH_TOKEN, LOGGED_USER +from utils.constants import AUTH_TOKEN, AUTH_TOKEN from utils.local_storage.url_storage import delete_url_param, get_url_param diff --git a/utils/data_repo/data_repo.py b/utils/data_repo/data_repo.py index 21ff2d1c..66bf2f70 100644 --- a/utils/data_repo/data_repo.py +++ b/utils/data_repo/data_repo.py @@ -354,7 +354,7 @@ def restore_backup(self, uuid): # update user credits - updates the credit of the user calling the API def update_usage_credits(self, credits_to_add): user = self.update_user(user_id=None, credits_to_add=credits_to_add) - return True if user else None + return user def generate_payment_link(self, amount): res = self.db_repo.generate_payment_link(amount) diff --git a/utils/local_storage/local_storage.py b/utils/local_storage/local_storage.py index 3353bed4..2d13ced1 100644 --- a/utils/local_storage/local_storage.py +++ b/utils/local_storage/local_storage.py @@ -1,10 +1,5 @@ import json import os -import streamlit as st -from shared.logging.constants import LoggingType - -from shared.logging.logging import AppLogger -from utils.constants import LOGGED_USER def is_file_present(filename): script_directory = os.path.dirname(os.path.abspath(__file__)) From 93fa520ff61169049792b939d967d583ea5c78f0 Mon Sep 17 00:00:00 2001 From: piyushK52 Date: Thu, 5 Oct 2023 22:22:39 +0530 Subject: [PATCH 047/164] wip: added AD endpoint --- ui_components/methods/common_methods.py | 25 ++++++------ 
.../widgets/animation_style_element.py | 38 +++++++++++++++---- ui_components/widgets/styling_element.py | 3 +- utils/cache/cache_methods.py | 2 +- utils/common_utils.py | 2 +- utils/media_processor/interpolator.py | 22 ++++++++--- utils/ml_processor/motion_module.py | 5 +-- utils/ml_processor/replicate/constants.py | 1 + utils/ml_processor/replicate/replicate.py | 33 ++++++++++------ 9 files changed, 87 insertions(+), 44 deletions(-) diff --git a/ui_components/methods/common_methods.py b/ui_components/methods/common_methods.py index fe700ae2..ef15b954 100644 --- a/ui_components/methods/common_methods.py +++ b/ui_components/methods/common_methods.py @@ -188,19 +188,20 @@ def clone_styling_settings(source_frame_number, target_frame_uuid): primary_image = data_repo.get_file_from_uuid(timing_details[source_frame_number].primary_image.uuid) params = primary_image.inference_params - target_timing.prompt = params.prompt - target_timing.negative_prompt = params.negative_prompt - target_timing.guidance_scale = params.guidance_scale - target_timing.seed = params.seed - target_timing.num_inference_steps = params.num_inference_steps - target_timing.strength = params.strength - target_timing.adapter_type = params.adapter_type - target_timing.low_threshold = params.low_threshold - target_timing.high_threshold = params.high_threshold + if params: + target_timing.prompt = params.prompt + target_timing.negative_prompt = params.negative_prompt + target_timing.guidance_scale = params.guidance_scale + target_timing.seed = params.seed + target_timing.num_inference_steps = params.num_inference_steps + target_timing.strength = params.strength + target_timing.adapter_type = params.adapter_type + target_timing.low_threshold = params.low_threshold + target_timing.high_threshold = params.high_threshold - if params.model_uuid: - model = data_repo.get_ai_model_from_uuid(params.model_uuid) - target_timing.model = model + if params.model_uuid: + model = 
data_repo.get_ai_model_from_uuid(params.model_uuid) + target_timing.model = model # TODO: image format is assumed to be PNG, change this later def save_new_image(img: Union[Image.Image, str, np.ndarray, io.BytesIO], project_uuid) -> InternalFileObject: diff --git a/ui_components/widgets/animation_style_element.py b/ui_components/widgets/animation_style_element.py index 0890fae9..adb1b863 100644 --- a/ui_components/widgets/animation_style_element.py +++ b/ui_components/widgets/animation_style_element.py @@ -16,35 +16,57 @@ def animation_style_element(timing_uuid, project_uuid): if current_animation_style == AnimationStyleType.INTERPOLATION.value: animation_tool = st.radio("Animation Tool:", options=AnimationToolType.value_list(), key="animation_tool", horizontal=True) - video_resolution = st.radio("Video Resolution:", options=["Preview Resolution", "Full Resolution"], key="video_resolution", horizontal=True) + video_resolution = None settings = { "animation_tool": animation_tool } timing.animation_tool = animation_tool if animation_tool == AnimationToolType.ANIMATEDIFF.value: - motion_module = st.selectbox("Which motion module would you like to use?", options=motion_modules, key="motion_module") + c1, c2 = st.columns([1,1]) + with c1: + motion_module = st.selectbox("Which motion module would you like to use?", options=motion_modules, key="motion_module") + with c2: + sd_model_list = [ + "Realistic_Vision_V5.0.safetensors", + "Counterfeit-V3.0_fp32.safetensors", + "epic_realism.safetensors", + "dreamshaper_v8.safetensors", + "deliberate_v3.safetensors" + ] + sd_model = st.selectbox("Which Stable Diffusion model would you like to use?", options=sd_model_list, key="sd_model") + prompt_column_1, prompt_column_2 = st.columns([1, 1]) with prompt_column_1: - starting_prompt = st.text_area("Starting Prompt:", value=project_settings.default_prompt, key="starting_prompt") + positive_prompt = st.text_area("Positive Prompt:", value=project_settings.default_prompt, 
key="positive_prompt") with prompt_column_2: - ending_prompt = st.text_area("Ending Prompt:", value=project_settings.default_prompt, key="ending_prompt") + negative_prompt = st.text_area("Negative Prompt:", value=project_settings.default_prompt, key="negative_prompt") - animate_col_1, animate_col_2 = st.columns([1, 3]) + animate_col_1, animate_col_2, _ = st.columns([1, 1, 2]) with animate_col_1: + img_dimension_list = ["512x512", "512x768", "768x512"] + img_dimension = st.selectbox("Image Dimension:", options=img_dimension_list, key="img_dimension") + with animate_col_2: variant_count = st.number_input("How many variants?", min_value=1, max_value=100, value=1, step=1, key="variant_count") normalise_speed = st.checkbox("Normalise Speed", value=True, key="normalise_speed") settings.update( - motion_module=AnimateDiffCheckpoint.get_model_from_name(motion_module), - starting_prompt=starting_prompt, - ending_prompt=ending_prompt, + positive_prompt=positive_prompt, + negative_prompt=negative_prompt, + image_dimension=img_dimension, + sampling_steps=30, + motion_module=motion_module, + model=sd_model, normalise_speed=normalise_speed ) + + elif animation_tool == AnimationToolType.G_FILM.value: + video_resolution = st.selectbox("Video Resolution:", options=["Full Resolution", "Preview"], key="video_resolution") + elif current_animation_style == AnimationStyleType.IMAGE_TO_VIDEO.value: st.info("For image to video, you can select one or more prompts, and how many frames you want to generate for each prompt - it'll attempt to travel from one prompt to the next.") diff --git a/ui_components/widgets/styling_element.py b/ui_components/widgets/styling_element.py index 3ee6452b..74bd556e 100644 --- a/ui_components/widgets/styling_element.py +++ b/ui_components/widgets/styling_element.py @@ -14,8 +14,7 @@ def styling_element(timing_uuid, view_type=ViewType.SINGLE.value): data_repo = DataRepo() timing: InternalFrameTimingObject = data_repo.get_timing_from_uuid( timing_uuid) - 
timing_details: List[InternalFrameTimingObject] = data_repo.get_timing_list_from_project( - timing.project.uuid) + timing_details: List[InternalFrameTimingObject] = data_repo.get_timing_list_from_project(timing.project.uuid) project_settings: InternalSettingObject = data_repo.get_project_setting(timing.project.uuid) # -------------------- Transfomation Stage -------------------- # diff --git a/utils/cache/cache_methods.py b/utils/cache/cache_methods.py index be6bcede..81456c1f 100644 --- a/utils/cache/cache_methods.py +++ b/utils/cache/cache_methods.py @@ -381,7 +381,7 @@ def _cache_bulk_update_project_setting(self, *args, **kwargs): def _cache_update_user(self, *args, **kwargs): original_func = getattr(cls, '_original_update_user') user = original_func(self, *args, **kwargs) - StCache.delete_all(user, CacheKey.LOGGED_USER.value) + StCache.delete_all(CacheKey.LOGGED_USER.value) return user diff --git a/utils/common_utils.py b/utils/common_utils.py index fc586bdc..e223d8fd 100644 --- a/utils/common_utils.py +++ b/utils/common_utils.py @@ -69,7 +69,7 @@ def get_current_user() -> InternalUserObject: def user_credits_available(): current_user = get_current_user() - return True if (current_user and current_user['total_credits'] > 0) else False + return True if (current_user and current_user.total_credits > 0) else False def get_current_user_uuid(): current_user = get_current_user() diff --git a/utils/media_processor/interpolator.py b/utils/media_processor/interpolator.py index 7bbf0db8..e5b331b7 100644 --- a/utils/media_processor/interpolator.py +++ b/utils/media_processor/interpolator.py @@ -69,14 +69,24 @@ def video_through_frame_interpolation(img_location_list, settings, variant_count ml_client = get_ml_client() animation_tool = settings['animation_tool'] if 'animation_tool' in settings else AnimationToolType.G_FILM.value - # if animation_tool == AnimationToolType.G_FILM.value: - if True: + if animation_tool == AnimationToolType.G_FILM.value: res = 
ml_client.predict_model_output_async(REPLICATE_MODEL.google_frame_interpolation, frame1=img1, frame2=img2, times_to_interpolate=settings['interpolation_steps'], variant_count=variant_count) - # else: - # # TODO: integrate the AD interpolation API here - # output, log = ml_client.predict_model_output(REPLICATE_MODEL.google_frame_interpolation, frame1=img1, frame2=img2, - # times_to_interpolate=settings['interpolation_steps']) + + # since workflows can have multiple input params it's not standardized yet + elif animation_tool == AnimationToolType.ANIMATEDIFF.value: + data = { + "positive_prompt": settings['positive_prompt'], + "negative_prompt": settings['negative_prompt'], + "image_dimension": settings['image_dimension'], + "starting_image_path": img1, + "ending_image_path": img2, + "sampling_steps": settings['sampling_steps'], + "motion_module": settings['motion_module'], + "model": settings['model'], + } + + res = ml_client.predict_model_output_async(REPLICATE_MODEL.ad_interpolation, **data) final_res = [] for (output, log) in res: diff --git a/utils/ml_processor/motion_module.py b/utils/ml_processor/motion_module.py index 48c1455e..e545be11 100644 --- a/utils/ml_processor/motion_module.py +++ b/utils/ml_processor/motion_module.py @@ -7,9 +7,8 @@ class MotionModuleCheckpoint: # make sure to have unique names (streamlit limitation) class AnimateDiffCheckpoint: - mm_v15_v2 = MotionModuleCheckpoint(name="mm-v15-v2") - ad_stabilized_motion = MotionModuleCheckpoint(name="AD_Stabilized_Motion") - temporal_diff = MotionModuleCheckpoint(name="TemporalDiff") + mm_v15 = MotionModuleCheckpoint(name="mm_sd_v15.ckpt") + mm_v14 = MotionModuleCheckpoint(name="mm_sd_v14.ckpt") @staticmethod def get_name_list(): diff --git a/utils/ml_processor/replicate/constants.py b/utils/ml_processor/replicate/constants.py index ad7e653c..78c4fe90 100644 --- a/utils/ml_processor/replicate/constants.py +++ b/utils/ml_processor/replicate/constants.py @@ -43,6 +43,7 @@ class REPLICATE_MODEL: 
epicrealism_v5 = ReplicateModel("pagebrain/epicrealism-v5", "222465e57e4d9812207f14133c9499d47d706ecc41a8bf400120285b2f030b42") sdxl_controlnet = ReplicateModel("lucataco/sdxl-controlnet", "db2ffdbdc7f6cb4d6dab512434679ee3366ae7ab84f89750f8947d5594b79a47") realistic_vision_v5_img2img = ReplicateModel("lucataco/realistic-vision-v5-img2img", "82bbb4595458d6be142450fc6d8c4d79c936b92bd184dd2d6dd71d0796159819") + ad_interpolation = ReplicateModel("piyushk52/ad_interpolation", "4a478c659d96673b81992b866f1072fc62f297b7ad9945632cda027a6a07c624") @staticmethod def get_model_by_db_obj(model_db_obj): diff --git a/utils/ml_processor/replicate/replicate.py b/utils/ml_processor/replicate/replicate.py index e8aa4858..c33f194b 100644 --- a/utils/ml_processor/replicate/replicate.py +++ b/utils/ml_processor/replicate/replicate.py @@ -59,15 +59,21 @@ def predict_model_output_standardized(self, model: ReplicateModel, query_obj: ML return self.predict_model_output(model, **params) @check_user_credits - def predict_model_output(self, model: ReplicateModel, **kwargs): - model_version = self.get_model(model) + def predict_model_output(self, replicate_model: ReplicateModel, **kwargs): + model_version = self.get_model(replicate_model) start_time = time.time() output = model_version.predict(**kwargs) end_time = time.time() - log = log_model_inference(model, end_time - start_time, **kwargs) + + # hackish fix for now, will update replicate model later + if 'model' in kwargs: + kwargs['inf_model'] = kwargs['model'] + del kwargs['model'] + + log = log_model_inference(replicate_model, end_time - start_time, **kwargs) self._update_usage_credits(end_time - start_time) - if model == REPLICATE_MODEL.clip_interrogator: + if replicate_model == REPLICATE_MODEL.clip_interrogator: output = output # adding this for organisation purpose else: output = [output[-1]] @@ -75,24 +81,29 @@ def predict_model_output(self, model: ReplicateModel, **kwargs): return output, log @check_user_credits - def 
predict_model_output_async(self, model: ReplicateModel, **kwargs): - res = asyncio.run(self._multi_async_prediction(model, **kwargs)) + def predict_model_output_async(self, replicate_model: ReplicateModel, **kwargs): + res = asyncio.run(self._multi_async_prediction(replicate_model, **kwargs)) output_list = [] for (output, time_taken) in res: - log = log_model_inference(model, time_taken, **kwargs) + # hackish fix for now, will update replicate model later + if 'model' in kwargs: + kwargs['inf_model'] = kwargs['model'] + del kwargs['model'] + + log = log_model_inference(replicate_model, time_taken, **kwargs) self._update_usage_credits(time_taken) output_list.append((output, log)) return output_list - async def _multi_async_prediction(self, model: ReplicateModel, **kwargs): + async def _multi_async_prediction(self, replicate_model: ReplicateModel, **kwargs): variant_count = kwargs['variant_count'] if ('variant_count' in kwargs and kwargs['variant_count']) else 1 - res = await asyncio.gather(*[self._async_model_prediction(model, **kwargs) for _ in range(variant_count)]) + res = await asyncio.gather(*[self._async_model_prediction(replicate_model, **kwargs) for _ in range(variant_count)]) return res - async def _async_model_prediction(self, model: ReplicateModel, **kwargs): - model_version = self.get_model(model) + async def _async_model_prediction(self, replicate_model: ReplicateModel, **kwargs): + model_version = self.get_model(replicate_model) start_time = time.time() output = await asyncio.to_thread(model_version.predict, **kwargs) end_time = time.time() From 112e84db0ff5d0098ae4af5e56e016246f7bfbcf Mon Sep 17 00:00:00 2001 From: piyushK52 Date: Fri, 6 Oct 2023 14:33:14 +0530 Subject: [PATCH 048/164] minor ext fix --- utils/media_processor/video.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/utils/media_processor/video.py b/utils/media_processor/video.py index 4ff20e68..f1c9e2a9 100644 --- a/utils/media_processor/video.py +++ 
b/utils/media_processor/video.py @@ -15,7 +15,7 @@ def update_video_speed(video_location, animation_style, desired_duration): @staticmethod def update_video_bytes_speed(video_bytes, animation_style, desired_duration): # video_io = BytesIO(video_bytes) - temp_file = tempfile.NamedTemporaryFile(delete=False, suffix=".png", mode='wb') + temp_file = tempfile.NamedTemporaryFile(delete=False, suffix=".mp4", mode='wb') with open(temp_file.name, 'wb') as out_file: out_file.write(video_bytes) From 417fc63529635b28cc02a6b256879d796adfd142 Mon Sep 17 00:00:00 2001 From: piyushK52 Date: Fri, 6 Oct 2023 16:29:27 +0530 Subject: [PATCH 049/164] frame deletion fixed --- ui_components/methods/common_methods.py | 12 ++++-------- ui_components/widgets/frame_selector.py | 4 +++- 2 files changed, 7 insertions(+), 9 deletions(-) diff --git a/ui_components/methods/common_methods.py b/ui_components/methods/common_methods.py index ef15b954..27b71ece 100644 --- a/ui_components/methods/common_methods.py +++ b/ui_components/methods/common_methods.py @@ -674,8 +674,7 @@ def delete_frame_button(timing_uuid): def delete_frame(timing_uuid): data_repo = DataRepo() - timing: InternalFrameTimingObject = data_repo.get_timing_from_uuid( - timing_uuid) + timing: InternalFrameTimingObject = data_repo.get_timing_from_uuid(timing_uuid) next_timing = data_repo.get_next_timing(timing_uuid) timing_details = data_repo.get_timing_list_from_project(project_uuid=timing.project.uuid) @@ -683,18 +682,15 @@ def delete_frame(timing_uuid): data_repo.update_specific_timing( next_timing.uuid, interpolated_clip_list=None, - preview_video_id=None + preview_video_id=None, + timed_clip_id=None ) - # If the deleted frame is the first one, set the time of the next frame to 0.00 - if timing.aux_frame_index == 0 and next_timing: - data_repo.update_specific_timing( - next_timing.uuid, frame_time=0.00) - data_repo.delete_timing_from_uuid(timing.uuid) if timing.aux_frame_index == len(timing_details) - 1: 
st.session_state['current_frame_index'] = max(1, st.session_state['current_frame_index'] - 1) + st.session_state['prev_frame_index'] = st.session_state['current_frame_index'] st.session_state['current_frame_uuid'] = timing_details[st.session_state['current_frame_index'] - 1].uuid def replace_image_widget(timing_uuid, stage): diff --git a/ui_components/widgets/frame_selector.py b/ui_components/widgets/frame_selector.py index cc199c1a..cb42294f 100644 --- a/ui_components/widgets/frame_selector.py +++ b/ui_components/widgets/frame_selector.py @@ -20,7 +20,9 @@ def frame_selector_widget(): # st.write(st.session_state['prev_frame_index']) # st.write(st.session_state['current_frame_index']) - st.session_state['current_frame_index'] = st.number_input(f"Key frame # (out of {len(timing_details)})", 1, len(timing_details), value=st.session_state['prev_frame_index'], step=1, key="which_image_selector") + st.session_state['current_frame_index'] = st.number_input(f"Key frame # (out of {len(timing_details)})", 1, + len(timing_details), value=st.session_state['prev_frame_index'], + step=1, key="which_image_selector") st.session_state['current_frame_uuid'] = timing_details[st.session_state['current_frame_index'] - 1].uuid From 3b3a8c1f6f15a377313cef18b409b442007c687a Mon Sep 17 00:00:00 2001 From: piyushK52 Date: Fri, 6 Oct 2023 16:39:43 +0530 Subject: [PATCH 050/164] minor text changes --- .../widgets/add_key_frame_element.py | 43 ++++++++++++------- 1 file changed, 27 insertions(+), 16 deletions(-) diff --git a/ui_components/widgets/add_key_frame_element.py b/ui_components/widgets/add_key_frame_element.py index 885fbd67..4a6383f0 100644 --- a/ui_components/widgets/add_key_frame_element.py +++ b/ui_components/widgets/add_key_frame_element.py @@ -15,8 +15,6 @@ def add_key_frame_element(timing_details, project_uuid): data_repo = DataRepo() timing_details = data_repo.get_timing_list_from_project(project_uuid) - project_settings = data_repo.get_project_setting(project_uuid) - add1, 
add2 = st.columns(2) with add1: @@ -25,22 +23,35 @@ def add_key_frame_element(timing_details, project_uuid): image1,image2 = st.columns(2) with image1: source_of_starting_image = st.radio("Where would you like to get the starting image from?", [ - "Previous frame", "Uploaded image"], key="source_of_starting_image") + "Existing Frame", "Uploaded image"], key="source_of_starting_image") - which_stage_for_starting_image = None - if source_of_starting_image == "Previous frame": + transformation_stage = None + if source_of_starting_image == "Existing Frame": with image2: - which_stage_for_starting_image = st.radio("Which stage would you like to use?", [ - ImageStage.MAIN_VARIANT.value, ImageStage.SOURCE_IMAGE.value], key="which_stage_for_starting_image", horizontal=True) - which_number_for_starting_image = st.number_input("Which frame would you like to use?", min_value=1, max_value= - max(1, len(timing_details)), value=st.session_state['current_frame_index'], step=1, key="which_number_for_starting_image") - if which_stage_for_starting_image == ImageStage.SOURCE_IMAGE.value: - if timing_details[which_number_for_starting_image - 1].source_image != "": - selected_image_location = timing_details[which_number_for_starting_image - 1].source_image.location + transformation_stage = st.radio( + "Which stage would you like to use?", + [ + ImageStage.MAIN_VARIANT.value, + ImageStage.SOURCE_IMAGE.value + ], + key="transformation_stage", + horizontal=True + ) + image_idx = st.number_input( + "Which frame would you like to use?", + min_value=1, + max_value=max(1, len(timing_details)), + value=st.session_state['current_frame_index'], + step=1, + key="image_idx" + ) + if transformation_stage == ImageStage.SOURCE_IMAGE.value: + if timing_details[image_idx - 1].source_image != "": + selected_image_location = timing_details[image_idx - 1].source_image.location else: selected_image_location = "" - elif which_stage_for_starting_image == ImageStage.MAIN_VARIANT.value: - 
selected_image_location = timing_details[which_number_for_starting_image - 1].primary_image_location + elif transformation_stage == ImageStage.MAIN_VARIANT.value: + selected_image_location = timing_details[image_idx - 1].primary_image_location elif source_of_starting_image == "Uploaded image": with image2: uploaded_image = st.file_uploader( @@ -53,7 +64,7 @@ def add_key_frame_element(timing_details, project_uuid): selected_image_location = selected_image_location or file_location else: selected_image_location = "" - which_number_for_starting_image = st.session_state['current_frame_index'] + image_idx = st.session_state['current_frame_index'] how_long_after = st.slider( @@ -83,4 +94,4 @@ def add_key_frame_element(timing_details, project_uuid): else: st.error("No Starting Image Found") - return selected_image, inherit_styling_settings, how_long_after, which_stage_for_starting_image \ No newline at end of file + return selected_image, inherit_styling_settings, how_long_after, transformation_stage \ No newline at end of file From 5d8161e4366df055c80c48fea0c7496c2bf60797 Mon Sep 17 00:00:00 2001 From: piyushK52 Date: Sat, 7 Oct 2023 19:12:29 +0530 Subject: [PATCH 051/164] runner script added --- app.py | 33 +++++++++---- banodoco_runner.py | 58 +++++++++++++++++++++++ requirements.txt | 3 +- utils/common_utils.py | 15 +++++- utils/constants.py | 1 + utils/ml_processor/replicate/replicate.py | 16 ++++++- 6 files changed, 112 insertions(+), 14 deletions(-) create mode 100644 banodoco_runner.py diff --git a/app.py b/app.py index e13ee5f1..7fe9df48 100644 --- a/app.py +++ b/app.py @@ -1,15 +1,19 @@ -import webbrowser +import threading +import time import streamlit as st from moviepy.editor import * -import time +import subprocess import os import django from shared.constants import OFFLINE_MODE, SERVER, ServerType import sentry_sdk +from shared.logging.logging import AppLogger +from utils.common_utils import is_process_active -from utils.constants import AUTH_TOKEN +from 
utils.constants import AUTH_TOKEN, RUNNER_PROCESS_NAME from utils.local_storage.url_storage import delete_url_param, get_url_param, set_url_param from utils.third_party_auth.google.google_auth import get_google_auth_url +from streamlit_server_state import server_state_lock os.environ.setdefault("DJANGO_SETTINGS_MODULE", "django_settings") @@ -17,10 +21,8 @@ st.session_state['django_init'] = True from banodoco_settings import project_init -from ui_components.models import InternalAppSettingObject from utils.data_repo.data_repo import DataRepo - if OFFLINE_MODE: SENTRY_DSN = os.getenv('SENTRY_DSN', '') @@ -38,6 +40,21 @@ traces_sample_rate=0 ) +def start_runner(): + if SERVER != ServerType.DEVELOPMENT.value: + return + + with server_state_lock["runner"]: + app_logger = AppLogger() + + if not is_process_active(RUNNER_PROCESS_NAME): + app_logger.info("Starting runner") + _ = subprocess.Popen(["python", "banodoco_runner.py"]) + while not is_process_active(RUNNER_PROCESS_NAME): + time.sleep(0.1) + else: + app_logger.debug("Runner already running") + def main(): st.set_page_config(page_title="Banodoco", page_icon="🎨", layout="wide") @@ -65,13 +82,9 @@ def main(): delete_url_param(AUTH_TOKEN) st.error("please login again") else: - + start_runner() project_init() - data_repo = DataRepo() - app_settings: InternalAppSettingObject = data_repo.get_app_setting_from_uuid() - app_secret = data_repo.get_app_secrets_from_user_uuid() - from ui_components.setup import setup_app_ui setup_app_ui() diff --git a/banodoco_runner.py b/banodoco_runner.py new file mode 100644 index 00000000..c7a1c4e9 --- /dev/null +++ b/banodoco_runner.py @@ -0,0 +1,58 @@ +import os +import time +import requests +import setproctitle +from dotenv import load_dotenv +import django + +from utils.constants import RUNNER_PROCESS_NAME + + +load_dotenv() +setproctitle.setproctitle(RUNNER_PROCESS_NAME) + +os.environ.setdefault("DJANGO_SETTINGS_MODULE", "django_settings") +django.setup() +SERVER = os.getenv('SERVER', 
'development') + +def main(): + if SERVER != 'development': + return + + retries = 10000 + + print('runner running') + while True: + if not is_app_running(): + if retries <= 0: + print('runner stopped') + return + retries -= 1 + else: + retries = max(retries + 1, 3) + + time.sleep(1) + check_and_update_db() + +def is_app_running(): + url = 'http://localhost:5500/healthz' + + try: + response = requests.get(url) + if response.status_code == 200: + return True + else: + print(f"server not running") + return False + except requests.exceptions.RequestException as e: + print("server not running") + return False + +def check_and_update_db(): + from backend.models import AppSetting + + app_settings = AppSetting.objects.first() + print("user name in db: ", app_settings.user.name) + return + +main() \ No newline at end of file diff --git a/requirements.txt b/requirements.txt index 11dd3072..ed8d0394 100644 --- a/requirements.txt +++ b/requirements.txt @@ -33,4 +33,5 @@ watchdog==3.0.0 httpx-oauth==0.13.0 extra-streamlit-components==0.1.56 wrapt==1.15.0 -pydantic==1.10.9 \ No newline at end of file +pydantic==1.10.9 +streamlit-server-state==0.17.1 \ No newline at end of file diff --git a/utils/common_utils.py b/utils/common_utils.py index e223d8fd..0d1c7844 100644 --- a/utils/common_utils.py +++ b/utils/common_utils.py @@ -1,6 +1,8 @@ from pathlib import Path import os import csv +import subprocess +import psutil import streamlit as st import json from shared.constants import SERVER, ServerType @@ -163,4 +165,15 @@ def reset_styling_settings(timing_uuid): for k in keys_to_delete: if k in st.session_state: - del st.session_state[k] \ No newline at end of file + del st.session_state[k] + + +def is_process_active(custom_process_name): + try: + ps_output = subprocess.check_output(["ps", "aux"]).decode("utf-8") + if custom_process_name in ps_output: + return True + except subprocess.CalledProcessError: + return False + + return False \ No newline at end of file diff --git 
a/utils/constants.py b/utils/constants.py index e05ce946..9b5be43b 100644 --- a/utils/constants.py +++ b/utils/constants.py @@ -7,6 +7,7 @@ AUTH_TOKEN = 'auth_details' +RUNNER_PROCESS_NAME = 'banodoco_runner_SFX8T' class ImageStage(ExtendedEnum): SOURCE_IMAGE = 'Source Image' diff --git a/utils/ml_processor/replicate/replicate.py b/utils/ml_processor/replicate/replicate.py index c33f194b..516b8d2e 100644 --- a/utils/ml_processor/replicate/replicate.py +++ b/utils/ml_processor/replicate/replicate.py @@ -22,7 +22,7 @@ class ReplicateProcessor(MachineLearningProcessor): def __init__(self): data_repo = DataRepo() - self.app_settings = data_repo.get_app_secrets_from_user_uuid(uuid=get_current_user_uuid()) + self.app_settings = data_repo.get_app_secrets_from_user_uuid() self.logger = None try: @@ -238,4 +238,16 @@ def get_model_version_from_id(self, model_id): # version = (response.json()["version"]) version = (response.json())['results'][0]['id'] - return version \ No newline at end of file + return version + + @check_user_credits + def create_prediction(self, replicate_model: ReplicateModel, **kwargs): + model_version = self.get_model(replicate_model) + headers = { + "Authorization": "Token " + os.environ.get("REPLICATE_API_TOKEN"), + "Content-Type": "application/json" + } + + response = r.post(self.dreambooth_training_url, headers=headers, data=json.dumps(kwargs)) + response = (response.json()) + From 4e810a46a4a1ca98b3fc7cf9da6efdfea6300639 Mon Sep 17 00:00:00 2001 From: piyushK52 Date: Sat, 7 Oct 2023 19:12:35 +0530 Subject: [PATCH 052/164] runner script added --- banodoco_runner.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/banodoco_runner.py b/banodoco_runner.py index c7a1c4e9..2948db6d 100644 --- a/banodoco_runner.py +++ b/banodoco_runner.py @@ -19,7 +19,7 @@ def main(): if SERVER != 'development': return - retries = 10000 + retries = 3 print('runner running') while True: From 24da84805b8f7e1893ef6febe7066242e737b441 Mon Sep 17 00:00:00 
2001 From: piyushK52 Date: Sat, 7 Oct 2023 21:39:00 +0530 Subject: [PATCH 053/164] minor restructuring --- banodoco_runner.py | 2 +- repository/__init__.py | 0 repository/backend_repo/__init__.py | 0 repository/local_repo/__init__.py | 0 repository/local_repo/csv_repo.py | 109 ------------------ .../59ca0052-6c8b-4eee-ae6b-7dbf51f5327c.jpeg | Bin 73512 -> 0 bytes ...ntitled design - 2023-05-12T110028.218.png | Bin 576537 -> 0 bytes ui_components/methods/common_methods.py | 6 - .../methods}/data_logger.py | 0 ui_components/methods/ml_methods.py | 1 - utils/common_utils.py | 1 + utils/ml_processor/replicate/replicate.py | 6 +- 12 files changed, 4 insertions(+), 121 deletions(-) delete mode 100644 repository/__init__.py delete mode 100644 repository/backend_repo/__init__.py delete mode 100644 repository/local_repo/__init__.py delete mode 100644 repository/local_repo/csv_repo.py delete mode 100644 temp/59ca0052-6c8b-4eee-ae6b-7dbf51f5327c.jpeg delete mode 100644 temp/Untitled design - 2023-05-12T110028.218.png rename {repository => ui_components/methods}/data_logger.py (100%) diff --git a/banodoco_runner.py b/banodoco_runner.py index 2948db6d..722f7286 100644 --- a/banodoco_runner.py +++ b/banodoco_runner.py @@ -29,7 +29,7 @@ def main(): return retries -= 1 else: - retries = max(retries + 1, 3) + retries = min(retries + 1, 3) time.sleep(1) check_and_update_db() diff --git a/repository/__init__.py b/repository/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/repository/backend_repo/__init__.py b/repository/backend_repo/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/repository/local_repo/__init__.py b/repository/local_repo/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/repository/local_repo/csv_repo.py b/repository/local_repo/csv_repo.py deleted file mode 100644 index e973c947..00000000 --- a/repository/local_repo/csv_repo.py +++ /dev/null @@ -1,109 +0,0 @@ -import pandas as pd -import csv 
- -from utils.common_utils import create_file_path - -class CSVProcessor: - def __init__(self, file_path): - self.file_path = file_path - create_file_path(self.file_path) - - # returns the entire csv file data in json format - def get_all_json_data(self, na_filter=False): - res = {} - data = pd.read_csv(self.file_path, na_filter=na_filter) - - for i, row in data.iterrows(): - res[row['key']] = row['value'] - return res - - def get_df_data(self): - df = pd.read_csv(self.file_path, na_filter=False) - return df - - # updates a single key value pair - def update_csv_data(self, key, value): - with open(self.file_path, 'r') as csv_file: - csv_reader = csv.reader(csv_file) - - rows = [] - for row in csv_reader: - if row[0] == key: - row_number = csv_reader.line_num - 2 - new_value = value - - df = pd.read_csv(self.file_path) - df.iat[row_number, 1] = new_value - df.to_csv(self.file_path, index=False) - - # clear the entire csv file - def clear_all_data(self): - df = pd.read_csv(self.file_path) - df = df.drop(df.index[0:]) - df.to_csv(self.file_path, index=False) - - def delete_row(self, idx): - df = pd.read_csv(self.file_path) - df = df.drop([int(idx)]) - df.to_csv(self.file_path, index=False) - - def add_row(self, new_row): - df = pd.read_csv(self.file_path) - df = df.append(new_row, ignore_index=True) - df.to_csv(self.file_path, index=False) - - # TODO: create a separate interface for this later - def update_specific_timing_value(self, index_of_current_item, parameter, value): - - df = pd.read_csv(self.file_path) - - try: - col_index = df.columns.get_loc(parameter) - except KeyError: - raise ValueError(f"Invalid parameter: {parameter}") - - df.iloc[index_of_current_item, col_index] = value - - - numeric_cols = ["primary_image", "seed", "num_inference_steps","interpolation_steps"] - - for col in numeric_cols: - df[col] = pd.to_numeric(df[col], downcast="integer", errors="coerce") - df[col].fillna(0, inplace=True) - df[col] = df[col].astype(int) - - 
df.to_csv(self.file_path, index=False) - - -# def get_project_settings(project_name): -# csv_client = CSVProcessor(f'videos/{project_name}/settings.csv') -# return csv_client.get_all_json_data() - -# def update_project_setting(key, value, project_name): -# if isinstance(value, str): -# value = value.strip() -# print("striped newline: ", value) - -# print("setting ", key, " value to: ", value) -# csv_client = CSVProcessor(f'videos/{project_name}/settings.csv') -# csv_client.update_csv_data(key, value) - -# def remove_existing_timing(project_name): -# csv_client = CSVProcessor("videos/" + str(project_name) + "/timings.csv") -# csv_client.clear_all_data() - -# def get_app_settings(): -# csv_client = CSVProcessor("app_settings.csv") -# return csv_client.get_all_json_data() - -# def update_app_settings(key, value): -# csv_client = CSVProcessor("app_settings.csv") -# csv_client.update_csv_data(key, value) - -# def update_specific_timing_value(project_name, index_of_current_item, parameter, value): -# csv_client = CSVProcessor(f"videos/{project_name}/timings.csv") -# csv_client.update_specific_timing_value(index_of_current_item, parameter, value) - -# def log_inference_data_in_csv(data): -# csv_client = CSVProcessor('inference_log/log.csv') -# csv_client.add_row(data) \ No newline at end of file diff --git a/temp/59ca0052-6c8b-4eee-ae6b-7dbf51f5327c.jpeg b/temp/59ca0052-6c8b-4eee-ae6b-7dbf51f5327c.jpeg deleted file mode 100644 index 9bab9d75edd653c1097f2638f82ef374a9457451..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 73512 zcmbT7cT`is+vh{?oglqSN2y9EA|O?I2_0!t5_%_y2#B-@1O%jmbV5K%=paZ59ciH> zy%*_7(e?Mfdv?#+zjpVzbMHShbMNyxGjpCh-}B7h*}p3QYArQQH2@9{0D$xF0{mS7 zr~vSAasN~P8UDXQKukb@k552KL_|nTPD)NrMoLCTK}kzZK}kbNMn=s*O+!Zy1Omya z9xyV{Gt$xn>HqT*9K3&h@Cir=2uSED$SCOlpY3lafQA?sfZK_O!wtZt!NH@!`P&2F z_~$19&VL!;e;N+%KOYH+h)GDv{xv{T18{Nh@Nn_*{^Rw()}jCE0r)fov|Qp!gmeaW zMBLu=65)sfVjkt1E}-G~FJ4J|p9m6Ch6jvH%n$ka1q6kpq@O&M0m-VoP*qdc(9|+A 
zHUXQOnOitGzIJkUafSH$`3D3B1;ZlWM}3HniH%E6`IMTL{y8J_OJPxQNoiSmMQvR@ zx}mYDx#fFzPj4Tle_(K8a%vhoGdnlGy0*TtxwXBsyLWPWc7Abrb^ZJ1KU_Efy#L1f z_xx|L|AUL>9~Uk@J{~^Nf4FdP1OMH4H24Hu;)JwH21It=blei*#PrIDf|@Q89!bMr zKzpBYQU+eB)rTklLHjSV|2wdV|1Yxt1@^zW<^kk*IR6F@j|QL!xT_NnpW#j_p|Z%v z4c&P??;|#3&5HCb_(F~=2>;AJE%|61(SKF4&VzbsIv$HmJ<$Ze9iFHaaXA|4)Nkg! zA&oP$c;M(SFz96xj?YnSe=ZK`@v~1a7Ti?DWr?e^dwqG^v~u<(G8mH~>8^T{S)CAC zuC5ynp1qQlRdnJk`y{oL(}>gaEY_SRCTU;dIkt!GQu z{whT;TY5c{Al%;+uMicD2tJ!XT$d9;W;cdf-~&rONFgc`mHTuCb(Xd zgds-D0K28m(ju-7ece!qC%;dE_m}(3 zLd`P80Q^K0wpX zImsKv!Y;d#B}>SOaA?PodAGXwVU71`++i_r3grj>3wY-nYcGk2V;}2SOthGU686XLP1z&C9R`%3C*BaYDacmIYp!`(ek_bo}V8PrcNRJXT_E=d%OYLNo zuqK3n{cCa6fs-x`yc#5|m(nQ;q^^T~M~G6g_!zt24iXYoTRRfo){P1B#JQx?1z#=u ztIZ3{UzReCK{{*E-w8b;(GV#eKNXE*9t6B*(R(4YDNBpi(6A%NAUSR6t-g-trC0XJ z2qZMcrBD2#Y@?BP9Y2KBFhKB8uT+ffigV;E<)Fsz)UV!fc3?NZ)lH#6$@4+WQ}3k zyVJg~HY$ZbsoSPypT1PPhZ&WKr9YN<<|-T^yFFi!O#We;4+R{AGdobFaOA+2OXaa! zUJb;$08Be1$hV96Tl_Kmr23@uR27gs5!f_fjQ21xe`Sh-(QN~q8;|;V4RcP{Ys>93 zcMzBB+rca9^jTN>$Xszr0xY{#^lrV}?x-EGig>zXD*n#j&Vzk^23u1!G3w z(pMHA9RM^l-1U~9Kc5aXMt~|_L}0k%qR5daPQVeKVXC-YOd4$yI$B>Nhii zY*CBS=cM8L;U${~tA;yE!#$^7k!I&AO=`u*JYjyJ z?PX1q4wtuYrSb#*e7e}))g{^0k$maujpR^PyiNn@th#{i+etg@zwVORw$6m<<_ryX}(hoD+nFVA_cO=Y-ImP3;8GnZ{_a( zSo^GBwpRBtHTxq%;*-5>TnyaT=o_cwnSM&7`H(kNV@u|Q%T@&VgV}PDMm>>fsuN>&0vz2A7H5n zjir_DcVB{q8_9YNE6AF-Z@&$Jksh#MXhwdmJtobe?9@*0JHEAjd}GALu^0R22&yr) zgZQ*I%I*|k7Y;u$*=V3bz?mmc!|mINPzvru`Z&*0ksq%>ui2x}u*(BZvR2AFruo&UsW-%#Pt7?z-ANaSiIA(BL zO7!p^JDqn^6eov)wU;YOS9Z#?Qcv=wM07q6&}W(Zc|WGzFZ~%yvSP~zWSC>Uk8^e+ zWMY6dF1tbS-!EMPx*Rw%XsPLzGWc~MlGl}sc*C!M2cl6Kmyl{Fl93e-zwA|k%^pO_ z@d%=1H|Pviol5aM^xUwE7UoX3ZE@2zPrN#mv1)B5{S9Vvh-3;9Hb&)G6 zP?Mz1=~V(@O?WQXVxB#dczVZ0_EvJ2s==DtrE>( zn=59bq=Q?Tl|L0184Ov>4)BzjEagpfME|;s>yW(9sb3Hkaw+Vn%iXiR`wJ+#6zI6G zS#D;->zsdq!#hh)oni?H|f$BSE<2cS-a&YPK7OU8;h zWznJ||GxLjjWwseBk|oL2r;3P8n7w81Y$O!FW-d}ev$~gndu25t5;;{M0tH+Ielh1 zOUZF^@bhEP4E!zbbz*mi8`hfw(i{BPO);o&N%4F8;xpOQQ1-++8KFrp=hXM04R2lX 
zWXXh3q3#|B#1{lJ=s`r4nEDhm>;-YBe8TiDYk*`CJb6wr3d~TD$fn~y;M|J9<}+kj z(RJ-w)v2pKbti8Y!7Y=yg9z5Ia&Me00DnB}Fy0_HvZ2iPJMuOcK(a`AKZOOy4zQAr ziT`%bc4~cjOFO8h$h=i8DqpTw3JU-^p=#LvRL;oi6erH192hne^H_sY&+fO5<2pZQ z`*i|~r7YDT@EEi6jT4<0+n*3>DnA!wDiQ*+BQ07wQv8dSSVHuLcGocE9U90!qtC>{rXxPbRgV1 zc`kUTRAK3g4JW^P#FIGU6`!)Fr?rJfvFivMG}V6=NFyQl{CdR1%F2E*W@O^^a(PqF zhzr&DXmfwpBrZEi6)M80;CrZ@I~lw}W2F_~>{jNB>}9glD8_)KJzg>xAWSkPqVfGJ z47JyD1iSt7Kd-a74ow-UTX?NRW_}4J6F*BaQl&}#)s~`j*fK?_>e$`~B2Sh=^F zjo}z1(wD>1Eo$Kgjwv8Ndf~2vj@w z_jw>s`YCI6w8sT>W7Imao=s}q7FboLuEN*1&T42BgPWD8nGg7pgIA_hsHQN+qg^TCV%1+!0SwXvLogQ6%7u z869f3q8Vmrdun;RSa3BGOxk;26uL82OjX6FSW`uD{@9obTMHFEc*1U!gC(x!yJL$a zf8OVvVip(utYg{-I4UDklz-r^V`&M{%Yq+{v79CBQamYUUoH@uT^b; z9B1`c-XNZ-3ncY(HqEhN*|=n3&#c5uRIeQxBX7W5F6pdtVglvqLC$Y{=-iFYBX5HzrBJylWe;bok^HW-jrloD>q z5a3+S3wKJEvjPF&lZ9%DYP3#V_P3ez3%8&#$wkAs;x!9gx0XM+C-zAMVE<(br?-Idj?7mk0rK|Ub ze*tPO=JmJb2S*X9w4IwQu8cehJ;q53wX{r{q62)D>Wn-vXsoZlpeYxSvtRSyGNBMVgr8WPLF+581I}n_eLCAOU~u$QmyL~$9Oy_IJTyu zuPiKiA8Jw#f>tkGv9@oelW8VCu66@UABD5~R1k)+qnBCqCs@g0TS&IYqrBW6-V?QH zeEZ(FR6L`6KZbR(X=Sn1QEdVpTr+Ox+a2gh+SSXfp}qxrmFbwOgl!!%E=ma)&8~h` z79;;AOQ>+h+We1$(#Wluf>geD4e2T0A0~LSe4@n+KBJUS`1lH4w-7~^VnH*PAl`Wc z{)qa_C^K;2Vp42~dG+-pud1`dRiJI=T>Zr93S#OUndy{1n3KWR5>LVU;%OsZd4E+C zQq+9tX<&i!kjA5og@%$Qbljplle0n;WJIW!j#`<&d=JVUd^Zt*OaBKwVDum?Ag$xI za8G4id2kkNObL6vI;#*w_=&1Kr zx)j>^l|tcQy}I=(RE~g3q_AWiyBb6DiQ)lo^U z9&hEw01s=8g60J(o&Qm5wm)dGx)^oV+;9EdU<`!UR5`akA9N?2-x?1kPpk%}dlLsR z?(ZwdrD0f$Cd56oQ5YNp_gWMCrT}Co*hA@<(Ml>XM2)oeTaZsA#aH>xGhiX z05otDmMoR}4R3G-QH^ExZ6wWNd#JD36+)`&C8Zv0*F!WQG65>&(i&KvswOjTnIY_= z)W4O*tDc*2Yz<8PBq=OXX`1*c(E?Sr>%CxRG1}<;QlvMsGh>OGjVu(3|GR2*2B`CJ zu`B3u+VybyEN)!xMV=m}Pw&%xM)VWmJ2SclQ$*fEs7H=H`#K^0T zc%rzX2W&T{#x~KBVD?du-WYmTyybSP@30_OTV9XfzBZw55=zxJzf?=^pD>+t0-R5f zmN~O7XKv6X%tr55xZ_sac3(zFHeRRnu0`4XO+=vYa@%(Acv#Uu!s1qy@9a^IqJLqc z_Xmu&vE9FQWeb}u!f);`R>KTU=Au6p-LMcT2hX&3B}=*^)uA++Taz-ZsEP@@rBzrBnS zTgqMZFOFkxKN)d%_hXgjImPpa;}}SP8-WMzo2KE;DiF#U*~lkQN$HA1wzUIm%sKmr 
zV6DK~74Jlt866Z4j(sm)+5G#tLxQY*&4keQ8}1AX`uWgrIo83st2CD6bWtnh;Ke^= z(}#XC(&hE7pU=x@i%d~8G;);rPnDs9T%ZkGr?2p-3*F17o8CX-grBAmn3lqVqY7Vr zW}Dv2BYYFQdK}#nb-&fT`dMCkVnLIh;feyy6Tr9_im^Dm2fzNeL4Wblez7;=vFgCC z(dc5f+iP7DYjdUV=4JAM zxJtACN}$_#tRT!eajxeL_}BfPvKb2UfRRmtII#oE>2`Y4-0S1-BrC@>%e&2*FKB~b zPab6lvQIzm9*b+adS~05__8%5sOo`R4H8JHaAMAkZS-AJP_}ISViC;Z-MQ9bm_zQ< zH(G4IoHZAwaht86Wl;Ls`Aco1O=GCX>v-?B!`q@OBpBN(j>v`dqEmt^G%yqO4qrh7 zE87Lt`NgkN=eP4>yKYrSzbFAq99c|3s3jitv^5irN#EQ~qk}^{PG0KLQfw;+Aw(j` z*P^~7ju)}&wW8hnX?+T2g6+{uJ>46l+Z&XaDrf(6sSPX?xZd{a0#4^q@0oURpvOJ< z&-`vpB~)net{gAAs4;SS5V(gH7WR-lnVf3X;+uRot=`kY zLPXy7MV0-Ut2D{nD3~*-3X+^^eaR+0)bSU9?RoHkB9O-|V z{YonfN=`A$o*Jp?=5OBrnZRBKZ{LF1H9<3(Z8gx_d=0NlK~HOTynZGmT*kULh&@&Q z=r(rJs?pi>wWm8kzc>3H`eo8^j*arWu5acxO4Lziaj*ymzRYlO%zWs|ie&YIG7VWa zNNe>UuC&4{-t8@kiJPuuv*KNaMj3vcJp6*U>9FH#No}FrZW?)PF~xoKk?&!*+MR-7O=Isw zi?0{7Lf8x!W&Z-maBlmvZU_I#LySL&VrV#bBWL#Y$Dmt@PoD9zJwLYg+Z#*)y=I(R zP+6~z2_wIt!r5~IJs7P|+9E6F5Q+S8!vR@e#^H6$b_}?xZt@?&#q=`EW1s$t&G$aT zn_?eUAg=!Ohck$9zl(XnQcHd)SmMW60`IGn>ioi36JYL^w@!z-sKIuf zurOVM@Ymc~F-`y@Sahj_Ez1A)MYO;xmDq3f4`X@qv`S0O=h1iEziKVg*`b_61jpgl z(iflYItPG>-Y%x{z9yuLwawj@R3i)xpuYg>`FYg}E3yZ{ZjlJOezD#VN3eVxp*qu{ zHNj>9xeq0k7AAMG>L703WO8;RiU|M9MU91A=&&R_vz;)5x0Vq>l9Eynqr5P~ZC4|( z_&8wL^;zsN+kmP2kjc1I`d3X-j}H;Yx`lxnV|av)5&Qw?iX%AnYo|?L6XaniWyMnq zDvc)y3>)OW-oV{!#|&}0axOKyx{rULj^}jsoLS% zZ|k^wBHt~TpRX-#Z|t8oZ{y~3@bG1yPf^0k6>v9<;IW}KcwHwaqH@PeuS)UeFQ8Zl zIvF3Ds3KHt%Dn3cIGdA7iqe7dZm;)e7C?*^^R7uS>Txv3T4sqYr?-p{-v5A~-)^=#xO z$M7cN7e|nki^RSB7B1y9K^LU~128K1`d`2_xWEX*I-x}P++GGa4q>OBs&}Wv7`HZx zS^T8g6Q3E2J6KN>2`Zr15Emof(7`E*L9xBW6a`T15# zS^~zt)LmK1Zh;9UC!ZkZyPt`IQU`WPSBKeWx^E-chD{+r%aQjvZSeIy;bgsnCEeWg zr$-kf9-@Rl)taVCQ`CtLY0Q5wd@&4(I)GB=|3su&`EkemGWC`>9>8JiNFCD^B6IXR zwW@>Mz+3}06)H#G4tKoR<)a}wA+-5jVWSBUBO*eZoBfvSB9cPVY;P)l6jWfEWER~S zFI;VZbnnnSeXz_c>%e=sB|`*Shvc=7K_*V3;b=vU{WHkT6k;SyqM{!0qNWPax#Dg1 zl2E2krhGjnyt9fQe{=L|2UcG=QeQ;0P@gK+4$MhS)@CJ6qc`mg!NH<0uVm00RPyCV#N(c-OA~U;v7eTXQ1T8Ngw#Cxj4>0LsZ*QQ(o_AmvLP$_kU_ 
zCr>@(Nvh{v1Lxn*J4W;k>OBe$XH?04%cIciYlORz5INBv_npj>=QQM z!#$^7%^vw)g2_UNk8L;8(bk8_BnstVTH^repSpRz3B|8#1yChhebARW%OUw#9K>$yUA%}z!Y$lM5pd$w+&r%7wp-zNhsL= zej_I!gmlxt`R%mtrdrn*u-xar0GbQw*`R78u|5M!x{=Oj@%uLZ)mB+72Bb+3KRYN) z1y%(tq4#un7q-Ow<7>hc=T>FnBnSSSRD5~-N#M4y5%m&Z?z2FjTxOb18OaX8YvG3u zQ}6w2g=sklRxr)?sKmc(3}?;#vy6UJ@TDks8|42bEoPu` z6F9cK+o~k}QwOJyc;7s*B`E#c2Lg2V;a9SB7;|iV_Mz}}mKp?{vf0g&ggg2(R+fvB z`m>t4L9EUm>h{Wf5Wm|u(({k%ax_=hK^cR&iO00i0D&b%p}D;Te-=r@$5~13VOqO) zkISBRj-Yaz(g5;`ihw-}+LpJaPxEqz$twN~TY_`BwZ z=?ll;X_x(iw9c!F)5i_IrbXJU4$^pP;&Jnm8fV+81F17?;@9B3Xn&BDwZ-z0WOQt! zgJQ911pb>39OA6Z2^zzO)JsILJa?u>+zyms+{d85fNu8Hpp%!l(10RMsCQ34)lKQu z%`{oe`*p7_q5`&FFB^uF?0LG+j<_M}ERL}^Vt)b5`x8ej;g=t|J!Kg-QHyswVW^0_|TlQ4R@0JK`fspZy zi7-oF;4G}HS4IM!i)OG?yoXp<{?w{@x+Og9i!J1A{4o{rNH8GVd!l&1S$@Dx43|*w z`C9ga`D|p0_J_VGB=~Zn=EiVgA%l2bdUD>P5(~zzVR>ER8FJLe_`I(=aw^u@9vO4) z4XE+?x99Hk?}U&x1_qbyJprdRJ}?@CIu(nqr4E>dgSgZ^pd~-v$oeZN3lP5bSVsR0 zieG)fto}i*kPVab=&8Jt*MlACQ1$1D0H7h?7lcPb^6S8M zA;pV6ByU0W5X&!QuZIceXdxfN6~?4yb~vuZA@H zHA~~T3tI>WJFRfdvV7$)fy9|RepL)9cAvkF_@f|XE&Zs(lEg~m^0q_G?gwIDrvWE1 ze@tON0myeec6zRmu0?@l3bS2^@Dj)7u@#8^1n$`gU4#Ec!9zOZ6thWbf~m=2Gm%m% z5tRwmLo3)UmDD>$Eyx$B${PjXD>Jv(b8|V}WRWw{HEeBlgBEJezSBAJbk~{Ki8cl4 zv#(K4B?QBCg}(2uJ<|XwoLJSB`|f+qxfcc)Ha47|U=%?)I_Sx*X6Je| zZ!gWx`-_UC7s?UcPzRY_E#*Ubilwvp*79E65me&G9ytXA&2>)dXU#zCg?~XBiD6WH z{9_sfP?HbY2JWk6qDRf9TWbQAM~zm8cE>FTUVe^T`Bijv2Z@2(im&fmS4C}9cXQD4 zA~@=;tQP8Z)ra1L6hHddOL&PVK@)XdAB~+W5P;0Bs(3_J{-na@XF|vmzvoa?QF?wwkDcJPnXRe)lIU_sGW0_#Qb+dbVVdGiMN(~l0kv}A9s_)u2q{Q~bpVpzWY|AhiMl- z()fPzv*P>&7PxrJ`hXNdrp>%+!ov~ZLw&h~1DH|62luRBTEi7Ma_!K-mrh0>2F@#6 zHLy~E$J}U<*SL=B*8XoG4=2Dn_hs2v%oOLHm-|pOi+-bT|H5 zu&kWqPeSoV`Ly;A+>q|b^Q>o$kyuJGTw+tyMw;O@cYm%u(AewrzCiF(+Bu7bc@aHY zISnJ;W2^IL8D%nUarZY&>p$yBRPxkaS8w`(3+ zT`Z+r!K-1%@iik-MP8B?Zf+bl^@vUthy1FvGrhrNLgGp++GPRXnXtt~htkv8=S*9g z#8AbPqJhT9jVwxrao|e2+8LsOPrd;Id*qOQAZjf33wW|(dx-%KfRUa?lilN*yz!p& zIMIl%cvCLiC-4eAekwzs&9-cmnv^$BK(Ai7|A2*Vu!uOo74+ 
zWW9K?u4DOQ`_UR+*}hr2x5QQpbj3$a&KCp0G`%q0VZ^hho*n7js-h;(+ccj@YHzN} z>v?dDw9bm<`S4_(T7;P9IUbv^aA>l*9_wZG!*NcGLn|LAx+y!!U}a>fNJJ{6}m88#gtT$cvZ7+(KqCq zR>ax>BC%gsNVKRLUzb4*{Vr~bW6X|7iYUu>0AMZq0)2#;hH9vb(l+%TQ{-(L3{rwN zruz~%`}g(b9lq&V^wGAePL~ca#D&*%bv-LG{$ZozGY+T>&9@f!oODL&DWuXfCr)H+3VGN4W z_V0+-vKNV_JI#?pEJiiVouE-5WpCKZfm}BiY{5`X~q!sWlQmc-D@rtW6U3jZjPt z9Aw`;T(L6J$!x$LdrFy7E2ax&$4$??u|w`PP`noEsqt>=6SJs`lD`-{c%y_Z;O3{=;X;cygT^_u@;c%)W`4I_cOOYS5O&%F-p8%I!sptDNSt*5;#e({o#wd0TUA>>D zY;8SF7NL1B13EP7bvlS<74`44ssWiiRS;3xY*OXNe~cIRM(luddPmgMr$&GON1{b= z!|$$ndIz-Xi_G@XIKO%5b8s>kjEK*+WjIF(YkV~=T{kI+W(_>&u6O_PJCepmROM%W zgHviTQd{}G6aSa80hEBP*pnFob6sK{*2!}U!+o{pxTEnlWXx~Vc}hh*BOS?QJ>IkC z;3D^f<)Rs3%?pJ=gEuo!W#dl{9w21ENY^_1RyqGN;~wOk5ueItIwvt861#Pj(i%}z zCQmEubF7r%w{xtm>%J5x^6pJh!k4ljBW>oY0t)1kk`ug>$yvsP{o)8#QdnXc#69+1 zaeims*i`M+p%I}s7i$^gs(Mv$l`Ez~A=b&JJ2}}ja7O*h@m~Po_l-t}4SnQ%GQE8# zdq+Ya@({&t9|mpn`H@@r0=kVJAZPhu=1@zL?~b1w8&Z7`7Z2e77k=k= ze3ks23xoJyVI6-3)VaI6y8D*wHdMaWxUkt(8M5?BZMaGMjAr`->(v zl0P#)AsH*w<$2pSCL$8MXG89?j7MC)EaeB?k$1>Jh#D-E?$Rr$F;CsFUGqk&&N7TFxVbA+dHK%hjeha`9Hp084X|KE@i$#% zdI490IyV0V-}&&#wL$_kD|oT!ek=GdAW@>hiyQ9oHVnrqC*tvmKd-6j37s$-!Uo4_ z9ovkQv5sSFdgY+C?Jls)SG?3p0&L=}A{1ieF&TwT7N>sSXp*_x6ZrOq#PH{1Cf3X~ zB$zrdML~<+`JrR{<;e?TC%cqcEZhZ$v2&~(n8Xu`L~vb^ncBYly7j~^Av*5tbzH>U zlfX);w91KS*WW&rAzP*7F|+vOAy_xCq4Q?+nY5#8ZPagDg2Y>stllbwV+$2hYF(I< zh==_hwj+ntRdCVRHqg%Mz9UQGorP%OB(Sl>WnBE>0Qid4q|#99=<%_5x!T_1&+s3X zgB3WE0)GMFr0r07Wj|4ou&ZgO1(#p$eI)$5ao;>kRhTHyzJDeXgu?21LVU9Ii1X&} zC_k{o+sowO+)Uqw^wUgL?&V+Xn}Ls6-{31NE&%7WuxIiG-*U(IEm zK)^-e+xg|mdqj#N1LsTTvE1xTChflj7r1gR%sHoO|vy~cS zlkII@jt@xUnGZFIDPJ{-)BfNier2{{YQpL#G@^`k`yQl@zZn`+^y)nA3NX|2if*0k z1h0p*3GXm*a{rZ=*3(gqKgW=3&o#*m`QYjXpJxl4cL~Ci@#uT=9b1QB#TUMeL(~A7 z<6R>Wxm&-BVl%JK^MMh|j}mJ1%*Db;_MY-@^{*3F|`IhunujwOkV@tzxmWEA6?o7`NsbI#kQh}eLVJag> zC^C;7DD!D2wO4ahpUc;8excAOdj!TZfNgC3wsRIVTVW(-LuiG0I7^qM(U$rAG5xaT z{?h?(&r;QriXKG0Sh5PWnB^Wc=ZS5$tpiHPzi6fSq4VrA^_IB1NL+{3*&XsNmRx{w zEsNmmfrO+bn8uR0luqrKOK{Myx#H5Ogh_j3r+^QN0 
zcS5<66{}V}ziE@yD<%K^FP~_8N5`4XI$_|E@lusn;5Y!oBO*-rTk_0{P**h|mxrPD zm6ZbA9a!MIf#G?b8iET=Z;itv?_EmPCrZnA6hF?~i?11b=v=Sz z$2QFHYq1E61R=cOx}+Qy+s@G-#`O|t2_LPVHF?PUzEBv}agarx1B3Qtd7AF^!j@!k>9DXyR4^zt!a*yn=Po% zotAFKRV7M=wpcl-YyhRL0ov_pAIe z)oa501f|O-=5!a7M#V>=1z34&)OEywsHeZ?5vp*Zfha7=*4BHn*r&mVJb_9cT%<)M z(|=w~ijom5<;}7T<_LLt^;Ns?+oVbD;s^N?bN5mp29SWB`D~ELiMXqXpM7&z-WP=8 zQNT3|$eD|R7JDRpVC2xc$RS$&r?{0|`1C!jXoi!}V$_}oA^G%9rkj+fOjonLtR@Az3raJVb zr-TlvRS7pgyFac%{XPhzXL@?Zc*MjQZ`~P3q0X`1wGCUIR18On(znz`tw6{;O_QVd zzhr0WuUu2^oB;FhfxF`{_Igv3e-ivlTKEb%4Y&YD^GJ|k`Ju+( z$d(*c#*_P>Zc_`-IZx&6(J&8~u}!+_f_Vd+E*EC1?3nqAL^b-E=>ntM($Ckv#NokR zYmK5u8b4kzt}J?vD$ea@eh*=<_>}o_xU7r>PyP}`Ceuys%(z_){=hjb17d@!jY@DN z05(inb0e~=GMuJK2TrTxux*AE-Aqfy5vgHmmAL&YN!}~F!iyGA4W8{cnAm1|UDiNR&_4)#1@pd#}=`W!F?TC>n;^~Kc^d$eJ`}sMw zxe5=jLE`iFwtt~7mbeHm*bTO%R_nQj6mW@I?sKv}1{x!u2o|Ee6 zqols_FgNlBpYT!jhrT<|ts$O`XmcDjn~D(j3xo2}JU`gfjBJa-Sf?ph+e7H&=JGd0 zx%BLtpLK(6k9jh?uFWej&*B4Jh9$|fIvpa>t$+M~EVo_hi#)+@y`bo+y(?4f`(jd{ zx13VL-lyv!$oe?(z($9U#DV9^$TsCjIH{o_&GC04&t$kwVdXXK@>n-}z~5t&@C3In zAmzx{oZ%VUR4Pp_^rd|_uPUYY?->qxZ_`VUUG^M75vkC9)}@3*kyT?%Q*Izm=1yhX zlRey&@azprsH&9Z=UKW?alBfgvOgx=t}&R5Qa9M3iO}rn}D2UYwULW4&oQ{jkg(t%p4hye|r`N`wQS|soV8z{tNg#J>fB` z=(iZ=XHxXFt&d!nXrhq%ktU$#e9@I*B$gka3iU9u=;y2MJNPnp#ZvXZE*U;@WTdu304@;1*1rnVVsD4jUA;bg#Q-)mIjq?`ae;N&r3a<+*;?%r5$C{)rmKiF-8pe?VoSSUzfwFIdJd;Iv?$Jx-|ex(q3 zCZ~Ov@p+vzgMXp@4c8jz?a~?rc!O<7bgGuBHx_tFFcwqgpY!Ya2kgB6X^~uQ56!|v zTO)>}t}S5xH5R#g8%t{0_d0E2V`Igq*YbvWFKF-CbeH1kUw~vC9+e%+`myJ3E*bud z#LQ~T&OTLRg3rI`$y31Yewfy#55j&DFZrI7u|enB@O~Xp@C-G#3QO;9i^%AeUeCB0 zg|J<>{OYa_dE56m;u-6*r3z(XD5E)RRNPYf*2euY3O3Wj0kFPCc021%nox|S4Xms0 zEm9kE80pnEJ%xRGLEg>crgS$Fc;k>dT@}8*VX+HEm|gG9#!lzehZL@}#5+fr^JFlC z4C3TH#EcX2r%6^sSnwEkUunK^ySB)jqTRoF0h;2ro&oQGa)?f-E~EEf$!R?^3ZH3r z9K8_*TlIIgHW2veD(uT@5X#Z*TV5Gl7{Mcr*=ZU9zBAl)&HDyL&b$O!IA+-aOubgI zKJ*Plj2qbM=nLdu0L4$1_GiWkPRejLV7K6b5^yJtIW4L@2qJeof=hBNI=9h8tvuMg z?&?Iu$a`K1(d?UNPrc7<{Wz0;M>nX!@f36uP~j^nu;Fc;9`{D`FMtGBi4~@kknT>Y 
zg>mH=*8FqkFWtYy|L>^ZgFU%cNTuB?`fVYz5LQ$1vOQU&ST6|t7weXdy$0e%zV<#J=+bmf86U~{rvd~SYb*%z!!t^D|Lr2lKaXx zR4f~P9@tZLSoRDQIWVgk;BR+dRP*_l*S!5)6Mw3i?%>El8vw$ihh%n=@p(zaAWQWV+TG`#e`}23GfEXI`3j9V7_NeGNyastNpEMq6)h< z8rYcgcE8jhGb$wymdyM6gSxc%;%0dTPa2PWRCe-#rbtMN`BmEOtMCzM%9%YX+H0ZUXGr{3RcKnB3?PZ9u-K6vdkZU+` zm~8;4Zjr=kPo$^C^BkfY51&bwb!U89cAQ`Y`huourj030%aU>_*FR+6Cv#A6Y65%nxfm1QY-l|re|OkEfaXzXYAyI{mQ>7(1 zduo6hxzZkVm-Y8r@U_Mu5aoTT4qcyUZ8v`-<;OwQkSKbd*{!?{ep!smn-bX1%~K|f zZPIYSsg+%;7d`6yezgPLKGFBkEUmfevpB}jKnc7`^-e5S-$<2lK{4NrYlDN2Ty@28xpJ#B=v)-FD6A@qFgYWhDn*CBax2!nEu-j~ z284ADKVGoaH-&>z7Z%%+1Z~4=1IZ-wpJ7}^tD&u~+3l_(y0?|vZ_X zo-e$Oj-&Ie+-!6wb!EAQePuPI?2@BG=y1sDRfp6bl}VFxeK^H!Yd5!Awe8L2?DAaQ z+&f7$(wwug>Pf7ixH-=irJIS)+Kjab#@;cU)!2^%J!%MmW7CSa3Wgk>oL4-edK9O9 zi%@`Z&MQ+)(r$HG#1@bl=KI@01CPeDVN9Q1{MLjkk_SIr;<0)jwK+Dl-1UEl9}Bem zmU*>vYpLocHgO)*meJxL-kBGi^N?>qZk$T<-q>A@Rlz2Ygk_i|Owp zi6(8!3hhC+7!?}EH?BHR=W!r#D1?s>6G2W%+hNcy=P$dAcCTgqpL{V6p{?9%)2{gR z^a3!$KfG>-@D<~>WF~Sm+P;kV0ku3EsmL%L?(lyF;*3RC8ZxoO=yIgbL%fxwj^gB> zx}+{^jxsmcSxDrX>$MgOq)fqbcKX*DGY_-%zz zT6)N_9m8s|&%JTFcGtePVsI5g*J#ebE?enQiJhdA{*RrtrfcR8i(Vcc z9r5k*MmAgBD7uJ!e(?P3=>@uZ2-}Y=99IGHN8$YbKGgKrv$%%Y?Gch!loRrvKp3t$ zVk1dv?2k_YPnNI0v)uC^h7v35(|L*r5RwV)&3b!kBt&xCfa#j%Y(5rho(+&)okHH_ zCLT~Rf~8O6SBHnS5X1|}Up2+e8t91o4$xEBsNo(eKIH6oWbl@zqdsM+_ZhEFRj71vIUxyl zWn{m&a0zI~wkk-qE2Tf{V;}GuaPa1+gM)4LsN~S~oP46&@g}3%Q+i%MfxWHKmKRmJ z-OTa-0N=WvJ!bI%+bm7)Vyxk#>sa9}zPYEao2-s8Y(EoA_iwC!0l%569=WItA2JWd z6aF=2JVmIf`Ci$F{c%6zSR%(*Q_9=0^xaR6EoB&wCIkNf1FBG`+CPCdzjJYXN2;@Z z>s251#Qy+|SQ_@G#DAo~0q2N6`t^acS0nyCVQ>3sPE)Mz!L@-`{rXEUh=d--{+p{X7~w~QZAAg9N5dnqD2Xhu7isB`7rufU~7{{UdW z_WPz8H$XS8%vAB+G_W5!;bbEW7Z~n4RY^6O)NWY89XWwlifCs9Y-3mFN&(sMjS|V4>h#es$#6Gu>(KAiK9Ph9C(O zl>GZvy^f;+(%jAtR1JyEeDm>}!HMDBa9`R- z<=JW(eS?6Ke-Qc}wd?wQ!%QKMcSDjhT<^yS(&xb{{Y+OVRG-SY>Ek)7DEBv|h4xeR zK6AT|aL#aQsf?Yc9OASjUU_b{8bZtn>GZEvJH1a<){>J+jt zVZ3B<>O0m)h&2|~@1?nTV7GZXrLHz6jNxRs-RF%$lQI^ORR#b{khBxLm!Y74Tbo|W{*?*E*JqaF^ 
z%{De%GN|iZ)Fno}-8m;`XKwwE3mYobbed}F{{SJapudnZ08`vn2CJZik%(S8X1xvu z4HqNj0=dmxGc%&&9c!vok>X)-b$RS?vgzVC+td!#X4gs*GOjW@=CAo4Z2d8V)YXWa z%5oC`is+$9C#mnwysc%q57>Ur<#WX&ug%9_N_3N#Vh37g=Hn*4Hqur-EDR*tZ*l90 z^Uvc%EJNe@RBrj{PPbC>W3_o$hL6*5Da9jeNxzd#y|%lEE6Z;nkybz%Ne3e!^{W@x z((0G<-c5r%u3fRv5_@}9OKW|@=~hH$LxIP>I@g=ZugrZK*y+4E8y^kpGLiQsgE;My z)h?Z>-DuZB^2TPioM#RIKHU!$d%%sSu7%=AwJaUh==RZuDrA&400-C-aC+8aq1(^Q zaf4Vj%-XTiozqPgXVf9RvT;3Tx~5r-0;*gb1T8!1JwpUWm4+jsWQr57gN;_G#^Eev=AYrf;043d4$ORpLEANrV#j zG6f`dU*d1TX1?FQ@YTMh;S22$M~E8B?D2p&`9~khynIzy#!-)%wlb$9r{wN@MHFnx z2TWF!Hva%9BfUtrO=%;=G8K5(qbVGMI5jFG1DtYdRC%fJM1I@hiI1MxE1>Nk2V!3Zw^ zk`l+Ofyn$td3(cww+B9zt>Fz1RMz$TTYV*5-J^#RFG%tHN6=QTKGqHospi#+OI;7D zuA>rZ_Yod`Q(Qr|Y10QhR+RoDn#N%j+}3Mwi-|BBuhSKhn!GWw3p_GzVt7~Y_*7u( z{as2mJO2P%9xZwjs?&lsbj4{WfrlTwVT$V-S!UZ3ou{DBrEvBdtWZje9FfZE#faMZ z8T>1L4-o{EyG8?J0H|(1F>BIN-7RS48Ww7`6eDI%2+htmjfr`XqXgf~OR_l(op?zOaG` zcCa#n1A2$0yN^TBRT7{24B!{{WSuDKrb>*^Lm7cBmV}`k^@% z>c`ouMmUj1LqFzfWQ#nBBp*gKqJ_4X=0mGB)9^6c!@9e4wfu+`AN(U4u_OH%kb52(a*5#`N>9BG4ttuMcsl$Mlm{M!S5}QUKk@M) z`r@Qnr&GntXWd0;s>`I2o2yr`$}fa3uzz;6qr!Kf5wTb4U2@-Cfz8H$;3A#&`Nvta z{{R81RVDn$&Q+@a03pSn2;O6t+xpYt@Q4LW?Yw}WlKs|3nV`oZ+uN+N81{{YgtWlog0sLJ;B zxRAS6AB)R_pzG+s$I~G6i8pqht=~4Z}I`w7te-hGL zTZ8;Rn5zr+JZ)yj;wpmaXx#kY_suEO5wHq3*KZX{oEE;({l-pGmhpdiV)@~YC_J~z z2{W{jS`m=s1^|LO9%~@!Q7AI15b^XRR<*w6C`isgt_t>yl8d?34Pkz!Ux@SzRImX=-Q4?ruY)hu7awjXV|KWVd7 z++G#=htyY$_}j#C_>)j;nPPY#3$h60QV!q8sjiGJ8ltS5S|hd2oqygz-5hMB?gtzi zl@n;|#XDgmk;pYPQ3p5&Ij^Czj)ldc@LQH_^a8uz2tjqJcsIn_YA)+&q5D7>>i+=# zp&NKC_?PBhG`IYCDre#oppPVS@Djm+m*5^5Q^8z{=#Ji85dgy1`2{{=V;GmtVa3J^q!GT9n?JobAB?`+C-N#{lCsseA&K91+;oJdN{nOO)ZOG_*v0 zNTp{98Rwd7NT6}o+N-;k2Cgx_=a*7xXo@pje9h`lN{8(Qurq<_Qe9iN?Z9zW3~UW{ zMDi*(a!F?-W3Lq)F_{}AQlu>AuiD#W;~$M~j#+nSIV>Q84O4XTk9-=@Ib4D;eJVL) z4i9SXjNDn{)}X2_HyLd!N6+eO)jS38r^MbdNT;;75WxKW8U_+l2BVU139<$L}3R1F`uM%Vkb{$hUJSMkRUAIjZS4SSpc|!Kf#= z+<++=>CHz2e-P)j3Vf{Zg&%h8xBx;+V~&QLqjL<2A*J#LhZJ zpMR3!X-b-$^=wb<`Fy( 
zo&c_c!Tt}_{9mfar)hSk;^F>cdMYpXnD!OvxhSnqMlSk`rRh_DX0_fw(Y)an>~X&t z$K_L-a5x=v&{xxze*`WU;No~iWwkofU5!LtmjWXvE4MvyxB6Ft>pmL2@ZPg?rfM)7 zYs+@qBxm#^*!tHjD=FU0(M?NEtNOWx_He7o86LFZ4;dewT$bM?^fZx0jtTnWs`fc* zGK<>)q#sX8t1X$_2moi+yN!QAx6(AdEXuUT1>~W^dc&w0|5zli|ExM>U;8fmT(VXOa(gfT&{Oac} zr?rHprKN=%ri-I% zdQO{dWo4wrZEFGu(l?Y0`J>kT`^83FBW(TU&-gOAS@^HtocO~~^rxK@*sX(Ppc zGI&tv9yqy`6fLFr`%FW&Pu?G&^R6i&Ba%NF`YYlWfV@%SjTt=6Hp*|ZWyT2MRV zitst%R3rdYsm9aT*G{~*mgl#FuNu^pQ%$WjBZU+?5;KlYYoPHZ?v;H!EvH#EoGTKm zBQ}0+qdKX(S6{3s+5o`)5aF0ClaYHxLOL&IJgJ}TFt(~{yVR+A)2FHSpGyZ9f% z7QYT{E^U_jE3HQB<}wfzg&l{reDxLNwl@hqyX=}Hxm+9-uJR8PUwFGu)8wB{xRUlc zf=6(Rh<0==eNA;yYBp$UE6_xD!+0NPh==B$#)+{j2d`c!bTEAq%s zY!OwP!}ji7L?0mIE!5Nh0J1DyS1^Qi9Tb}2tAv#2%;jt9Zwk?j6jws_n5+GyU05^h z!8onNeXWM%d)7veqQN!HNP*N9?A6mpp;|=Xsh7+>a@E6M2R6B+}c5!2=*w zDI&9twy{C~00OJJ%n3ERM#qBvM_R}leX4K4$G?XyUfKnJrU}@?=Q%2!24UeU_gc?Y8ZvCsy|r zqv6da71ZxyeUmDxl-#+-de=j7pxH?ifomCZrAq;VJ;goVxBJdolY||$XEz<(MR-y^ zqned2qCL8I+frHh39c=Kt*Jf^akWoO{c2YDeS!Y~At(G7{{XLAdn#`J{{Y};?J1$L z-auC+hyC+Xt<)Q_l|O|zxg?OoZfA6t%2ATyJc0G9Bg7hc84@`j zqBC3A+=erfjHDIM1Xe}1n`k0JP?E#@#)a={jN+iaH2y>%@R3*%!%6=De$6QH1&gl+ zQ~v;L&0PMJK^{|b`^~s)%gQ-2p1pTHQ7$wx|0Vb8%6#$Gqr`18)TO= ze1{CIGK769r;2o2e-ijM>elWQc@hSi8Hxb$l^GbV*|i(QgJamn@tz4MHP3j5Q@gs- zZ!NU+Nv~&#lN*WJ2t-hoh+FM&+&kdE|+ig{_ zmmX1%c=g(O6^C~@QGz~|EYqG&e=5$npRqYP>6%vPH717L-mTox{iuHGG%7rm0G~|y z)(rD*1QC!wO1F6-Q^OBx&6Y!+IL#cUQ>$*H5p3Jc`%=uy<(-~E0Eg|I_Vuhuty!Dp z8ON<_J7uwq=8(gK*ODmZvB4^w$2{;5Ks*{@i01%vTXR_X&ym3%l(6ZFfD_AlnhtlV zM$*JGW!E|C4ON#zLd2_JXQ8fu+u&n0Lh4>hfzq{tzQ;9IqOQy{Vk10s^{1}bushYp zP0Df7pq2$3_pa#EJZaU}LlW-SdvXeZaniEpk7F|S72e$GODZrU>0IUZkm;Y3y=vk$ zs@TJXv{NLA#aRetV0i?0udRLwd{wpaWxO_*F0ks*E+j&6=W&h8{cGhHws@OvIjtQ6 zf3j;fQtotBnKv?$4_|7DK}}hkQ#{MO)C)VcFDFzcc% zz)8T#IISCGEz^K|R&+9Ld~h?~wCsxjO8nTU*!lcL3F~BENX(xp91iu-PvFf{NYeD} zQu@McdkgT!(s>31z;!EF)~9#R^A0)1Ye#oH%Mg}XWJDN6cO)S89jnf+r6#$4fA9}; zC!FcWk)vx%w2h$!h7C64DoOH=2X9)f6~p6$^7~Vj!6%YQ?rR!!T9-3E!i;4pb2d6M 
z2*wUN^IqBeJn6n67k6J^(xkc5^%W4wJ~n~0wNm2k*4U=+BsW$8RfVU6on;ZU9IXy zYr{T2cxrnobd6U2>04Tn4ZZcba85@k>yP2bA6ohYLh%0phznfaNvmnG-8-KyG>3X3 z`g_-r{A&0u;;mj={{R#V`)yX<7`HvV3Q^!e*JwRQdh;pHJnGKham`9rma<3BsUn=I zA2)i6RaoDSgQYhB5_6GNW|wimB#Q63(+3b_mlc3)Y()XK*AFn!=>CJ*+G>A8rd{f!8>%wLCGdwZ?&^ zc^68NeU>n(7XV>E#eDVfN5Q%uigk_qdlj#Rto#up+FD%P z#RL5DDU*2s_V%xw#?)U5Z8hX~Rg@~!RBfz(UdMd%T+I;)cEP$2mnOFK-4(@(m%W{d zK?GNCr0HrvPAhACk=1E7 z(b`6HcP!{e)>#fXJ!|EEh?-@tjpGec(#)T;EbL<3a&UR;-<;Rbm-=125F<9Jb$f5T z7ed%$^rzl!@C5YpO4PAIz%5mjR_cNte>Qz0XO{e`|%=u3w zW7ejYXOb9@l2SZ@Rc=b#+)WUZ%vj`QS6++(#zitKqp;%x>s@x#`ZsAL&cnmM3Oq^T z?HRQ@3kI6QC+1lcGXd9VCz|$6GTO-L_V$)c6)j^_2+RX19dbQ$Tvx%rinH5CYvHBS z=ee9@TAOzXys_jrpd6h0SFGwVpORT)h6}?c*P7g5fsUE)SxQq|X>Y{vt2xT2C0MI; zvB@1e@^_cYh2$gE$3HNo19Rw2~}5@f>`XKAEMD!E0q9 zlQ%K8bMt)Qel=~h)AE?+Ssh5?RmEjib!YG;{dofR3t7Lwb0mvx7?twn#(k?+Mj(*5 z$r!B5wRX1|U9u~J!2Np~&{9Ed!xiMpEjFdO(6vdUH&vO##5UvS9D3C^fEPIg5_?sx zXrDUNrog=j=$APfR}lUh*GG;29>#WZKbW0c6;^ z))&l$fs}fDtUjWwC7E(TBoX@8B`%3Qs|lf$`HS}@UJuav*HPj9Tl-BK8CPZ8KYd4W z>OHI5!(vru%Ps!^Gt9(L_AA;_mE-w;z%C`9$;mks%Lt5mg~xJJn$arTdT?s3mRnSr zNa}{Wsw-R9^f)0mB`;6%FC=FJKixd~aap#BnAanNTV7Edb^!kXDvHX+DDC1TWRc#g z50w~mD>Am;i;?Y0EzU=ATLRW50GQMP?NVE5@J!4ZF~?AAZfm<(!O2Bjz>X#wjKdrl zhd+&7XvvLu=M`dmQ5K}ZM+QIZQ%4Ty6yu(?%_!ZpTd8!Pp@-rq-8A?ojRVS#R0HlS zos#nFf^F2CbPZjWt!(n?YO1@ON(lU`21^T;2XtAge9FlkG$jWY&27qSzUxXNWZ4vi zhTGIuZI+*JW@Lukvp7*TS;$m9AYyFbL%{ z@s*Y?Mi>U zyVUk5-O^6Bx@-M<9@(YOsbAY$&HakXu7QF3LJkk(TJhOl?jP|lIrQ_FkJg~lJSD8y z##CM_uT8QtvwH>gjF$2Ftnt3wqah~*H%j{%{5P)5o5X6wbUVdVpTJjo==(mo1Z}2B zjFQG=8ElR*^fl?>aW!0$^+ze^P!OF>4zf(n)(&0xz+6x)iW+xPEWrz2BWFTd2ek4q-!0zl~Rd@ zIXOFzdeGMF?X0dNTU+$MjiFb05ejT#IZPg;9y3@o3=(~NRPU)0sG7N-a&gWH`qfPC zW4j#ptJg6w8<|KA(2>EalE>wc4mlmF`AR@QSl2*@ zK9#w0fr|mwvgLrY6Vw{(qiddaDK?4&rYa{>)~+-J9tYBx>9l)SqZ7lG`kM@$ao(ER zV7LWJf=_ypToKcny=uEgSmV;H-a506)ihBIn8q8mW5Z_fU&2{<5(0lZ^etA@N)eO- z$BLzMqJpa{51DzarJ>ZIy1%qA$2{{ZKw z@cy;*?}jWdZ?w3C>vpmt0ptseDxXZAqP{b$bKxv7mrV_V+GX)Fsypqu0>X+TM-*iQ((1 
z_03vKT|HNH##^kGAKg_QN$FPnM_~kUY1c4Ypno+LoM)y#t$5GvJ#f-pXebFp@?<+Q zanxYfgGSSuiqPnil;d>&00YK#i<@b+JIAt(;<9;I!j5vmbH-1tWys7)9jZqpbKBCE z7{rZ?es%3$#42|kZ4TlI6>)$?=dBC16aYEtN+T@X^G}^4Skk*299!5i>)2M6mYp`C zFpk;@E+zitYFN|}LS4Np(tZ+a7Md2Pb0q#^$sz@f<6M!?BDgEgQBB2No{T-n-X$*q zXqwKIt4pX``L;KfiMlkJBeqfeN`DIP?0iY6_>r1o_HTlIU~!LZS9J%8?XNC22xopU z>h|Pu`BGonX}0=Jz=uy;d6cWC?snu8&)21RlCABc(}KG1`F9U!y{dCo=yK^j%$EA4=8uabX3yd&_%7j^#mBQrRVV>0U+gm*Xs+K8-Y6dq%X=tnL}vrvCt2B!er{ z_(121=)=x5?I^~~^JRy4y6TTFG#2k;%v_^fO2|C5EJ~(xfKLLQ6dU&i4bNN*(Hd?! z0Dd*pcq~H~iS*yK!qeM%pgeJmmmP=GHMC}%vObb@-8eZVdmg3md*R#oWnpDIyZ)?ZK?=nTXi3NeHPG0PF3$2r4S>s?qOhj_09Tyj_`a3i_!R>s z!C)8?Gq`89azoyHx7@r`{hdC0UsIkJ$VmB^1MAH?K?Li`h5rEF;MYSwk83QFZiREW z@&+o5z8tla?w3q+*0|q1biKzK)Ly!ynAI%W+7CK642(jQCm?g$rt+Hv=i098x>DKc zs=JHslY)Mf#EZ*97?Z#?GL)K5M$vJ%x?*c+OWN`CZteJ1mA$wi91eOKi&2EUmiu|< zCa>G%A{+oQ=}wfjWmNbgNd>%P8IMkw)A-xIfhP7c&@nU z(>%)eaAP$n{GCAuUSjX}^r)cXW|;6X%m7 z19$YSFA=*)>1!F``G2K!D~EZp+>hm4#-dD`yuf|#5OdnO@p9#)?bN&?yzcrHZ1mwY znCTEa@%mPjRz=85bsI-l#{=nBtnEDYSeGjrfs{TN3YO ztL$Qp5d!8+%HFO16;|^|wM>WDyNBLp0E#tjcH-a|;QhpBjP+{d?iHlCVnfK^-+psg zPm^e)vkyYGO=l|_&7u?q>Ij(M9PU0hs z#>-tlh*e(90yBvR2C${Gh>Uq-nu)i2BhkYsID5#eKBI5|%yFJGl_2BmS@(L}m)4S7 z7=pLTK~tV8_12pc!sHA#(~MR`7Dn3WCUQywPvUD2X*DE|MN*?EIa^e4pBc2MulzA@ zD@3N^8>3W2sFS07iqpG*pkSnz}Dtx_-VL}zCr6+4Urk$;0zSDJGV#di8q=Mi8HhH{~ zJ3_kFL*AiHlcHNGt zLk%lQ#xCbsruer}hggQ));M4=nG9u*%-P^opA`Q9X?U?B5b9R;YaD`4Dm2DO=rde( z<+i1BcNCK#d4S1QjHn~hqf2|^F~yi59#v#D>QIA}Ib@sfulxhZ#88B4DwO3I{IB|+ zYx`RZX}m9`U(Xx~tHpEnb)8NcRK`k?-v+);xR-Mhd)KZ0)2pbDh(KJx@L7vw`!L)} z5zgX0{eG3>WeIGs`~`h(8k46V_38OP^QU$L5fe?3Zsdc-O%=9JE8D$iDQFOer7eUSo)-B|YDb9fKGif(e1MUf z!MzTsLhP*2=RTxyS??1o;ELQxa(?d=$!x>&!>@5#(y3>umLiLix;g1%yMWTzYeeQQtb;Kbhkv_o%# z1`nle7pA5zI+SC!MI}n-JrYvvx53hdFOF4E{Cq#)YF@>zDA_*+u5g$AO#=&$WGJ z;J=5ra`-z%v%9>uvYy^x(yRR3XWF>2vb`%Ecxfn9PE7S56~*>n57`{#`H|oQ>Y}_e z_M3%Sd^XBBi#q!}U0? 
zQM0xyp*6Gxmy=f2S2!SZ-les*^EQroHFjMw;Q^gU2kxG=I&M>GJ2JhzE4L?w?OXmH zohR8y7#xQeANjTY&%+t{K-wo(mghpW&qL&KTW!~Aq z$75P|H#&!tC9Uu!%P5Zo6)Vp?Rv*F(iNqHWNZX=~RL7p#9E$AkVo%;<8OS;OE9R=w zGh2c%d{ynjg*QI%QP^s)|*}OjpeHz_Q z{P6?yub-3B=c9|2lv#=J`psd5B-lEI-QTTzm*YE8Y2#gT9AjndynAOg_E&{%{Hu9a zpjkK{LtiHRTec8Kk@ck1KbMjqd##SS$n=Rs$IWRorM&rZUC<9;bxqgkBQqq zJkY?>MV_0hiH1GEU?0Z2PlnSimy0YN)q*=1{!{B(!bwQ>@lbN1J8xs?{VE2A#Y3E) zGT((RrH%HaXC>SDQ|#PH5y0nndatWZ<;i94TDY9`Z246i&3|1AQcH$@*aUtx=w1Z_ z?2;VkcGLP-o?MViwg4xbeii8c319S9&HyT(%D1EOqYQuWENC&0b7vh*M97RfU{pGV z#8yz<&pG|vM3aHeDvgG*Z>hv&wRS4KRfaKHO|^D;@PlgTrQ;~}X_oltX8x5|%D~4_ zfmgg|{{SGXLPmdjC>GnWLiQDemWO>QS_=MVP1FR>7C2L%C73r-RA=z4s8^KdX&Pei>YNkqYG0lDo1WVb^A%#u{NtX6t6Y5A znIK~^pXXCrzwZxf^kntRSA1JHGLEJaqKIGTAr7+P}1gTg=R)j6^DG)8=X8ta~Rsk)Z`l6Ho#;Mf0ZM_kO0GD zv8lb#<6DvF^b^CE-g5nxL)-Uc_NREB0=8SRj6yD}l-m7aA{#Teo6SsM8 z{{TuMy3G3y3F;gB!`8TmSw7V$X50xqeREwNocNm9XC$X4ylQpjIcj|cOe&=nXLF^P z=4if#p9_4rs7MAx`clNjNg3~6o%K9-9SVBuAG7h(%oUF#fO$Vk)7INIzxL2qJ*i0@ znh9ksf^ZaaO=VHpo|YOs(MYi^z}o#fV}n##LB0b#`Bg2vDR%(4h~){v^&e40v2D~B z1A`VT6y46xb}6KBzZR2kgpWV+-8ua0gz#n2wX%uhEFZIX>s|N7;0J?Gr<|Prb>_Yr zc1t)0M{m^EWF3{x>M`cSKU3JC(DiG`jLoS=>nf6CXtDTYRUZw`#`@+wl|)auoa3I= z-f5A4pdiQae@fxJJb^V!!cRrUJN@HbIH;(oxb-|tJx5VgPo~;=mwZ;u9*8`+&hX#@ zy>VQ0R}#p(#5W)jl?6aG=vvG^c9uCL30HyjtjKH#xiwbY*Oij zo=TANY+?XndvTib=0&hAY;bni0+tz_g}X&CnUYdd}3J!A6n~C|HFV+o8`SucAIDFl9(ReXBn54=u6Ode^Lc zQ20Hnd^Yl~n&Lf2N|}mBcM!o-{ns6hd6nY+?1mXW)!!MZE22~kDW@YjUY#n7OioJ! 
zp60e5+%P8``c+A^@h_(poZ2*0*+FF`&j%j0dfGhTftr#HIA;U3YuIVQTpSK-mQKe^ zX*app{2p&3#kSzj_Fpt(b}}3NpT@qj&@`*)?b7N!L&;vDWI#gub6+Fq)?y2#kxtfJ z5W}r~eek`Z)%-1Q4dFA~K@yW8Jg_6Zb5wC!v?|o3z39CSIHA5nyZcE9>y|-^hga3_ zG-zJp>r{d}Xuu_!5rgfX_0infSix~?7)2Y$AawSx3jLyWDHmO`(_?M6YX_WBe~I(f zxw)d_qA{gTrCN$D=y`{auWU8%7iu^9d_o&( z6|-ja;1Bcd)0F-jQLwhnL7(1<84np#8)ESVzoF`jDog>+%9_=?WOjv;ry!&;oPQqW=I|@i9`pNz1%{LMf}% zhpHQG510b~01+T&y{}1(#c2|DR101?rbcAEw^DdADE({OEx;cyZk6(NU95U|&1{Ht zSOP^WZNV_HT#Zj1)-r!8>7?3( zJr10y-y2iY^+%lA4aB!&#szjB47X_fMWt>$iPU1b?KaD6fG~OHyFUm(4~ewg4t(Y2 ztxokUkF2k6WiLj1{h8YbBOf_6Pg`PSwZ=FMYWfnf4@{mv8qL;;`(3){q-OwqYm)5w z3UR$f{d6kL(2>d8(ATHQz6Ld4|$KC!tYZqv0%lnZAqstG<|)lzhTkdp3a#QJyH2;`KfGQmStH;B*j2_U$L={?+Jg0ep_iyZ zwV=oMx>kL{6|kzAD$Q+7ION%E$7#ry(cq`2ll<0BQ^ z_-tZm@;z%dKM&hRQ017R%H;4XV?_Zj;~jd}4JbHGKTpv0u{D)SR$VS>u-|_lU}8N! zwI#L%!3WdQmKEQB7Dwj(tK0hMco+PQ9XU^*DC|J2&lAGYwXB_q zDsx(zXU%fp{{VNjR`Ha7@hpwU1!WbEAzp;Mzs$;$wk9jT`589TGutYIjy7}H16M4q z46lGY@$XUCTN$mlpSrRhE27h_L~;3tf_e(^QgU`Wt5#IguI!%!~J*%)9~pDg{8+SsndIsNn&lQI4RB%49{JYdl0;zE>DykA7=! 
zPJuTO=itgW~F1A+L-82pvM& zJ_(qC_hm!+*OPciZ@klxjBb`bKN{Ag`7FEG_Oke?;UxUu=zTwB5L+b}=!f}NpZIS^P`!93^(*@tZ$fAM03l_DD_$&mNWau2+h& zoK^KbA4u@zS{;hp=~l0My9QTR1I#~Kw`riZ)iz(c{jeMOr}SXNvWy zsYW)Q=QfSq6q3l69=}S^wzCBB)~1F(n}PV6(6@u3P*1f?wl|9EbUzP#Bz#liD@{Bd zvAbo@`y-QINa_}eq}oTP*v7G2M4>Sl>t0Rp1`jjFs;qXFg@XF#y~kA3TEuB~Ci>c&;9M0Q^7$zi4_D99p%Hi+&;aLQe>Ig67IhjGgFljE`F7f`*KOO??IX zPTE_G-6xUuvm0**uHMz)Ga-$+YWj>VS5lI1bHcApC(MjT(WH!nAI+ayj9R&JjyB`5 zu8^nzl6HZ}%~88^Fi?PCcdv1__dLY}wlc+@!VU&1Kzj<1!p!5XJuR>XI_9LSH=ziJ za8PsguIJ%h)Hb^NTFLTS>Y96=FytNF4wcwckh=Dr9Z3 zk6a$+y;kSo_l8+WgG-VXJwi{M`-7VEo+=k{S9UIRqZ=mdeEIu9-bDw463n*QZmv<1 zB6%TsEnhUxZgN8&K9%=Zi+^U%40whl-*Ko}On`)GT(BAR&3R{ue`D=l+%xGqoENFt zW?iKIHRw>sP>gw7)X(bcqdr-Z1GtQF>M9nA)rjaTyz!sIj}Ukg;HI&rOK7CwCB|cK zqjauN#H8ep!n;*fC3NlzlUtObPn;Unut3hB@DHt15fLX&D@xKHoyhd8r7N1nTSKF< zj2O^ypIled-wMfs#Z_BwZB*mwUoz?0!ml~TYv~V$vob|*yOlPC8sn|Cg;(HbZ*w}$ zeP_7mJvsER7yY4NE8?v>Vm?VNa0vD_>SpAfPR}DFoL8QFeos1G5?uVn9e)hgEm|i& z7fSIwhSmj@utor`k4%z%Dro1nnph`Mfh0^-fcn;Tou*i<~=^sh?rZT69KrTMZqkvvGTx)YGjGnND1 zv&7BQhcYl!m02Xyeud8qL3baBi%!upn6MfZ+CLiXW(3~4k~@D|#nWbn=KL5KGLD!P zvoHen&m@4q*1Ss99ZqS!#d|MjSw`Cz^a#}ImWqGpxaaY&WsSEoka5j<4~77fSG3*F z?*YKCT7kfmKKxhBVkK#$Z-`fD)Y4v0ET1kqevkA#y222$zstenHP?75(65O!%mITb&*fM# zMX+bFuFJsjD|nMmJ#bk5l}eqZC**xzBCqb+@;zyJQV;i!6_=}SZAk}l4srO_vW>{y zzG}$T7>!yp$NHb^UUJ;xG5GH!4~laa+%k4?6RCNLuhuG5UwhSTkx+%?7y?u1iYNVnt* zm8~5%_eYx=kxJap?ka1lt!D}X4jp;?Yhodm9mXpUQME`SSYw<^x!2_Pt=QCk4h3yZ zbbdt6+^XJxp+i|P4}AabPznQR-`! 
zdd=x`Xe6noHs612~LA|&m zupDNzmAZHR4rJZmp+18^U^Kdmo|7dkB2?7;rDCYUpMkM=3vD2TR=agJ~Stg6QN zdY+8VGPWic?ur@|9&Mz#Y~hstmC&O3YQBQ9JSP#7#%4e4pVGBv1S*X573D%Url~eVHxul4<2N`aFR-{(SBL#qV^)-z+I;@G$*)0rr zh^mq>Y4(>JABN!YMO(VHNnEG_2_06lnF?HNZseVn3B=6wSJy zMI3$=O&1rfpKs+~TQ#CNDZ92=+6Lq8RnEPq!x&#Nc;M$XaDn)N#%r5x98B!M{pK~4 zv^J#(PvdqkKZrE_$M^pL`01+h>)Lyh`}6oMQ*8xTBY=I)LfQyiw$OdYO3umdj>y7_ z`%Jdlm95mt6}*MpX<`V*1zCs<$Jkaiji-~TrU6~ipXMuRGq~~4RyJJP8cr%I(KGd^ zi`pD;cP(RH=~7a;s=}e+6Qh zj;Ezx_)iy|rQR_Z{r*4k+LOUwKEHguM$&qJwrk6q)}hs>>}NXVmZE`O$+wia|UK=OVH^Y2uAD!1`m&eo)>I^){-0 zcRx=`@hh*6ekqpJ#=3%_#xjTjHRpQfwR^2xwawkclgh`Jk+N%+I6J*|9Aj2Zdq=lo z4;pxi@5Q>!#r^lsE1U%bgkH)ki@mY)IZ?0aaPeEL_^$`CW0fN|+s78+QR zMN+RjYQ7{p`i>-g8FcrQ-5)zC|EbM{*y!ovvLyYa6Z(WgkFXJ#fzILS5Z8ZNN577Gew zfPev>^s3TPO8b)vj&Ds5cJNn+H2GqRQYBVBHpoH1uSc-cE#iO(q?vs_b6#EG?OruY zc*9|qAaFjF=vpqQWPzBBg=+GB+g+7DcPkwXQO>AB&*l#4D9EM=5J`p2q{Q%`<~R1D zE9H<35tG)e7mife*eXXO`qw2@8)y;cTb@<>N$K}GkHQ^t3vD~?cOzkB3zkI-8RF`$9duz&Q()X8ttp4SY}WN8q&jXODF_H2EQh)>ivO5y`;X+>j4$ zYp*ZFD5p*O{)bdzn)Ez9EnBdu?G(oOE6sIUhk&)MD3`g@<}rWINJjPjDufzvkdcgi zdW!ax(^?Ocv2RToAmH*iub_S(Ge;GnBdxLg>%#0Ue2DXHD=bGmJF;C`6rLN(X4_QxR zmV#=$Pd>DWu>kUYD@r^@J-w=hx!A+jt_LKZ*)_*)ZhaDMmIEENM&(MC+z8Jgp7mrg z7#Yo0ym_QSyL-nFz&mmmKI5f0y-1z<6eV&&8RorH;O?6Z#l`x~0!qIwH;j(e<&2Bh z85s4iZ}>ZFvF@Mlg0YdGOjj;8e6)y)(X)O_NTAnRIAKUsW9|x)=@jZx0qDPvTC*S@ z@RD4PqbvNXE8mw;VgLst`B#lo;rR-ll^3CJ!x8P)tvxrI^(Z9E59MBA;j#Y!6Kh62 zcO2rqA}9HWY*)?VU%sD_*;R8hr|{_wZ#O^3ULEn;KlrU>Y!irMKhC|s!uU4x=bxIq zU*md~@wB5Hc@h5rTKDjan-34`x#nRXE`plWzYZ&sxw9=I zW1&Av?K}dcYnHys-2N5SINB0^N4-|1<73hR9Xg)%&ufjdSB&J~MmYRyp(AXkobW50 z*Chv93VTK?=k2MD50B(eXD^x#4;Zgf@TdBoPeotlUQMTixs)6z9Da4{J{aQKKlbpW*$_=E+m0}MoK$b+gd0e2b!>{-Qq=LIvMAi9 zE+Qa&-e?*2t)w_Wamf{taT!Ac1IL&TTG&tZV0EqIvI(-S+Haa14E)`H3Rfj!Dh?m9 zJ+s`?=5OA^{_*})#*Joq5xGWO8s(eiZ~EvP+WiWTpj*kf3#jFrsmBEVH8zf9hF68y zlAL7qBBMrCpBRS*n%? 
zH;ew|XzFX53xVq!e=5~R^rXE`dAn7M^eFf}F{DV|r}|aa!6VpJUkL&Ih4o`rUym~$ zwc}g;o}ZxV>t3g+6bYGzKZdj+0FHXqOJw=vKK-id0s!NR#)`J2Rd>)+5C?Gls~++^ zYAP}Jiq_~}D>Cvg_?X+V?rN*Y-Cn4gGqj(fZ&3kYf1s{IQ6K3T9COq1uE$gV0FP-g zT!qlxa&znc6k?a}5!Hd&v!Zpx1K18KoI^LV4^6dtP&|>2xExg&kNR}^JuzQVE{7x@ zk}ODYIr`NIiSk0H-;uVDZ+f|MN0xhVYQCPK;A7l$HH6-_MzGQil&@~>EH0MY$@63* zac1xN)RIDh46zNWM=_3vH2JiMCX>v4%=zwmQu%8VFj7e)q3vFM=B27S6OEOgy-E@& zYuk1JeZn#-c*Yo>w9-F+Z^!VOof9n8x7z-@BcZ=_e_cu%>{-(!Q^?5A=Umazty)0l zGC$oUS6QqnI();9)rQ|FAIi9~DLE^z>r=l8IYti7+W!ENo-}K9e5*>o@H&01fV9+O zWtHD@obEgdvdGN9kCaou#pEOXlf`q=isxD#bEzj5n>mk*Y*)c|i=5@qXYsEt@H|WZ z00}mq&|+o$d9P0Rwom*b5~&ysG6F}|yz9WRkN8QgFfu;SoYdoG%P8B+^FD_v(*;rb z{zuYRITll6rMxdx9;2dSCLye7WmJ~8=CZSv-p00q4OD3 zZyEZp)atZZOzAMrKtaIz*F^>yTL&4gSHkjnl3W4C7Y4ekQII-x71@NTdmPy4a>_{g zzxIr_Bgd-ye*K(${v=nC+*^cFcVv%x_dnV)$hU~}`NvnZEBO=tHR00lGkoM@(2DyS zbr-XW@;PbKa!SXf{2hdUVO+85ZX5Yiyc-w%R9BpB%uUby2-VMoD8KNJPrxV7f5VDj z3&sBc6S=trn4A9qLa#Zyy0$8MN&f&{4;T24XshGBRZlyl4f%?|do@9mmM67#KN6uI z@SEy69G$qw(z2nmW{V1?k9vICBiE%H!5BskM#TfyBCgokZUvkT-7#88V%}i_GlDtD zr(D_?6+m7MV%gTC(8_y2tZ~#HgQZLMKofcT5m#Ewi1!bd1Fx+S+m=u76ItHoss?x2 z6@bAfJ*o+G?=ga@+Q-+WbvAlR%9&%gj+m&W(iK>O#3<)9U5d!z^*aM@NC$pDO7mTM z_4O@4#gbr$f(Jrtt?}oHp36|1O|&bxFV8k}jP@1B+S!x`alC57Gfwp;~u zZlPQn_*vs3*7~1Y^+eM>qf6EpNeL$^Mh$PnbLMlCUM;8if^`8}DL6UE71r2z;@Kk! 
zDQMe0)YfyxC%K%kZ|tMejN)5Vl{t8RZ^O8+8~EX5*7y2F{Avo4@VB@;itjYv7-`U- zp5a7p4s(%?D~b5idmYDyG-*;r{q4XgGsze<@D;tKTO6~b=(TmDI7ly^0-`oxt<)Zs zo#I^|OuW+W?q#uz%#jvZQFgFy-6|)U)iZ)|%~-e8mTPd0orSp!E9g>jcS5z#D-3}~ zSPq?duTt=)qZ--S?L82#2RzpNdPbXl01{eTOue$BBCW^Z4L0Wu3@>H~>kD!Cinok| z(n`ivX>-QcIX?$XvEJ$%l&B{;advpFOMJ-Sz4FT-6z!Q_&{;wd*A?SGJ; zt#Upw(Y07SmcFo-DAVQhak)P+TdL5h7s|||MXHKO<6{`fJfBL}y|=Q2TK$gq?Ve;o zrd@@6r#)AmwNP7Z?UU_SEv!#r)N#dOBfcwUMu<*92ZK^WadB@I%n?r{a9pTK<5RFYoZCqpP?M-+>;Hb$wVz72<#+FZWB7Hy0W0Uf2>F-|Y@T%QX!5f}pL;UN;u3#k& zN9A7W@U9Q|lR(6fJhfbU^{zZ)yd&yF{n)z`$mDpM>y9@l=jl;iH~Mqus3N7k#9k=6 z1+(*ybr^=k3YjqC@Fr9DWukEi2az2RB1XH&sE=DdsI+WgkHJa@h~OQtG_-C;~%L$?*9OiJ1M|6iRuVB{A*GXh{x8k%YUFLIqUsv zRw7Je(!Pm&DEYZ{Bb0vnEcRnf2NOT16q7g;N1yMXK7b0~s`Y=@ zLqxB)^{F-pZ)$y>3(9(tio@{lh~w8)*UK=>ob1kbFJoI4V2SQVdJ+7qnuOHaly)?$ zPE@&}HD-+~Gqqpcs-@xo0EvrZ0PGcV%LX71Qq`NNKHX8d=U}f%d=WU{uTn+Z2F2pw z#;?2VP!i922Vp*71g;IIT-|23Q?S_nE53Ic`eG458vDU4Ools5Gu6b zEYNn~)tInb>sl|1`s{4}6ho^f?LK_}0CzvFa=L}`Y0)dVEY3hg9nE#uz?&(CIVoOo z<6uM(?IaM4N9kOcy*=+E(aYDgmm})`07F*CR1>lZ5m8v-T_1Ai(ACm2s7IFQjncRe z4_*D986snVMnEUHuId>Kx6!fZ_e9q=7NV=Ij{Id0VJ5v!XX4TS0E$&trt5?mJ^I%N z@b)r%LuyL(oq4Y3;@=)0wFLFJU*v0%@B;q;#P-BzKfW2RT*3bK3)AyG+_y}+qxJlc ze7*kwk7@h?3dGUOIrZV1PUDezoXhp0WAeopM!Dsi?fUzD6X;sLub@h_*q#aEPl0R^8Ak> z{h%#;%Z)Box)jmJpzN`&M+&k(J0N*0yc|3Qh^*rBRC9w&@vuVmPRJol#m{nO|jT-0aQ= zT-ED+I4RuuCmak_nBrt^*e5k|Hc}g@H71R|g+S>E3P>I6i};t~v!m#;eVMl0$b7VK zO!YO{URx}Lxd%U>uQ~XKq(i**){?G0Mr-mV8nJ%s`@fVBn$WlK>`v-p z)NYkeX`uuU#<*QS#I}X;Dg-B{R~67(>so!n1xEnm1a3UjSc>u9Pv)+2%7afv&aO=a zrMpEn=1}q(U^zeGTUw@v;r{>|SX)@?)=}IT2ky*HQIPtMYnapZ5pNjsCS#A`5}MRa zOx%@Dy8yn?u%SiyVu4)ZV{M!nn;t z!Tu!B7`5h-*D-bT?2hq|an`-E$Rb6)E4zzPkKujFKLb}Fy1ajo$!mPP@ZTdE`g$3= zmiqet09QPBPIUbL0I#_D>eA}oOM(QV8HfC{mngvgHPz{QrRA{N9OM3)43EIqZLjDy zI^D=?-7+{;)MXnCN%hFA2KaK;j9*N=mfUuZQ@uZ0`Y6gZV4GW$&wb4uKUrdf&VYgH zm|^)<-D28568L)m08X}^H#S0Gy=AjFC-AG5+WZG<$^y5f z>QTqzSh^`bW&UJ8Ba76&5bJtvyG)A>{7d(~&e>u38s@|=2g>Tm>Tpzy5nkVGuDHs> 
zJa7KQho8>4KON5wr+IGi!mE0@6EtTaj>flyr6*>KgrxM5$i0^IBoD1sxV9GR81xm; zTSxn_O5mQg6~&rPgfI*-+*Pz}n@1}uRv;cO3rnZC=+{4QBL$ z2k$e@U|HOG@yRJUCCSL|T#}B9(8`wWo4P!zb&F1LL2u<=fd$A|$vpil%=D+ut7yRS zh9~l_;v1mRoNy0H@NrQ0zDKJSbdK-CS371nILgUgu~7@vkoz{5e1CW3vq# z)PnP0nSlas(8nFQLuyI;;ECMGzj}_1AlJ3?UAKex0Js^)3gl|=v z`dp-3NAxwkTPa-Dl_&3G8Oa=1Mf@NST5#%f;$!b2z8GD)WP?2=YS4vu3Zvn#`3FtY z9&^@>x3^px;->{Fa9&3pcWRS?aL zP+gCZ`T{D!>ycHj0Wm&3t16NA8FBcLUSa!*G1|Gi;fvxr;3osdC~^ zmA%DDV!Rnm#ZPZ099OF! ziJT2ynk-!9ppNRaM{`0A#--^Rb{tuT2cisr_zbNcRj8DH^| z^Zgmg!SXsW?_PyxCT`Q(8uR3jdZ7oQ{!nw}|NMhFPT=$r9S>t`|M0~7zIeNBR zCEou4sqY%h`z&sW=pt(Jc$Ab*`4!vvuUXk>@W#ZvpC=uKaxt%z%DgrKuIyy5G`k)i zSxwWV+w1xp-V4IXc26X*71cm+!1k_V!b`PhJu>yuLft(xTj8Cejy!6;Su>INp8&Xm z%Z`lS%DhKd(@IMx%Z2O3d)JMj^Ua#@*eria^F3ovR&4aoVP8o`Ug|Q#J4VEx4P;AuD_VS3WKM6u@{69G8JjXRpO(*T>du$+AML&jT9dF_@nbh>-HmQFm z-;}>z{VSimw({h|Va_YirMFg+=$`MMD~{DIPudCDfJafXLON=EL+#1;;V?s zA9Rd#t)#WugC`{Ss#CzpApvP>zdir^^t2Mp+oYHwa4l=?{y*GKvU^iwR5JL<<#W09VQtW%xQ^O z@I?@4AvTxGpF`TW88?td4%1HnNsQwo(vDPb?Hg7;cN&D#FxZbT9b17zNjo3kTqU}H zvw~`wf?1e(v)~TfwvW=Lx3v>CAytzd&~E)reLS7FKQe;9!khlQzs!{lp5k#O(;r-s zR^z(zRYKk)heJ7A$&2(=Kxonri%>pjL~8@g$Z%bKH|KL$)|>#VBXuK*vsQDZx>SA^=$Wq@O_lK4YJQ|j-NJvok;#4 z)ve?P3(5A6`RWEgtuo0jB4D<p4!IM3_Ww6DBNcc3kg+9Qs3xs*V@QxVUvHTh~) zWjk-{{{RI0sahoyCaG>V%NLwGuvLH46)ZExHkP)AS(xEt+GApVmC)N?+B>(}mP=lG z%N|GOD)stX8+awQyoz9CP;kH;e>&&)Z+8(nHKRsC=@)H=I(@_{-G0q3?0$8lYi}j4 z5NeV`Cur`Fj8|0+ln}<=RO%!nBQL@F)txbg&Cs{MiY((G@WA~II&xFi)<-nwD8$9L zm&6NQ$@P#e^~0rsddKMV(<2ChS@&Wp8` zX4~p3I{xNsyW(epJ7$REV*TQA+|-tmtc8v`^sm39tptysz1F7wmo>JqWeIM`@qOS< zKN_)d;d>kS%Ce7fAOwDOSHZ*!tFCjD1palt+H^XOp&y*3qgCCvI2okfrDA0HTeCd- z#l8!NL$|kdXfiv^&cMT&|7F}JJRzYcsIK$EF|1Zjq-T+j%DL;iq(xtWm>wmRwEB#h!NU=X zo~2JU&E2pn3FtYl3`B0< z_0;OY{0sW%jkc3IGLh=M{xyG3x&Ht}LP@NGOXvik%Zw5D8nLF${)~W*_3TC7E70I^ z(q5*KpK*(veCMCSv3z4AE}r;27eSiPkE~?$4_SUN21_Cr<^|OLhP?b-Z%zLIT@K7D zwRqq9)TQ8De{2K611BfxUCeR@y}8NH0=QoXF^5#-ec*WfE3Jla_?TP(G18|FxoM^6 z`VTN%)!Xu4nYU{;AyeN5u=O{_p}3B!D^fSx5dh%ytes7>Iv@5&CcECLBh8?#HqAXK 
z?POuV$gYe(%09Ks*?hLqussN_hj;Et`c$W8&Rk4x(=B*xZL+vu`fT-~XUmVJXXpoQ zt86O{Z&Pb}^|q0DrrTUH zr`u=p6u)DP=A*uiqe%uh#zk;qpqDo3mqlt`j(0L52h+un-Cm-a-kUV+v!eM2^ahZxw_omOG_EoGW^NUP-^Fe z9Ql$DNi+IZbi_8(=xWb}@(&z+3%ii=fm zo-fCr#8V8RWX^HLdfiVpD)30v5@ZqIY5rB`-ZUHHwA;=LS8E*MdCw<r ztlc)%r;+kN zQ;gQlo|$h5%Q2C=0yw8AIkOIUQ%M&5Ihf&d{@!t26^x=-_a1P>S3jnpg&bh0-_X{k zmm;c2zLFZ+mW?CG#mAePMQ;}Y~D=BYd)5FD>GJ>wz z;X&XEk7=9c>MG>>WRQ-)bsg!kLJI{SdAcn{&rRrxFKnZ?G}&Sp>4R41Lma^*TyE$= zrp&ojAYh7jC(PnJL?dR{K+i&P&2ZPy;g2U7z#g^e9wdcBGYsyoGWH;^pW*9@t@_Gs+RH1b2`BcZ#2%^QZxjXKEu5h(9-L3!P0xpGAR6? zDpdoIF?k2lv@EqaS|K&b*>J}V7#JD$0=4b*v2n83XPRYl0{WBCRu$Hq+IFnA_XuN} z2RV)Uk3(NUK1n+>^SJw3X*B-;;2U=i8&7!xvNDYBCxP{>cWcPz9%SU_1RcLC3evil z%+hW^z#B7~&$gJ{c~Qy=CP&ZM3c?bKm6_F1N=w~loJ`_BnF5}dH=t)QV)b?4|r8e=iuHlYr!fy4wPWVT6ad2j6q!J`~`C}RA1$`Ahnf;@n>z2S|ybct5 z?XR3|ZHrqhXmjRaK4H-GHRE7uMRT^bzu-w7{Pf@MXw;yd^H>zh*z23aJ$3y|FJUZfV=&w<)eY*T z^%d3lPV&~$(PFvO6AL3a1mw2kox>e#=t@ggf4F` z;z?wUh*{*rG;59m=b_DA96WGyZW4BV^*roq_LQ#IUWSF`$h*9q!wC?-?%SqTmeU~! zC)T;$H(a~b?rvJ{z*w|RwMdh9nltykjc>z_IU^#zvJc!xjN;=U2L1itKg_cdYk> z!i)zT*Q80|vms_np|d}A#!XOv1Ct?VxQK@T0A$oEhjeO2+OyB31IgRS$;K}-7-xJEWmUef<3Bg)lplWm^x9E zWS-^yKX=>Hoa7qZPK*y+cC2d{XIqHbppjH{7_GSDk(>QYn|*O!HX>Zpy6k#zk0P-? zp21}!BODP{ub<7-9N=^x%Bdt$$3F1{g?ew?r$*t*}i&yPR0ns*#?9 zdmf|Srqg#Ws|f?9YR~pP;FAJ9>FNz#u+Xm7|e>H#E^_}1<7NRqlq6pF*A%1&!H%=-e8u}dQp$W%4vxI0+urKDOmEWv0> zjxY)4y;i_5wup88+Mm+At4+1JyQ<7irHU(tl0=LHvz(u5w31q$ zPB&X5Wov2XHOD+|TEm_oQGpx<$gOQe+}yOAZKO{yf&&gf6;jJazr124i)v>a@JOyX zu6+_ZXthoklP0#$_8ITNrdY99{W+`h>Gx3B7$LT101yEpsil?ZPnOCub8yZd~Qp`5Ltqwc12C*~{yJiicWf)S*Zc-QZwyS;rg-#=7T? 
zk+ms)@jiwgm78nil=M?wO`|hj%*11X{HvdjN}e*O8RsWB9M-j^&CrxL*x9py#4!Bp zo|b>|&sB3;#;%ULQtWozf0b)kFpfZ5 zfCpOVY_Cn3y7LjR{EfA*VR9z7GGK&p@{&2HNi@~}0Ig3hydB$pPbT>5;#HpOOVd?E zcM9TVl(sg2N!kai5IL`7@V2ogso||M@>x}-n%!BMI%gH*9~m^0apHX`;sIpxS(-*% zjjB3Vt@ueJweZH56k}?L7mRUSxO%ht`&w{tmRhx^$9OQn`zPfN%%b0=R#V+GN_5mXvLf zd6CTS(sl}{PAg~N<;-yWI?|z)@pADQcH`9X_*GNpc)E&DM!!XGTU=G+P2}_?74Jq* z@xR@%+Q0#ToEqn~?j*K7xvev=T3eqa$(QT$@+$*e)S{Tdfo+{+86=ToB)1S4y zzpD(L#?(0IYZmBYjC8GrK{{Yvh<@uZtD>qr5 z^v4rhp!s_4=M`E#QtUVr#~KsR^NRF&k>}H-+*3ySxnE9|hkT7Z!9dOd>-DRvcI5%` z*C+j|g?!|l+sj!4V;?>-N@+^h94}3_j9ZO8nNhtPn$Wpcj(BsxDo^WHZUoXJDqE`Y z)W1rK@(FI0K^tU6z-;kZ#++c?yp5+)Re4kCbBgg)e`wY7)GQ!=w3fDm%(vsxroOVd zYx{#f0}L<9atIYIyDyxCa#x;eZOq#}NK2NKV!9(=O4Hg!b~!~YkF9DOK&o+!`qdpi z@x)~raf6UStE(%>anK4@Nad58V3C}RnoTz4qQAM10HADx>TyzFu5dH-sfAFoO;A`=a# zk)E~dQSx#(~>fk&px=hAp(z>R$b)m%mRVP=DUlp1)^jmr*3nD+Ol;I3b1fu zP09yVsCA;*>(Pu-M=Y`KUBq+gPql*=u1-J*IIf024UriVIY;U$gchRS^64ZWFd%dE z>rw3!*ywh)g)K@2Rf}QC&1GLdlAqGH?_y?_JTSq{VQYuVh=KRLeQQX_l^v1H>r5UO z=v%QB#9qe6QZvU~S8*1FtLrWzu+#3YozL(@a6c1W$ArITjaOQ@j(v7z((Rj;i4OUg ze<50XNXK>)sR|16XNBng5!C!$tHWWW*-3kMD`8SGkO!+C!`{93;kUybGr^A>?FgGx z*@-89cD8xh=qulRCE$${;r@#D+BKS6e1tsB^GhkmR%-KWXFqB<?@uL1%>8Qr0L7~~&8C}i+9_``e3pL6A0BCJH{1AsHy6&{v7u z*o`a1SN22{5y-(u`#nW_55e2<9)=2ytg|=-`qv(1c~QFSV_59$dH%Jfrjg@)YQ{W% z?)E}Hhml^*;I9SEb)e~&>wP2;M;UpodaT4}X+D^*I{1NPBlwARa1;G{#UUfPBL=Z!X^kD=yPU%Y7OycXLvmR^9V#w)yElHpEyuRrj_qCIXLbYMn* z3h%cADja*%OGa{3-sLY93~2WaoN}c8b<9CjsP{A4Z=cq%kww#sRh@Va#w14p0&=}M@xf! 
z2Hfh*L5xILdy1zN#Fi@vT=fHr(DS1zNpHgyAKD`)ZZc~pBxxAgSt6k?BWGyOr9pFR z=MY*HE<3XGS|U&hVYPVaQY4Jo+6UuM<5u)ab=jZA|01H5*Uzkcu1x&HulYQ?-J6aJ~V^%b=w1RuYg{&f${o{d-R z(%jM4av+Jg`YELwy5yQtjQA%99@S|iVC8+ugz(^D59DcHXxMzH zLG=QW6^h^vY0@!)H=J|8UsG1xQn`xXLMV`2%oMKJ$1)SfN2PAVG(kuAf4T=s%z@Sj zwt!iQ%M;ww(%|mt`te?K#6>*?Qk$8Z;+t6Y9a1B=COb(;IQ8PXeHuR}NwY!#+Y*d^ z6^{1i;^s*Vn@dDv#cWzjHh!+{LGIX#wF7C ztYO+Tn1Si?5m|l>jM(Y=90o9~EI@2@90Q8z^;7oCDPswjD%dCKSsHX=)@u@5kru)} zg0rbKt0_nN)qj#Sr0<7x-=F0F0L+^6OG`P`qWO|$+&|fF;+8va-JdXb>_uoa$vkZn zDB9ie)84VAh0&fi$aVdF>$06#rJ(7#O>)WViw$UwzrsGGRz9<(?gB`H9CCT9Xb#iH zTezUWvJbi{=A%|NvDZz?l&^D{zz%s#`_p$2OMI1oQ?M5DZ`<;o_~Nr9QX7>UYs8dX z8%+0MB~e`yB)J_>4|7s1j1MJ7JA+yAEBS9IpnkT z%|UHzvQ5pCp5B$V_7TsFmIv3?oHWSBHnY_!B#z!=JyDSSxde2rxRmV#^&^~8EHS1C z13fZn-!K_oy{joo2n-mBe6Ah5cTjsT6aamPxO0JxG>GGv5E#~ax8&!t+G zY}wKo<1?Im$cGV+Y_+fp{Ah%e77r(z(@5HbW)4 zFx;aoM@r`P%|2_bMPA+#?2s8I{{VCxS7T*iCDZwE#_~znD6LI4!W}nV)I{VWMFepN zla1u^;C(9KoAmhMRVfYRBn$(l;bp{y8i%?>Dp|fIEcVdJ1D^Y zD(pJ7utKSsSatHoaro8*aXrn%R^^F@VUd&^ZuB2Yp6+vLZdG=w3}i1A#R>8&o-ImB zavr1xFn(dj)YU10k@}G?S0sVAj{fup3FVJx`o^ z{SAJLT0gyk^VHw&XZ?0KSQj=Y>CJn^i1Rc>2LwH?2mSM27XT+t_8+Z#ev;5Kaxys}MMA7qs|9S9$Vc=`f0 z2pDnlpXFYm;TepM;5QyxoL5$1AKl!~4)td%<4^Qo8TpC;62usfdhGrd-YM|&bB(tz z(zvgRVW8J-WFI%kM;~AQy>9$2oW6l5I0bGSit{lmn+X@_c1Pl9=zMP8ZoO+TY;KA* zIqUbU+%!@Sp13`*2jDBh{9+WzX*X6IGyed6SGH)fY_o{&^f>$laP10j%k?s^tS#nw zwVoDhr3VB`-_Euf*Z>hh?!H2zv0@-hDJZw&bbes%hTm1k+`WT$yy6 zqt_{#_O!?st2r&yBdjKt7_J zv6pZ4Ko3z@MwdR$3}2l!G^>O;1p^=Ut3PcI<_jiQWe7Pz@5M8DQ-w}Gm3--!ET?fp z{{RA^bo+tbG4-R{L-~OuQjd0eVEfdia=F?M*QHqg%AEDsN%S<0r(6IA6c6{QD$wo@ zDeOXv9&o{PPx2!pAw?_cla=5n*EFf6-39rXNdEwOw2gQfxOs>piAM?t>rVN9xu2ye zvA9(uY7jf_qAPoeK`0~R{kqv!hjT7RxFc=J+CM5&GcueigX>eKnRe<4Nd3NgclKS` zTx^f=dcSQ8ZbvGYzl|8-W&wylL-j!7g4DQ;a*wv(!6pXTtI-2B_c)gjUrjqzeYGf_TAFWIS z^xQ>M7g3Hxf}QsGV{e{i8TG9Fs_*!mt`g{rYrRk3YI+$5R%2BLscV7z^v|wof3>Zf z08W|h%~!QcsT`7h3!$4iEZ?0-4_}!62B{{aZrvqPdm5E*ZNq2HQSD3GuH?8r#kJjz zrxj)q>PY-*l3u~yMXOQXTP_PqJ+W09iu;&OGVEL1g)h8-Dbm}r#IiiQT#geTbbpOU 
zVSjBs!Det`iLeJ;x2-MRz?w{njy{;GsZu`8wC>-Z;&LS}bebS#JHOTw_*25158h*+ z?$ZK{{{XHu_~8!2{UaRr#dzAbb+)a);AvS~WN&GSJ-DmNib4J3egdjN_dpjXAB9*5 z2mHJqYuTk6M%FxvjkkOL0!bE5H?gWOYKqy*@zb7_d65qWW~$P~4?{Z&#e5E;}3b`mH!IqU6DnQhebaQ@W0Xs7!dfXUJj;`v zuV$`6bEc&65YwO!3LzxDT;(W_P( zTktSOl_GK+1M7;gwh~}wLHSkp)Gff`L64w)^zr+X9!2H;&r;UWxD(d>ti4X z=NbA@_IU;d^*@I-U}`q~42O^QXgBt_`^ z8-wzWDo?V{o)BZuwMaDSq;&&e_RU4nT=81AbN(RIB(S)!+k(HmY)$QeAbAbsJ-$^*f-YT5v z)X_al>94dghT+^C=QXS0-9+j$vK9(AD}^1{9)C*mo5a-Y_c*$R!v6pXQY)$WQE_p7 z;)S-mmNXtqGKmd8a{Mlf>isp|2nw^PMOBr6+dIn5)-JSCDr(C|e@&e5uAMhc=bP>K>DVE86^FZ zO!jibAI`bGOHj4Fye}aIg5Xy%sm~viAsA4Sh8XNCx|DgkoUV$Ft-XBC_E|hM;ztl! z#u#C|l2<>4a+;~rwCl*9Nst?HF88)4aRiKIPxP#pya?(7t0DHMTF4b&7y!z+{OQ)i zMsRo49T<5!Q?hUGJ8dHAZm+H6RwW)L0QAQ_54}y~HlaMRG`Lu$`=EX9JJvm;JQuLs zMsW*1#vkpThPO1eCTp2TPc;7kD*4NDK2FDFMWd*@7A&*1eNSKIUSh5U>^p~WKRWcg zC;6222Oq6@gYi7rZbZ+Y&Ze8WtrhP{dLIc{;chICaW!oGM*!%yryug)r{HTOsE$a; z<7~jzx5Ny-i)v3J%?I=~`mQ1#{2B68<@>p#inSTj+b6y&+4NaA=~gSxFfUJ~c;%qY zql|wSHS3-nVf~Y*ob`wb^YIY(et)6chIW4>d7_|8+okJ?+xeRI-`Tq#+8XG`@I-%F z@k_8G)$e_-8oihHV~{?tt%J^CL;BR$kM5@ZKS5BJzZLy;K7z4B9sHw?R9Bn)Uiq|3 zfsU7GN9$gX5! zu6!<&{w235laW4o`cyv>0rG7fjxj3($F)cB+1X&b7~{cN zQH{58iAUEp^gmaxNm@2&^CwfA-cRq-@znGSIAk{>F%*RV0C#E0u96EWU@93^81c26 z<{w(~fB0#=cU4L4zv5kL@}W;Y8(c`a<0^UsU8jJ)F4|~XQfarz9qo;>0Sx+-A?J4| z=G@EIlhoEVYfgr>RhOfGkexcRSA6*zGKfhn2ianIAbP}kk&BA&#QwSRaa0A+d3NJZ7LMtc6dez>EYikal z3@>?d_*ZlCC>?n9tqmt!gIlplqC%3ye5-`)Bpq>4%eieyMN~^hNXKtQ&$TllMzyJri#aH_~PdJSaUZ>Dfv%Z6iR@5zRh4Ypp5GUR~r_z|w!uK*vil`6Bpn3|Q zRJ)H)wenYhykK=^>?;;6ILt(nP4gb3cNA5hFSxa3Q;$98CuPvnNtbX4%lOoxoSt!? 
zYK9whhCC}M_o>!CKh_VZ5eCILnXw>a^r&pKEusIkN~tRPw@`wUmYnk?&iwrI3^Fq0e$?@@)vf*c<9; zE1e$O$(eSzdEDRSm?8SCK*Ic>F8arK;I~Uo*<7quq{g zC~^M)*QjKQ6P$v73YdlGo<6k``Fx(gjT1|)iHm(MQMrO2?lH&VRA!D9=&khKT9TNX zf|&=lYK_cLFD3(u^J>9)9*rrzwK8sDQVCe|>{_CMBeXtiaJ(r8k&jxpaMrs|?wAj1 zp((a>D9a|($6E56diUcTUXTl{3tVPcjkptlT8a zQe%9C=jF{e%0NEtNGFT?%}M7&-Hi&AgPD}>Bf#9N&!t96vh(thS4@x-)${eHw06X2 z4n6ZnvX^sM)K;@aIL=c!2Am-bcL7!vVeK4(kKL*JkNAp8o9YgyVXFxXfq{|vR9bxP zZ#lPffXAHnZ}2r|d|>|ot9*SbhNUzrR#2{^?zzWq$M7}L6>g+!JsLD;h^y}B=KT#( z)b&=Gkhd?5=**2v@+9?zgiK@u+pH5A7W_kyythC(KqHaw^hr ze9hr?WvoO`~Sog$jis^h$G1 z;5$}T%c;L*0e16+$4bwcw%_&np8XV@Q*HhoK1UI*E`rb>vEN+0#Efo?;~n!=e9aop z_8D#U8;B%~f)2t6HQZ|LzQNaOuS{(o;;!k|@k@Pj%^I;Pqa}0NyJ1oC+UApKEm!sT z7rYOzzlf}4VR`+akTFf$af8>I(r?}pgku#ahiyVdbdlmOoD;BMRKH}NO--VXGc&*4 zR2-V>n!F;<5;IY&G_)2LK&qoZD6SvJmw#gM4e(!-w#UYOMS7Ip0N0Yq<%MnHpKr*s zKl82^U+_Myt6iI|O4iOxkTy4$I{?`4R|ra<#;im!RHr+_N!tB=4?mMwVCqLajGWaw z?N82ItR69*wbtEyJe;9lus}V<*&ouduY3V@61vA?m?NL`qy81u3iQ{yj)v6rGTT+f zpm1}*BChGWi%#(a#$*|;V)_Ymxf%zv#>3FeNc`(h!rH6{R7qP;jIv`2*8~E7hPvI{ z8-#TE7;{E4vwEJVY$TUKxOGld#HbHnl=ZE<8J^PJaT7_r@5t$rD+7!$= z+($62Ju>P;6n5_f&_<)=clnnCu_L{FrFs=^<;r!ck!sAI(hWs+m@J-8n>Z{7dh>g` zOe))M_*#01D*NvDyfft~aPxGqT0u#+k;(_LPV|Vuov9H0Y<{Ge4K&eO@OR-EO2G_!W5uqEvW}{1(708z<>rkMKJDN8$}a zSY-$O90RoU`KzB1AKgiLBIq>gFIW1W<>LPU7|Y@PZqnAyDBkYnS%kA6l@)sN+|?`V z*Va5gsM_ie6ux?w7ZAs^#^B^+XOY^t4~d%mmp&`dB;A4%Mab{Xb(V7hrd$!o5jKA+ z`R7SRMoV}8XSAhtXZ>h!Qs8QK!G}8zezmWpY8O_wUR|uK9w97uE^>Et6`QE4M?J0A znYjJdjP+saJ;ggq)Z?F0W>hL8fU5==IowZAO6;hWSX|1JsDuUH$kFahB-+64S3PlJWjJo_IvdZvt?p4eZlF^T9JHH;+WX0EEhfS z&ZqUS4q0_;`?87bM-xx1U*>zQua5QmafwV*D)yQjllY3<)BY)HF`u7J1a|UdDg2Fm z@}4=>Q31HngSYV*ZWLyxhsV;#@{JQwgrC@qT&R8QF@@{^?Ne5Gm_zrj>(BbJG;ml&K59!;c3vKq;$IeN z5VoZ)yq5FtE)I8t)MLJD-7YVhG@4lt?qgELw)O(PXnV;m^{WS2Jg1P96Jzd?$o*^9 zEPPLUtmx+I?nK^04=z3wP|9>(P&fF^#t(^{91xtt+X%#A&JzD z=e~Zm)O%tNwBoU=GN$&W-6qro@E*IT%SR5Vk@Bvl>Q zaB*FJZ0D^{_gt%GvFH=F2ml6VI8e1{KrCQ8TQe}+?t4^lz_2Hp!+pb^6n6Hky;A7h 
z$@WOxft}MzyyyLzS5VaP(sdsy*s3lg)NXv=Jv^xLv4VLgyBfZYZb#pY0p6Phq-l%+ z)3Q{eH3xuC{06a%Z>u`xC9)a1F3gm>katg~H98{WJ5S|P2zPdG%C%5k3|#HGOcYbe z7(Rg1qsn27{{Z8rti8U@s@VEdPz-coR~RHqpE9~2D-xc$&-*5r;W@_#{nJ)u0Am4< z(yN(DrNIdV$YPv)~mD!oU3u$6{M1S*uoE6a|`9f#|A_L+|}JEM;)q- z%eazQDDG=AOF0=Zhz3s3Tw|?u7Lld2NQzV{s9v?^RkWpJYLc{Vd)#DF$Nlm7RoKQm zf$nLl>c{T^_|*P+Wh>^K9-x|-sC7KqxpWmIP&tusKia9|F@isMgnzwNW41xuW74lf z97~KSKS5UyPsHb)mr{715W_6E^~GLUAtQn@^fg*CopJe8;lSgX^l8hNvFB90yN+jw z4oZrRo&()!T!ZR=3WFs!;Ah^XU>Fb-{#9B)5AdEn ztC7lGm)vzmE4wJ%T3fLD{CaMz`Cx>cd7V4%ts98E&$WR;_YaZ4!ojdD)$kvh38$W16=~4;e zKPb6C4P#D^a1?nM1N*({sc9}UVkG@5PFK*5NZVKW9RybqBJDR3oOjJ3xrvYZ`2M@A z5L!DnOf8S4NG#9%@o%OpUPihc?h(JlYMZ!uA83ys?^Qdej#$||_=+&W+|^83e8&(! z<(hNYmlziEeqCx@A7bMeueJFLq)fhJz|w;2gmzQZe*^yjO6cuo1$R1t;OvomX1K<; zmdfpCSR?^>b=$))dg)f>BWP}v<_Q3fM(vKxStZK#D!fzpdYmtb?bba_AY2SwGY_HZ zRlKj>2P3IJ&b0h%0ACZ~2wlZU$I_`nr|jrYzhA9#y&LsC%2K^3KQGLwsXz^sySk7# zKT%gSsKC9r$>6HfbtT_rC?gpIADF2uVW6Lh3x797=WcJEcv zd_WsYp4u>B9$}v#sP-bScwcKZ*w0oSDuv9Ve-Yg?0kE)(EHlu5_3P1;nxh*#GsH=I zY4asFQLj66FAvHg!MY?o>> zX4#bbeQS*I9qa2F{KCpfEo>k_sL+%9)7;k`MN%5`Bfk#{a(3jh-)%oKTg%T8IS1^G z{{Vp?{{TwJzr67iBVxutK;(C70 zlhGWutaXKzp6UrL*Si+n1!hNaEV7hIW0A*f^IGy;PkEv<++E2es?sKYj6kkE?j?g* zvjfYK%&JU*aBHVL0yN8cdnzudKznbhuIyB5ry7l1C80>X1HE5qibWPR4GL{O8OqH zp;`X`Wm-dN=gB_9xulh{#GXxfuZcjD#Cp`Y%QfQ+w?38Y_WGZdc@?0MCvY&*#mLAw z!S}{1$F=qeuIoAf0H3)r`d5!jo0?LQSY(savGBFU{q4}&vltiH0&0!C*Pmj0Suf|7 z0@*7e;=5n?S5{kC9dhTPz!BA3FNgLwcFQE1-NY_)l@S65HTx@-_dX3rUx)EWCk~#7 zNk%K4q&Hf-Y}XdVJXa2ZPC(B9*GzsNSVs|M*6y1CleQ7smfOHq7ZIbbwREV;2+Sjj zmnCClwbDO|IjgmsRGLYyr4G(8amGb-UIX!2@NM(Ak>*;?f;kp3hFl&PdeL>@yL$$K zQ(Kk9(s_TpxBI8oroQl8+FhKNBT`F+h-CifwP!wA$Cd2OAe>(^M&F9OPadzVY16>u zMG`hM)MmX6d_Xks3SV8n%$`8b)6Nbn%q*<@H5^E8XVq?G034#CC?~E4YpG37OTU4% z+bM73vHLh_MZi+|-Npz#E6c=TUnEjmBff?snojS3@+xT>jQXDI`=r~rZH6(@H2Qj< zTHF29aSQTW%!LR$e~C}6P>u+rjhF2+zQoAfgPeA$BDVg~({l~;!nYA;V?++z{x#3+ z-Lyvh_T0|ZymJM{keY3%V5-2$D{;HnR!my#4jYlX=~3xE4wmI^7Hwe0@W@aDZU|xP 
z*0f{zcraXRT<4G=1N6;l3NWP`BR7e1cWe6l$bYphE>sDXU$z-}s*MD3mNs&j*qr%o z%AfG5?EEtf*SA*F>eDBZ!(&AV`G<09`uIZ1GIOlnbKFKVR`d>Z`WX@G`Ev~MG!n0J zGN&&yd(Mv*4)*bAoc zq!A-z{xz$qO0K$o)!HLRB>J3=Yb!$2ppw~;>Nk*;k(7WJpgn&Y+k;2a?`I6R#GHR9HHlRZjXCt{b{V;?pF$>~me#3~NW-oVxL@Lq`{KHXnaR6RHus<%EB z(=CL1b)84&ZMeep{Hu|)`kiWCL=iGKF5{EH^s6D^lObJ~9)_x3_P zQN(k{rb#<2lF1^m9N?UKSD2|v5mILLUplf!YilWGRUdQ=R@RXzu)npv!5%{$-|n8d zsy8}g>Eq0X@+pykV4HG4`iijym(TtQ z;n()QGw_>rPgUtnRD zZ{jWw)}fHj&5bXa4{`-`pd~ zD>eagJM%(@I94zJ0DV*k#rjlDmbKK>ANpda`qHXgNALU(4H~o6l!^e6l$TDhtj`pUn z+S0YIzVvfCSe|RB$$XioV{ISW9aWu{Gmn|GlUgW@gYK?AmCkAYJn;Nqx(#|?sV!{4 zlUi|j(?*nKP2WDS9M>J-^>SDBL#a-+{s~*q*G4%R+xb+%z!(_(t2RFqX)<7crE~uP zOifygRnw;gC)&U7$)uWZNiV$|a*UV72lq1aPRYWzKZRR{`b8rz(yB%FX%`?|{xF)g z955b2+<)JLTBSF${7xyleOKUUL8yiwd-JLO)Tffi@HLpR!l(XQkNaS0>XAwTR+k6< zn40N}p3VM0@J=a0o$i<5UTe6I@g9BZ32x#8g4y*3tF^RwKRmzp<*FC9i#)u)_vF;d z_SWBeDlmom{mhPQj!E8s6IG;;5P3hHNbo{4nI^~maWw>yKn?`b41d!TTr!I8zVvz# zar66`430){8|#{ZnZeJ^&%Iq(?eqS+PkcmaNVY%n(oR3=nWUh>@iv)Ai}(nyW3%f&M#!JO1F-oY6|$%kM{Gs^zaM z`q_CYSf0^VP6D=|%3GGQMTk<7{caZqxSUNptk_4O*!c_Lts<+ka*K%#met z(ninw)P?5HF2El~s?zGZd`HL9&o`Pbfz-8uAw`ryL7zr?`%HCB&a(@DplIsX8+V^aSBZE4H^^X5PH?rQz5Kj&Y1{{SACo;7O&{{Ss#ANInlz4emJvw7DF=yIe%SxRtk;$M0x zDLA>1oS;4TRjDnF6+10Xr$yJH($ZrzPR!UNV9qJJQzh;6 zvY(hVtUx~a{{RZ+W7A|Uk#Q!$+ZnCx0zqbEiGX9B>dbI@R;sI#QF|kby%<6=oHu8g zsC&;I%_-E3KZ{pLw%ezC3X0mzTe3Fhv`mXw zJj=HnkPljeQME<8$Om@X*|4;cwFUrVtw*g`n6A@|mRiZ^nH>KByh!o;qOs|d0mdei zKljnDX6s~kR*nz+v>ks6?54CWr{249iR4^prpq}HdewF(3P4qqwMY~0`V3f*b4!4<60GyJ(@k%~@~ot2AqBWJ0fI{e57QHtNu zN{g9(V5P0FKL*WnvpjIk6n|%tpB>(>X%ZQAQHAnUMnMd7ky=)y;MDhn(}_HG`2loY@X$f zEev;8%bbGX^YpDkq()c{hrMGZ?Syv{N%rOjPrMc!{{ULDb?YQx7VX#${!jk^UWZOf z>$x1snF22_#4|8c^8yZfn$Tj~fbEKgYuzeK&$Sz=gt*2QAO5{cbzLzPPoHq!{!L># zQkLFEkedFHM(}wzXxp*6GQ9fKQKK0baVI9O%WLT6km$73$V4ZB>X>;W++KR{~zjDhF4Jbw>8>VL6mMp>l$P1>0b<{noaK9wz< zj*d^5msgG(sS)+BI;I+>+eq!FS{6yNR%F8b=Wy?yDzx(tHXY5Mp053yNC%g`#&{UT zGE~w;@vs+!qht-eD~;i*opG%LGgoEucMew|;Pc+0^WH@1k9ypdG^v+}?!8V_b*SDg 
zHwvj0)(2dQ!dPmpC1a`;sjZohrm@VR5sk;Cbp9TczL5dy06)&MB9~8+Sv;#-fLG-{ z=B(RYrLDM%8+nldJOk_AxvF65)Nf7Ap^2wX+V?E$ZgXr#e*Jep3@!`!TIMxYnf&Q- z8gfoK#w*G-N#xXAM-t493ul%YHD|(l_MzdMiKL8OBgZ!Mk{Nji1E}v)Jf^i;N-1i! z5A7OJle<2tu_<$=+P*{^;{~ICyrKDFUb3e&yJ)hAx=lfAlZ{{R!wM9N1|PV<+pPvu#fcE1Lq zfTIO-{{Sr9fd2rOL--oeFCjP>2hoNNWa!Q-neE);YnAe~W(_#!88r*fj*4?qpEEp> zO>N|33;9+~CC0^PnnH3Eesx{$B?p!LYWYLxzm-JjMtXi#Jgn%9mt#6hnMQJ%`c-DU zZb6LJsTlt6(-j*G=jIa<)K%nogS29L(EPb~V`Pw$|vN zM;O5C#bnv(lf}u193IbGcC9360f??W2(L>I1r5)a##MDTX0!tU1zd{17vd`_-r_<3 z04!w3rxkTB$sEBb`~`aS=VRw8!Iyzj4?R2Ln{E|hLD!7cX|BrKxe&@QJt|1umL(ok zgy41+x~s0oHzTn-?d_VYGHwSStvRH8<7A(eF^io1kN*IC)Xq+8Vxt>sRgx_A0;@g0 z2Pz2k&0LWrZmB2z@+!Ne$AyYaeFb@Qr7p*8Uzv}26WjgXQ1m{PN=ap2g0|?b4!oWMDyHe+t)a4tD3r zlk=qh!knHUKZ&E+*MDXHKjd_()K^vb7%pYqj&j<{{W3hi6M?sM};QMZNIkG&t|Qt-Hru{Mr2g6eTh%DfTJ zwMe93HVCe{ye>%l$Z<^n0EAt++Q=z=Ubka!uTXtXVmvoR8THLbqE3ELN3Lt2Zw!=> zDV8)ohMM{U$NvB?5p(seV?wuY$cKxh_GSo4c*2jw)4Z874gnup)R#%Yz@0euH3Qhb z-}UjY;A-6}pVLEC;P!u+!}yxXlKf0T0oNe=icJzQ(OZL)${Z?8TMFMsjJROTN<+%+c;}qmyZH zNsRpu@Yif>N;({su+ga}E7|`5T~?^vx$zfsZ4V&zN92n-L`rC@mLRbLqEUK!DKh$qryf_J!*XCfXAE8jf!h{p4d%lgfwwoh+*l9P`(g{AzZeQBqjQ(|vtVyXxON|&x>C~;-;}z{M z;w>FcTTrpuf4@3bPNCxu3!pxBqooXg(_|*5P*&(=PZu{WVA=9+r#`DLj< z|yR#&J z5Mi3^!ql|B55b=6YP1xkqw_ceECf(Q^bTxBa z{==F~i`#sT>eZBQwA)p}-oi)w-D}^YhN9E7cl9_b)~Ofqen(fJd{UOuGLz^=WoZnD zY~&BXSD|R$DYDkzY&uVr;dktqlqcr9gbzy8wD^N-VxdyH{ zDofskZ2e4LCU4@E(d9~@b;0~AF5HEU_#b_{d)9~Aqdgc3&VkXD#z)^h{p;^7sq(rK z$~VIx9R6aa)u96J-Hs0ilG6tKe;4kU9DPM8 zEe!4N8u9${bjCZ>X5B;Gvo22FKPs~Zz_}z3p{Sc^`#~fS!;XEaNh4IQEv}ygmhr!p zmMRG}^QOXp{_JPcq_S{rB2qxY`qhBr2P{86Ynh{^C2b5VZ8?9m?wA9BM;YnGXnl>s9pNr%ZuzqXE17 zn)529W4@U#-Rc;geGLXZr#)(D(G@}5G4<<8RZnn7*1YK?cB4AvY*0ZYgF{(#V29c^r1l zL)>@J!I52WCp`V($Kg|3hZ9amcj@>Fl@mA9jAtZrIqg%y8%Wm}2OIIg_7&eu<(Pbq zGb#on`@^5kl~IFcDV<06TjpQNki{l=$v)zPxduw&x_MJtnQY6}8vdc9y2)$4-H_+a zvY4U#MS4eszi1s_K)Y!rlHXAC6eA_{J7i@Yw~n>t4I;d6yCWdgh*f2VFnJqBLk!oK zUk6Tl+Q(cfxh`31y#D~q`eR7=yWpL9)an-38U@_EG|}c*Gwa7s(z{O&NqarE`wdFh 
zUa?Y73}gU$405OOHSxH!IUY={81x5(#rfTZ?oOOspeeknx?# z`Hq$H-jDI?S%kXkqx(a0ZS$>&j&Jx9o*KOaz&{;qquFmQ+Fdhsf!pl&1JeqASNto* zt%9iyWgR=O^8WxU8ha*}G#|uy-Tnl$GprQL?+9UB5bhnXTyFC>~w4%uwBt zlMLUukpBQf{b};p-M~rP<^VqH3^4k7*N-}G8rtZ%)k*TpN7CL zcIT)bwa|cKBuRn~-E`$mHwi~{DLXz^^(0GWVUnx#t0`p=11PyC0D78*Tq(+ln z;I>CEvXkCNMdifZSwQ-Va99BYdhy#e&xoAo37>E()uaTUxB>Mdrs=y_&Qxr!3k@;6lS?1KHyO*vfjKl`<^DrH1(p*Q599C45 zDW2Uh3FWvRgZ}`pRy_O;oqema4$S0q^hMKffPLXnn}o;S=Bm8-9+ern2e=h-spb)) zT6?4ity7;+*!&-txWxu=dy0Z-RfjAqnxk!K&X`k4>V`?HDbFlDv02xenGTc3pD{S# zec|a-ODO*Es?mlDt zhd(#_C)T#oSY+tmLz>#1WhJiWH7PhfHXgAd>InLpe~}R!07urCBtg`JNY0>wj01We zhPu;=T@HOmv_4qMjEakSAA2Ncw^2_^lwbVpVJ1d;Wo zl|kxiBQ#O5Ws!5}dKwMwxm@|1eMhA(W!A-VvQWv6gqlMtf6qMBM*iQtgFcw3-u~O1 zKj}?PEt>oaRNrPzH1Wv0UM<{ub5zWzokl$g6q4#N0o0T1wNhdQ`YF{ zr%GL!JIti~z<(-&a)nzRNWiKR>I#P&8HZxS6vw+P$%yfjoaVXJOTA9jLP;77u>}jk z0CdH1*EYUo#lc=O_5!@ym5J5Xwa8%~1M>N(A!GZq(mm`l) zzH3KKIlQ_(@;ZGhXzY;^ZAGq@{YtmmgTx|_SF;Ihhc6%=Y$bb>+*H^427`NLELPft z+FiS-Kf0VWWFPihr|DL;rYBCB4@QZz$6hPXd`2Hv)LmyP@y7}xDy}dwTho(LvS%`< z&t{#!uE#rlr08~9o#mW1AKFO^U|cKXetn6dbnPxRBzSFD`m-tiwO{PZ2BkbtCkU?IR>Kd08uDK_X zSz4l(vSG>NC)T~%HKd}CK6KM(nn`IQ4TixK_Bj;fSrlXJF_Z4XwyzUwq0b;^nx=E+ zoD6yz^y$^+vN-2X$C(R2!-(k@PiAb0@M}}-E;tf7{{Vp0(b!0FnOJ@m_WscM4pSs) zvaleK0pq1wx6|4f)k25B>c+H|(c|Pu!~O$OD*%3HKljB7enHFTQ1WH%5`I*kMBUOq z%C(W>$NIS+=(O_LUy{k#`mw;O=4+t&Op#*qk>*Dmk<*HiG`OuDW?-stS7`5=xYqY` zj$L72?#Dzv9F9+!!S`T9IN_&+dU9r7Fs3kb{7S>KxUS(NTdrfAQlD0 z3KQIw#%b>iQ4m@P&Uz}2O+N5g*uKx2X^-$DWPUYT)hB7Z$kHf4-~tN}E1pwG+AwcY z(&+-_K6HvOPu&ZXk9wXz55mchln$H@pRGDEa0U$OGyecg>eO(~NBn&NKkJ=y&PMTz zL@VL=NZL9NLVw1tTWEIH>J?&KdLDwJc<+CL@;*IdsO{&}-HM5zkbig~#c@tu4w$FC z%`vCIFuqJ={V7hH0Q<0~(2AmE)W$J10lh+Ik(*N|o2Z6;Bk5eu-HqcbZp)3Q#G?xg zdHPgOV;E8zD96!xs7h)$=IwLs9Wmm*RV>%I$0Y0=R&5%od6Jzr5;9_6z+$3(DhDLA zV0Jh-svp`Dv2C|b>+7=_`cy_)8DP+66}tcrBzx4! 
z5e<@a+nTzV3-b&L=V;wa%*dM6B^Z_!Aa})7xzTNIBMM2u=p1fQ`PWPPvY08KLMptL zsKYtw+Ol!(YZcBm+&IJ6T@G$HoM{f050EDxASe+ zs@#&uKi!ag!|~77xLNItL^cjR0qIg$-4Qrf!y!L)hHJ~MgrzpykI!axsq@E{O6&Uj zzDLlSf5!g+3hLJ0b>O|z<{%Vd0dU0jRs#p|{OhxhLdg?8N=N}!BL@fCzIV{&2gC6w zI}{A6jsPL6=!BFz#OLpAQ_s@AI}=I?-Ycsvbk!?zw`bqwvGaDLeX892LAkq|S-iJc zH5P#*g@9iykTINgucCY-sQKDqj^s4TvNCQ}b5%ao_&{u*34H*7uH#KMH~;iGaZX)SJ{ETR%fqq2_VRmirQ==6?3>J*r34 z;)W)X&c$A>Aqu1FO+%Gc$p`VTo(FPe%n2g}X@ z=klwi7|Lohm6Pk(AxGOyLMv8%7X{19eC!aQNWcd@bInPqYtU%cGhSaxw)W7j_m)G2 zQCz2i^eMbA;hi^3xzwiA=e2=gd%4CQQoLl3)9|iWofOmfD@9S2ofm#k{MzB#pAH zDq(gA`?c^l!T$jGC|83#BAUL1<0F4(CAOhA+3s!a3#^f6g338o^%&zd^nZnaXc_!} z;H&QucxS96%;+1l^hkEKH;&Pd>XRgd9~TJu%#=9zh@+ka>36WgSh7Nmcvkd9aItFJNbAji_Y z*>iO?RMKr*Ve-zsHvUywNdX7t#WH(>yqN}ml}2l+32aCRtQzii!kzXXJ1*0a!S<_~ zJ;-R<66bt1P=K0PE4_ z0y!d|WMI@Sqqu-6jzN%pJ*shLzugD$uU0r|$zJErxkWoPQe{GNr9=LoN|qv${OXj} zyK+MUJu_PkZ5Q`NLnKg-;Xz5^+d3)LVuGY6Y;fQOYkPtfLAC{!DwgM&t`_*bg041YzQ{8jHsjV8_p~#Ujp5$<9 z<4>1zcN& z&SqUQZ7);%Yyqc-%JQBM;!&LCeR}k(+D!4wEyRh+5;5B!QQEXxSo4!2jC<8N?XBgI z%N*+j{{ZvR58nEY^=cM7T~2jr4%DStZDvw<5V`#;&!m-P)h+6B6xq>r&^EV;uf9BOR;{2OX(3HU9vK zxg>&2U)G>!JMvFGF092IL`&)m@70<0-wk<{WGewBwEtBYJ{{UlBJ^hVXlG^c~cFvsuG`?Npa?k=k zl>Mq7Arw(aqy=A-+|`ALc1{l!7`eEOa?q$B?E_Wrt|z%AORPzcxSi=rvOuXeOL42) z*<}FpWd7=opN(VP-(Bh%1&mQ+?nmG1YExu>=yV|a)REZ*01}}4S1ju`bwZMkr8|2~ zy#mMXd94_1+q8v;s2Ha$nH+CXim6`K(+Y;2(5zPuc*+!S9Od29r9DE#8{iOr1Q3Bk4}|>RQz12ig>Ws~9xU zZVA9e716evBB}kvpOr!OMqG~~Pry~CD;VNQ%K%|sJt}V?P6%|}+=8_%*B(j2L}YI2 zy}uf%EIO6yu}CEddMb>5wbGw+HwN}O-D1)UtKTXp_D3IC7mdf-pKP~wV{{j?DgNsn z!nRYwy1dFfyEjle)$1kKgVqA}`mT5}||VajDe;%!pj9&8ZYf#2?$(}Tro?TM@< zLVvh&D{kAs*NgymR2{*s8(##*jfrF)m=((@(@Rp+UqcQpP6>cJXNy1Hj;$)kGJfvj zGwe*%myWp4`WQ5!I>LT%jf&cTr;apb+Ok7HgmGtOyG|!xb+;>7}!B_H?B@W z$9n9b@I;WP+U%|U)ymbw;OmD3xp(voqa*RfaL&CoyE`EnT*}hsMbJRW+@l}?(C)9I z=e#QnFj}aZ{o@-HbWR5??OsFSuZDBn!Y$V7;6h(5yeYuWa1JZnv>h2V$P^=Qkp1U= zN&1@kT98wlw%281t0frsN0P6@_*+=kCc6IsPr8l3X(Sz4hDjcn=quBH7TLhQ5Vk-A 
zh@ImM-0%%9rC}VGdXC`AB-@ux{9?8IEpH8_h_kd!vBC!jzI)QCXJy;`&PtVKI$QSq zjX5nNAHWZ_Mr|Vg4DC2hkkOyAWGDDnKQgbid`bIE{?qRhAX)#DBa00NHEwE)tKSWRn$?IN0`)Pa$@#W8hExc3W9}>Q|sNCKxbJ{@`+iMue`B4vW4;kY%$2>L@ z2ac3lP>f{k?P1(!zXC^lV7n>>2X@dk;1|#V^ERCpa6zR=k;h)Cs_el?pui){>_ z>m~&Lhf0)zo#;!D5^JYsmt;Z-^_+QiUuFz^o zN8J5u%PxM=GVN2G9H||M=xeLcwT9GeRIqP7K1o0w8odl9dp~FQ9&SGg{{Sz({{Ztk zkL@KN^3OkwA=N?M-b#$m@gbDzY4E3&`F;%&NJ(dDywQ%pYzau2CGe~TK13}dYac#V;9o@0EsQ! zR#RKc8VN1;sb9U7=i0pX?$S*%_zA}3F|1;AA0fxDp{~YVTHkI5%d+fwYt-wg|4n{Jb-0g%7Ymsfm%lHN-J}{Nhsf;(%f7kpS{Phs9N$cHuL#c5#Y~? z);=b)(^C2gtuO9RmA}blw|RfMjC$8^wL>zV-j!=jB#pXiQkY30Vv(^V*D2$_64>ZE zWbj@}Qfc6INWlBq80%hn->3-JI<~u4pSOen+B>8JzQZC*@L)53&2Va%cV8eeS7iq}jdQsyHK zI0X4F7>ZJ=;ke@mJ%~lU%8p>&^lE%?+_Av7KZ(sIt>Q~t492lVboVbvd+VoQ}|V?$mf(Ko`_|yg{+sGk@lt?pN0IZ zNNE~*!21k1{{V8SiDqnIuN5e{nMYI~g(p=L$tTp2yfvjc`SkmL!D<;iJE2AQ?4k^N z1tz5XRQ)oUr4!D_AjijiR64OVROjFEA^!k`b4hXvX{nD=F;Xvu^ss;A9})W0t0lqC zWKo|*r|prx{H(*!a4Q#88mLRT5YY66ZL47ux1z3pT2Z0dhTZmPAyND)nKd?9M%9$B z&{7{W1NWytO2y38MfVWc+QxSTOb)?CJz$R?C_sI7(=jLf^*{H>rj(SPWh8wnd6{Y` zpUBAm3{v?qx2NY-ys0_;;ii3=xWuj~<|#z9&>`m_8bbtRj9`5#seyqf&Litf9kjFY z5R!BL!2Y!kW~vu03uwItDIIOlWRwHGYHNQD-N-X6ivpg1ywz^sL$$U8ar34h^43-a zkHfWd%MlgS*-^QTDUFCoE!uYaVShS_{5dW6Hxg|_&zBg$`V-!|U$BK?iGq*9o**Ec z+^#AGbLbZ}YCSILj!6N3+8Ctq zEp-!t&o76@FjQ2_;pol|3HBzsHPak$(*g7yDgOZ3-!=@6euA@jnKijQr#`wYKys=0 z*OvT$@y@02zedvM(e0p;OSzghnpn%MU<~y2HSVWOMk8@`(|mYSuwE?CKPJ6!kw0ESn$`saZ(O-o&{wzt(T;S$Rn zV+sL3DfYfemVCw@Bv)K_m5aL7mQmp|@``S12XhQ#=5 zR&U*%Nv2hie~Zflm2k^ON=_9@8bSjYSZj#tn<*VIQDB7M+~^A7(N~^}xk_7D1S-wxvl*H%)!k zJj~lIj%Nr$GqX>(bJ)LO4-9G^75$sE81)zhy~W(KUPqh~VsbVdk6d)GYVl`=wEqAZ zXcyWxrx=>f@*@+Dq-UWXgjbh-$GX|p{{Uy73M|A4>>_&^RP|FE5s%irnWID4es$#I z=Ii3B)Sm87^E&X9-VP9q9h_}v*G{MBzlU$NKiN~`(SuZtpG$V~WnyqHZCL!W4{_J< zHTV9Tb7`vB+uqsCmll@@B#KzuA(V`RTnEOlf%-4TZx5xOpare{x!-AigXf6)A5}fY zcwfLDh#IfKe;m*7$Hz>`W9IDBuH<4-2|p-P{l@(bc(bFe;(m>es~#l7ELA`Enrq)* z%@y{Sn(DvcA72<(*Y%|G0PcS}krEIO1M#M`YI+QPDt(ha0Z+`2$j@_+tu)*7&rfkk 
z%6;+cnrS3)>+eh1+|^&WhS5h=TvRgJpc&dKX2(ne@0yLm<2WCkH&4j8wz`!X+Acr< zXSQk^c?OH9Ey5GzON5wbr=wN3*^lAR)}oanmNt#HV2mC`LW550baZnmCmCv<=JQ<= z2h0sX*ElEqYYR=elMiUq4>Aq`z&n$%s}1H5L7eB=MS75%nz}r>P*9Uini5 zj?wm}+}#0e6H~~|?-UX^8OY$(d0h@i=bE!6wV>-Pzf9G6VXz%!ko^rF&&?F7N6e!m z`Am5QJ9|}EpNwZ9)%La0->b-T>}m-#-7N60IQnL(Lfuifw2x=^8P3hlI#o$B4_efd zP1BBXJ;p^=eJ01RnLaA4Xr~`PxXP3-J-vk%cjD2uNCQFYM=AdR)~o*j*tS{D`Tqd# zs)k(#%>Mw(Nr&TAV%11J8EvPH)kp|I>5laYXq!JOpL|w)whyN%Xe4PN9aXt2?kY1o zGGsVpJq-d>)73Kbq)ZRUzfdZys|g%Nqw7#K&Ie*XwOrgbIu5<)NVON-i%^=*#@0)V zmx>4@8F?f)3M=M6j+)Zz_cphd_t3tap!s%r`Nlr8Vn-7SzJIy-iFoxj@ zvnNsRK9%yP#BT&$_-^hyE2-9KC*m7`f7U4K525z1>@7G-e(|bqF>O6hXw-Cze-P+C z7rL{U9Un`EJGhcH$C%+qA5qP2!*LVrlg$zNtCeQ;BDuc`_>K<|Y4$o+l&0R=+R!D| z<;KhmZC`WiT@+T4LVy)kjvLGv(}U88sWo`F~%u221GO%#4?A9JYGy>s{By zofcb(Sp^% z_w(96OZBp2ajW4JfpMW zixy|)nDR;QUr8T(MtsKFmbBYPyP7L_qcYCy5+q*dJuAELbhmyB@l~tcLnV~4$+B3O zV<>n(TI4)9!aZ|NVh$GJI(6$_t?@l`f2YjBi*mWfN#~_1k5wM#)Y3h7#9Dgzhd{ir zS&U}kHmsh9s5SGv5zVCCUO;8d%xuv!V<6YLTI!-bHVGvqLPV_IybADJjZ!=PYW{F= zlFH7R?kkaNq0rh^K7jZ^eI>twt<*RJ&3?Ul*J?FcbAceQGWcDnW5ThZDniWsel^%7 z@{hc$&!D8`W(o>godDG*;0Nc}P&Enr$VPodV#j#_Je++hWxKaw_s{29IhLuV(ASRQ zL(9ld(~7ed%MarTO4K{R^6vZe;BJJ*ylDbN;o* z<5Q=Vj0xoY>o+D+rxRFRHt)Tk!lhf2-yGILtau0Q5BQq0ilw-j%_mWz`zW(fCt`85 z5mJd2hs>lCT=d$+Ff%M}*DgPW4+H9IxYrB+0Ihif_l_<vew!d$}n9{o>}9C zf|vP^@PU*03Z4sFi=UWAAzxWK{#DLf_>$5b2(6?Zq%bG*s~_-`#(7v~{9d$lsuG-0 z)>-J5%E9F(cLyNJ(0&!K3~;Jo0s-te73YiMM=iomKj3vD_>xlGdHniGT(iSedzz_7 zx#;T^xd3DT03M+<-9K?}v!>(JoY$Yn;z_{9ZUOb)D%?IJyJ9gUx%XjQbD=f4v~2cg zc(xW}AL#Mp=~fcf+AsiuAbmpD5f-(4bBSf==xWqDz45v>c4tmSD0fu*(zA6)R(m6Ig$0 zl(#dvG^Tla{h`Ob7GrQXFhS}~eBb+BqP+eVXq20%-bW+>lALuumF^}VJef43?fX(yrAAw|ycIbs!1$-9DIc|`6Url*!bc%4j`>z>v1LT*sLp4H@LH+9?cH;ht(MuOh53~kLz z9-!;h8x=u!9-w}7h1g^Pf!~_*ZqSq>`Wg+YGvRPRC!Bgz>8qFH=d1%Pj>WOpfu6#e z(#Sdy^v!1G?r$748ai>R73f}+YQ*!og>vgN9;D~fG}d#}6OXMP(n0lD9bfjQPu_3N zm-|c;$#2fNBOyK8ig0pIM*QgZk{{I}-0J@Tv`~-Y$JUSS6dZ2A_iE+Q&!PP4WM=25 
z<43fUC+g8hO1heVdylO@?dZe5J?ou`&VPgQt0_E-{7OFxsLj31r9vxXW9@)e`Bx$1uO9e+!IxT1&xj;MkjU~{kr8$U zSCTQ%@(1Hu#-y)ktK6z|tlhga7vP_UC-_<8PZ{`DD_gek{ngL*rk!-@nAFMsvOV6Q z{*~;;MB|^s70Y-N##Y`v@YSxjVzNbRa9AvI@XLdY9Cxme1q+?Oja;QUyUA*D)K`_- z)UB)P_Ifph*SC@gEutA`Zl6K#UU=RW)4nO{x?hNO-69u{PnXQo6~6vIy#Bm^K=!Vy z#hwh)JYe=`Q4>s-vU!OT%2f&f0B8=ieV6QNs~mBN)L}yK1#`*D{zq*IMwC>kwx;i{ z)?JsI>*Q()Nf`iAG`Jn{?O9^>MI0=C+?s{;DVMBqlnE zymM3})b0rYBxC*rY9o7dl6J-T6ItHu>r|XkW@v~REPn!O7PKz8+;DsIP_@+kcE*1b zK)tveZA^VlZ5UsmIZ7)=Rk?t|NIQ?xsLG*v1+(sJM&jmqLE=BdIjrZ3bNon8(uYO# zMzF4}t07C7H0k5HjFQp)<tfH=z0%F9&Iw)fFvCD-xDsEza>^CIT4+IYd^CYWwe zWCiAvU5ynqYc>2NfMrqm8e?h@gO58S{nJ@S-f6B6NWJHk321b7Lc;NeroX!~f zA8|}F207#FR+}m@P}LQZ`DIDOj^Or3)nvq<`yRzpD1$wK<*!#qa;`QjFy43X>d!vDK z2vptB@rvrB?#r98NnJReQ{emG>^*wU`rNRwyT!cm9)O;Nel_bll*MfB7BFc1a(!z@;*R-zD=LWoMRmv@TCH}A9<60BLy?SqYoPJX Zt6%tD?ITl=6|lUL1zeTo7HHn6|JnQfG$;T7 diff --git a/temp/Untitled design - 2023-05-12T110028.218.png b/temp/Untitled design - 2023-05-12T110028.218.png deleted file mode 100644 index 8aa4dddefb31c9d391e72062af1f3ccd068d190a..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 576537 zcmb@tWmH^iwkV1RcZZbH=wQvFi4;I`nd!N32 zy8FEMe!P2Yj5Vri+Ba>@^~qe3%AaH~P)Si?U|=vl%1Wxhz`#?zUQftKuMoE>l=Rmx z6h~Pt7Z@1Kfxj-;6lP4aS0JaWl%}hMgPon3y(^5Tvw*39q?ojoqqLK%HIPq$hs9ce z8^{IZY45O+^I(69+psBU1-sGd52<$5*j1 zFqW>ajsooL_AdW`zf!P!8ac9aumRX%1U&^_>Fms0ji@~BZ0%hHJcStkW*2z<{TG;> zK^T?lA0I(yQ*!|oN$G!(zWx(p_=j{44-YmEE;a{e3w92EetvcUCp#x6>njDTi+eVU{CdzXd`0>H&-DBhJRB1mrAB4{~X5A&Dr+vK}=29 z&1}v7@|DZ0AsqjvrKyR4tCg#**?%Z5X6yPN5L>IiED^9ZvbPXo@MJYLGdFUxb!8A% zxBr{pl*-i1#md5-ij|5(ko_+OZLR)A!rzd87s~$sF!bMK|6=L?PbY}Ex;k4K|I-nF zMe}cn+h5*e`zxkKrhmBt@Ncg8FD?NwQmB)yii;I)dJO0aE^>6p= zE8;c2T>lo|SD1j9o2#XRv#_$6tC=&E61|b5gUbg+BU@WDFG2Qy!T*Pi|K}O`S~vbJ z9se#m|5{xBHE&*t1^!MN2WK@02U}qYBYSrvLH7Ry|9>g@&non{qhHI=za%jGKg*bK 
zwR93L4DyigM@ca?&#d2_TJi5be%-slQwLtq(lpeU-)EfcvUzy8e|sZ(7POIrGBSRhC0Y&+z|gP#YI0r;hrHXOd8z)ca^j;Am#`S2iMg<}fCv;Yb!DmUwH!@jA4N$pb~-u6DPywZX0zf#g;@r7;3d zVfExr$GwwSaLp6;ZVN7sv5*K&2=<}ZmB#?Y>WX5;iKDU_OiW8Dyk56)x(^nG_Bui9 zj^Ia7eMWuf(I{^c^(bL>zM?5D5wm<2e|H>@UY=ZFn19kH8xe#J&k~i-Cl4jx8$RcDd^QPb?2~JmuBXGC4gdD- z*(V%?iBvQtrFitzpn|~%tvu5mPW(^b|L`$lrS_RAvwi=pMjS16Ie2K(ICT@GlzVd5 zf7-nhA8>)ueSe>`oliR?cT_sM;%jjEkYhxC_k3aDfBOiJ6|WT2FG>l26CP&}-~(~+ zze=nTCg&%=&O5tw-3&jaynX0?!P|Jc3kshMh0KOJjYWt;J$}3UsL1_N@$~fE@}+o6 zrk@U7=I@UED9{w z_tLL2B@jhv0J^F&a11{wP`0$2jGwF@pyFJNUzFUZnOqR{t-V<45Da~|<&Am|+W4pl z;G$%hO%XH>EURuV&xk*8p9HoIjRDGLRPc-ydXwJ`g>T2zh>*8#-267CDscPV@$%(; zDqMeQ%pl{gYhGHOIX*DzKoV^Xy{$kxIO2l47h>p7yGI=i$~n!g{YiQ8fY!!XWU7{D z&y{fS=>qE_sb=_*)w|u=Xq5d|RCkAWJa1!b+sHLZSF<0R-fkm76>q4rR}rYajUY1? z<~BnP)#2}SuMf(XlEGVI%=lC;^hMUjCJOa425kZZ~ zNCfvh4=!+1?B8*wr=uRg#T=p-m$pvHTy5$iV;!{~Tw@noV?*>~F7#%%)ldJji&V4OMk95gH4kpg;SMA36c(_m3-C@qd z69vbPtFRM)E-{2lsv@u=5Vc%or}I4(z6m*+7eZ2CI&OtCU8-)V&gMgS(CsN@U|~lE{9w^2IF3&?MISq*@k*QCSHTr;1-6}s>|d(P_oC9^!6?5S;b*^27=HlS;)3-q zEv-l2*Xlgbt9I`_SKUO}@s>L`?ZCW4F6OydL3~Comi!zidu+ZurjGLPr0HXW-j-VKfC8&4(XUj z$y7HN&L2G!L@wt&I~voctR=d4ly7)_1ZoGqcJ5R)H%gVtTiz^PJGD$|^BaNfC)kb4WyhF&3Pf*O7il;4> zj(N)@ty}G46JgVUfYGdesMMb)mguc!1S>91r*h;m{{wmXN%vDQQdx_O)#?qQt0|iJbJ>&XL!I5YMhqL$H8`j1D55v?(O52z3csWRmRoGX zKD{RTt@yj4xGt`^51ZS1)I*pO`yN7+ac{cXR~k01vT{FVv0LQiq!nQ9{Y4XT{HXNWIEjK&I*OEBhG9J0^%G=k|7YTlGz>rX~}3{c^)aF7L(TJPHbZ* zyW3KHj6z~j8fv!z|I=kfx8B7uS-tqk1W4arI0Hq_M73!hedyNZFM2?)`n?5Ty^hEl!@~|H^zExDpY9ub{DW2M}vS~Zt=$^2)o*a*U z*q)y?wHEntPD_>StV5bX%R2IJi8L-jYaZV_4f5Lhh9S&|CPblNKZw&tTe!6PdjhWd z2}%QS!*6qK#QRV)*>9&EpYahBM`*gIH2^wkE1H5Y)#jj13sRHn@76}%ARP|1ynEHm zVV9#z_Ic8`URv%vnPyYRS|C<>b21pX4h$II{AL7L#XK0omHTVpl?WbkdQ;A+Cr<}o zVD~R~RX$XzWdufp%OrO%j&Ig-Q-M(h?-kGMt*XKHFz-S{(WRrrnG_pV6)povrta6> zF@$eXe!|nE&EF%flswPbD~;bs!c$e|vi?Cwj^qaVFog=nB(oVYP0%}{h!!S~CB#yb zeAd%M#@$ybfUKRcJZQ_6ndHH+@udfqV3BLG=4C~v8= z`cL2v>}*U*kl|p8lo&Eof(+xl9)H*`kQ_6c?^4dTk<7+%E4cgnkpPV}h0LcCJS8QD 
zi4rdU3b05<{z70^t5gvH#X0HdGq$*^#T;=KRt2K-d1q-GKo?0k`t(jE=ECgPk&?&i z#@oeH0eA33uQaM_wxh?!`4!Y7#ykJ?vW-qzwV(|cck->viW zL?g%OD6owDY*QyAB~OcT!>%Z2mmyz^aso$_o zW-bruHbr{e(Hx_tiJ9YG_VG`4oT>D)6RyU4wi`)RMt#Au3Sq(RbJ;FL7Ow;)kcXCn z^fXotq#dGj1LoabGP_`e*Z(}(aWz=F+XQsj1=&_nT@+T6w*qD(Bb$XYr2rM6gg$`j zeHWxx!o)a;CP<8HuW|tY073L4f;&e$;vhaVVssEACx>qpx~e(~xBM_`Mb<_Ghn@@A zzo`o)1klNRC_F}}2>`9iLDN99yF7MP?G+$Hcd^U!g(wlz8PBG@Qf?cgV}tAkf?G{& z&=QinAaAOo&Xb0H0BdN^2M>ry@O4+qye_d4gDW~4RV;0COah8GHS0FxJ1UeGPHVlN z(`!-ppV~xRL(&#c<9=hkac0W;WI;ih>*+1vB#=E~GBjp&BL^sdC*=g#`YDmE5wcIH zDy`yJCP0$B5Blr@RrzBnD2N%;?;$8Cd`{W*!8xFNZ~N&jr7-!@TX@R-yTu#w>$e-e z0`)6Wm=K}vr(Nc*_M-#Uycp?ew0>nitZIOdfW?GFFShX^GAYWY_=Lq!9bi|y?D<)1 z?TV>P*;sm8(*$06$O++#L-!M@@fNq~>->HAfb(m#j;@82%&_t1gH!ktPg*p^w#Fvf_gt zNA_CH5A+BI4KO0R)ze3&AcU{&^Q@_d&1MY~RmaCYO+*JucELKZ56OJLz1{tk6hySwpMolH32HT$!>To$6|z8;9p-YR+O zm6IwhaPC%Bfcq);$U?ddPYG!KgqXH%7qB*7fXc?%^z(r~&0RlFTYueNqpRO|rD=3# zXz$@pyVmXJPc#F#;l44{YCosOCpan-_i+^)B@}xV+txx9_u{nB5sXp0mswD?{F-?m zEd@*R7)suB&l{=}>kIk%VB(`TCs+@szVX<)W>4D}%8bclD&vaX%Z>+Ul_;d{1A`f}{3wF|M!3;U+Q)LwAA&sS405S0o6oj7#~O?=iXT9$r_+h3u9k;NK<(iDk|3- z8e9PUrtz8e*EBK*6-S#iFWoz&U<5Vu9Vrv-IS!qZBy96;oz9Q6MBR3)KmFN2=Y*k? 
zHhUd}lTL46F7Jg>2_c!29Znkt)+?M#Ivg~knn`r~U=k&huw8Kq(dfK!0e=06Z%ZE1 zzj2um-yuaLQVCJN8=~fl3k`Wg48dMJjeVDxjB6fxaWIAz@Emm7{V+CrCq6L<>k*-T zpu7gc$C|4usDTuVZpg5R62aA7`~nmsa#2gs4y@ttSDcz)?h&zz>#E6lj9%!fg%v>oAW{^GiMVow#))87xj4fu0#R8#j9$`Gp?#E}H^`QiB|d|Ku5|w3 zsEBjZGJ=WuT!SPw>W&0zNpC{nH*8bb(|n-mKWIokg-Xdwt;N#QB<@nD=(T~AsJGIoVJ)e`r~dXatV7!+ zLRcxJfT|?^n{O~2p?T?V-j=|S7G<2j7$M_1*iBZ=rY>w%X=Yt1)|}deJutvGk8Lg7 zpA|`JprxcC4N?0vw_yv}AO{z|{ffHnxobtd=$04)=A(o2i_*m5BpDZtIQbHThpqmG zJOrj3Q!p4l1xZ}NF6dd%B_=`#Xqmnzo)%OG^3jH$VYJ3>YA;UvT7W&+;3mn%Vo`79 z#;+_FY2FQ}h$wBIR*i7txc{ug-B_%h^^o)r>9b9cO z&}pW^L&dzNBbG&C{(UcdC}6ztoz82@fz6a4>@yCYbLn^5-YP7TCJ~9F$BbKKwMGb=u z*+v-+;)ZIQ+%b=i6tY+hFhiVhaA0_=0Q2D=eCUEXY_dCbipkbF#G_O@hr_Dh@5)2_ zsN|@IB6bsrli#sv@|08((}4BzHt5vgbPZWTIR(~6>A1{`e)Tzy0E-8qLx9FK!ff3w zwHOYIFsEOf?UYtbAQM4UZSB5bM89%B=k;Hpqtst%{w*fm8!n4JVKt9NzYY!}DZ2uG zou6+_`dQdKR%?woFo*c@fbrXo@F?Rx#D&KtXSrJ=b`&wT4ZYtv1etHil^AngVA>~Q z?$s!UX`8)!LaAQ^S#5m%{D{kfFXCZy*`PJQ9^lv6{3HJ3v4x|QHaCz~PJZ+*wIR4; zS^wZmgo2i{|GIrE(D~O8cT&X7E_X88cUH?wXDq9rhq1OQdR6G7BX` zLorok0&R#f@M6lgS{LmQZ#SU-+aqEnVj3=upatSmv-yPHhUvxpLY*Qew_hL`eDYwM zz1F=IC_%k@oYgROG?Z8zgWiKu`}l)#H*M;)iI}qyw2f93A~nqZveX@_&X&O#&<2D1C`tzh{ZL)Q+))e&pUruour`}sWFAbEn!mja5@=}jh63M4ur>*|s^y!=DpS!7hDynS5H_iHe3WU%LA`!txrHDj zz(nfUj_anNsVO4`Bw=mqbJSNZe1b9YaaW#_e%9g~^g?2-P!*Ss-LDlud0rd8M4Dz) zFKXm8&+8vUN$Mc(ck#4JC3sRPZ1nDaTdg919IjM(qcPe+$D;AfkK#pj1?nPhIzur` z^{1iW{y4D2=?H?Q&uFg;8xXOMuq99}L;H|lJBj~RVXe4@X8na3v0_!3MXz&2;OPVu z=*}Jy@Yu60-&E8qrnm-IZ`v#Zm3Scy%2tiCdT{dI*(JRyN~_xx-e2iqv^0^N$No@f zxsceTjIYmtFh0j?R~z-7G>Kbc@X1mr{H~pCF!<72BrDFycmRe2r8h*GOSHp4=Fh0@ zWX)`-w-srToT)=rQ^&@y%w=P=^2{7HC^8!RF_>LqRgMY0U-Xcs?H!wmIaKDljMi0% zA4_+rPZ-Jgvh%BPUOdH=rW#YLl$6Tew}kfQsBZ$Ou!&2!oT14zl~i`RJl~rTsTbhJ z*AV5@&MO6A<*0RnQ~<2HLqSHvE)+g!X$V73ro^0t((e$}e&)%vchMF`M1V^wO@tSy z3dc8#TP9m}K00JH-<)Q#>cl4l~~Xs>O7V zh7@@bzb~i+ep{6rU#%()#_@dQ=5~ z0c`!D;Sju%iylDF9)r>!ClVq5{Iq%g%bPTqXyDCZGu!HnYT)N5(iE89B=pvu?Pg`l zLZo_V-AayX3#wxzHdF5`g4&xrzHUQh$wU!u{T2jST{!96S3XDSxOdXGoN|0WU 
zt)CqcJj4(3L_O`1Z?5Ok+ht~}*3_;<9>E=$f6GxgPj4kx*cZFDx6q6sl`=U=IWd8Sgp&2x@ zW`@+<7wX_9IRf*6P)GhBU`OD-7|B?@ueUkr^n5XJo-Ke3;gu4V0L zg7aO?1C==n$8Yj8v{CJRIfjsIrV(7M-ue|llvqjiE^ogSJLD;U+kb=Bt4B`yLK7Y@ z+Jf?Pm7mGN<$~?IJ)=B)x-dz$rO6h!LalxwEeeaCZUk!KZ=CF3#^>~#GzjmGCEr{) z%(`}a#zR-1^-JWge+cJqjnD2n#i@2#=< z)3)%{;Yg+K`mF()klPw}f*wlE?pWC`t!Qohx`^@wAi$EUd`7ASQuj3jTeJX5Qioe> zn}GTI1|GZ?RP94G6r^k|>w;&jK1Gv-N>ZwA!nNT_(1x9>`Fu^cIr5&q(fL1f_F-n3z3^>W1DSY&&jlGP#WOS*eAg zvzE^gNvlH(;|xC~RSqRIHvN`^Y*Frd0$Eem7d9l2sd~$79QkESwO$lP8W+V|i1EtN zR8`5@OmvyEyRwkl(=45`?dxv3sk!gFG>>=Br5ey?oCA2|Bh+C`BHDoTQs3ppqk@Qq z*x1u7QvN771gRWm;b#u0RDE;XV+Af0rBGsr7rTtpry^#4?^7f?YOgL1N~LSlY%Pga z^JpnnC^gie5rE|iu&-F*`G^_~`N8$ndI$~UrD`r7aj)WAQdzVy8@~5Bk}qvCm254D zl{d>@c=v{9eDC@f{n@I2iCrn8sI`q1MKb#v;YWL3q=zL;`_MX#6V|z?{S@ku{oT=z zRJJ|h4mT!OzqNj*h4gt8bhMx>9sMl49<1U%Nm`FEJeo9pPQnoNX{NmRpx{82Q#Bl^ zvnF|q4k0J6#_F)YKM`O+Ab#wy<0ys=bny%rB(AgQ+l};q@`tAdS+x#fgSnRr4CJ9 zwCrzU`CYY`WQ+rJ4m#A!I`hdN`h%%M3F7b-!0$0_?lb6Wa4KA%ne49_SgzGd4a-nr z@V-om_eQ{hg`M{C{1hQ78O8Dzy_VIwu(3w|9|#anR=|%%zL;M(;#5l!QXpAZf@FW9ZtONTA5JN!sAdV_0iCLyH{=nV5ee zd03rqwSB9x&Eev3!5_KY)%05LXKVCt_r!!%aU5gF-OM?^)R^?@ggC)M$UE4&;vVN- z?X)YMPeVlF7J3YddG>;Ef*(ao1;_CAxV?9&rjOpW;n}iAe1^tXUyC-h0*ZwRxsk_p2FMIgI=+{Z2a$=y0nV+DJ~8hd7YfRiCFKaLZrRVEkB zV=jN9*(;f5R=Q%xnrFZixr}3T#H`>Ink7Gck4d()1^Mv2dIC8NCyUy1^MQZzD+-v^ zuWkEt*Wjy2=u&5e(ZSN4InSc`GNqmQ3mcAuF76@jX@a@l9LToab0-Mc4NKiDLB$u* zvB_}MQK(qM{;HAsh%5%3EcDJ&>(Xp8N0pKTZZ<2R%hsHgYH)+xt2F+w5A?pW3QwG%&KDZHTTCPU$tExRH7PrA$aGyvwYO!l9qhu2E z5tq2+ZFuCgbVnD?<(ru@{-7iKoNOvhB*;vT!W)!InzPi{pwN$?jb<%n9#nx?SYcz3 z>He`8$ewg0SDL=@mH0u}Dd0DsI}UR?k_%#y!q4xBHtpB!%epKasaw6dg9*e;yKJ;h z^VCp~b+n&}PK#zSYUz!&Sto-$sD|)_PkLYrVl#7GJ35{bFqx|vU7hy;Lf?HV;@E} zqxqW<5Uha$3Miudo$9qp#7KnDK9R_9{_K{(owIU&LqU)iC-a1{hAezda?LvZ7H)sG2rlKwZ`#+4tv(b5FOhRjJ3xb ziKf2w!ei-Zz6d0NxJ6Iu%b{v{a&IGOP@yOaELxvY4Y_-|o(3^SO|}`yAnSxY3~p^( zt@Pz@%OWk`JksY(4>&`)h#X@-vhTZuD*z^CT(6tFUUPknlBO~`VeMr(?URJ?XY;kZurWc=?HBn!cI|yS46=fPw0Kq&$m=&M 
z0;~FpHZf#6G!?b)d!ZITH7EFsqRYiZq^6#q355ZMu7if~hJHNkozruhhKCmh@+xZc zI!wY1rXjQQT_1_YQ+rCr<9mG*u40{@^JOsi?!)T2&Ylvt-yPJ|S>8r_9pr1zBFz$6 z3P;!*3|=i?;GWH?SK)4dApF5bcY^r_(@!ZE7t1|Ajf1dw5jh;d(+2i(r*^^%L?z6S zCsuzzuLxxdD~E>e7fcNzKl~DRfH?>OgX2UVH#HvDx*y?R&MAe@&N1D4r#MvqgbL~t zhsnEjq7wwRl4UF?!M{EEgdbUU_-u3LI87=UuOp%J`T4?^KCj?F8}h>}*%6{f?xS*&^qUA_O^C68ml(AFy9L$CBmZ7rtI zP1l7@XBmIB4rW*{ee({4IV@mlJg7fj3DmT+mgG+)JGl!;JsQ8yDDysYcO1nqB3o_y z{nLlRmj1HIxCNOB)$efsVQj~ln#fG$q5C{X$8BMC4_b{#82!?DT2MY4(SorMq01$- zfeMpx^_TI~`#0HkKz9$LfLz1WdgX6ZTPR6zwP|@yqjUxU& zvt6feNAQo@li%N`gLTG&81OS_A890*B$;IuPAZs-kRnj2={>->#pK7s9dTu4HfwTQ zh-hW#%7Q2aIUsHc$G4`rG>}dr_M_mRR?YNfmg|}7t#&cl{w-4>|jWaes#e%tPq&<(1n zSFyoqb4Q1>uY9=q(e8y}s$gL1r`+^Ia+x;Rw^yf?A9EhejAK%Eqe-cGq0&(6aAI?U z1X3w3ARGyEI#qS-i=)RLkb8niftHX)fRx{6o=rojt_}O-KrvSHXpliS(Sz99iwk-ZKJnrG)ZTH>0jQ`CMmaiLtMhW3#%#Jt{Z9f383%6Ux z%DOS|^%OFtX7Jp_Az4p~&|+-LdbJ(8@9eh8*rCAk7)dhoKW9r$z(Co)(V{Bjl32fznOG(ss^8wKVs1(4vn>dQC zOGcC5Zs7TD*hk1Di|QzT#-A7Cp0ntr!fyLM-wncaXg>bYWgc#-TFPhXxtZU{JGk+( zLnFI%Y{XjKMhj)Mxh-Cyo3t#YKw7BNB(~SwaM`sXWH+!p}M)73Rrip9-UVxSynBZ!4`h?VOndi$8|2Aw#0j9)g5}%(R`M!?NR$u}7r7ThDNsk?yI?8AJ+A^9I zI{1jB_{pMf{8&Hb`(*o|97-ko43f+g}!Q9;tHde&L>(_S4e zWN(N-a>DYZuoL`o#WF-xiqoKTYtg!B8A!rBBYWFZ9wC)(M8L!mq$ zBRWLW0u4+Bfx(0XH@4Z_+Mv>gb{^TPqA^_!Ev)@zYctNeMu4)skfGjIvifx zO!%aS>ZVG>Q{hsnlaRJK`x5r*oUmsV#cs#f$_5onx7taYq&l{kkNnVGZ!4R_0*PVv0=+|e=jij8NiuOP?1Y~-V_vf2(uVbox z(NM^3j8b}Xhr{SiskmF~u_kXf)9cw;8E^_PqBmq6&rU!p&%YxTdZ@TFcmGgPzpZF| z%L%j8Za%uj-(tD?%dS??y+VgGH5O12Nx#16e=Q!e(kwZuqv|H{d8IC3p|PlIdLlKN zNLe05lGtTaFeb*Vy7JN8WW}IGaaTEAm2Fvk^o#;<)wS151(-yjIk(2;!P_omLaZsU zF(2pVw0bOT$G3Cf>W-)tG-~6ierWj=dw-G13uWW0=3lWNI5fN*>LRX0USL%mE`FoY z-Bbz4@ugOP3)`4@8LIlJOZzr+H{Z;cBz(+l__7oC`}AO3>xVzC-g#unVEl>&<&R8L zH#tRij5$>yKdB^*^9*y+&5dPq-mDMc;UN^k&0jxDM@541`A@u0POiysgg8+5WhZFO z*&N5kL#J%tn+GjErqX$4BVN!Lm{6>Fw08N+MN$RncHBF1nAxU;D58Xm?pxPB8P6~f z)sP`F_+jWzP6&EHj|qp@hH^SG0FNbX4?IGrZKf4kKHIFIxxRJhc^i8Nk#Dz4TrUBW zs?B45td*UfHL)}nAK~a*2t(wXNjDI_357PfCeB?eaul|&Ap?PKflt-jM@p0<2y8n_ 
zSz+RoDI8l=Yu2lgB%h7-Ci7>wiwMpxV-!4xS~=lp!I=IxV5LWyfJfHum;Te+-C2#x zwF$8we7J!2Pkg`O^;8TCEzMbe@R_rvCK@pUQ%)34GZTJ`9kYJXLvx)!6h&@xNtG2c&ASi%)uXxh+|Xswo@9LbK~7G=qp4Oizg6p~(K z_i|EuJ4a*F=m|Iqp1e**y#yF2D3tJ{kt0Ek*dZLKCLtVh;|hd}JHr6!PkTY9lsV;` zlkl50+^gK7@`&p6s8&&N2<`FYaUF1;U!N9^;>le+e}iMQqFMr}VE8TUp-r?+4boG8Vw9;aEQR($IB zy5AwcJXW>6a0eA?iR_*)XApKvHhg5>;TBq4#xSEeZ}Zya=vA3_eG@^S;>b;3Z`47M zrC`6vt9D&+ZeR%MMnAL6c9b8Cd9dX`Rp8`l4LHmcjkxnG-U=b3aX)&r(P2e#c?@DWU5QeK}aPfhmw0lxe0c;oCT_c@31`SJzFmhPK7YY_4$?*p^;u=lP_kq~_#SBj7>id5JOr zqG7N$>xQE+{Jq4xNhJcIAjoQnn=6ilb>!|v;b*_wa*C$JY{_MX!5-$wXq;o-BL(~$DOvAd8vUPVn3--+0ML( zY$O^Re%~wjG(T3MUR&|aX{=nW^AN0vR+?Va5^msqmDz2_y2MFI> zKI$AzQoYhpEK@;>m#G*ZTdM$92RCRL`Bd1ciZ@h)U`#t8%IUHJax(a&T7&o56bH^) zoSAKItox+I8+;?|5AEw+FgUl?Zw`J^?>4*g80r6gNKWZs#+nupSPGpxJ0z&`A7*|i zwceJ*K#}H7N4cLnZ8Egsyb+x=a8)k?0P1<^nxYsV7NV1i|F*NUXM_kY4*s6irjc5qo=8v=BBxc;PhdHR<7 zLr);tHW-OUA=vd`5lNS0-82+OG6$^nj*MXx;-+)kvQV?T&MGwX3{Ha!@3GY@)c|jB z!4)se(EaV@QkrV%nnS`mT@5ms|)=%9Jcx|?YR@R(4(+pvr!@4 zyH(pIcg2u$i2LNu57&AD+9stWv! zL{pA;Lc_zcXR!z!60PK|qoDKaoc_u|_#C|9J9_$ZX4Cb2pTihjZc(lB7C@GmVcMs_ z)Gs>$zn#W`H2oJPuKgKS&gAPh5>|hz~Fm?KcvyBiX{r zYV&`Rj1_D5;zl|tZQKavNOOao)#g3x>$LIkV-fLMoD0?9o8U`NmfU(f`Ri+)dext5CRNtd9MPCnJ zkn{Hu{4sKM-X~18d$>luly-|%;qanJUHI!G)3V0?sj}WX>S}o1MH6b{b3l`p zip|(0@r!@Vs=#~c;7Hcklg-Q0VH=M-PH)MW-tDURScIgBt`ZT#%b#p-%+%bcA)J0u zS7jSgC0P)EsBpt3EYX*DeSSbHjAXI4p1Pi@JpUTm<+Y!s(2^xKEJw!HWkfdXfskN6#=2B^DMx~Tfq$mGxxB#D zzh{oYEp}&T?0YYk-{fBSo97de2j0i?+^3+Ioq&5v{u2>g)k2ZgM1)FwccGY_KC`h! zM$15Afor{ih{b(2;*cEte6%3p9>@c&M|jS5m&FF1`}F<*vI>emTe2XHT67Wj;nbt@ z3L`;`prQ|x+=Mg*n%5=%R}OFNqXTYZTn;kAm_@`qzV~9%_k1%9bab@E%>g<;ey~n0 zRtk|Om8lJUXOR6qaeH)fi*LC+-r0sS zth$~jkKd%8-0d2qM&Zy8p|b_1gzAhOcP)Eg+rTb+b!E#KRiA61yuM_DK0!~8N?tJa zu}3odlUgsYbcxwCoWTa=)P}!25u${bFDY@5G#yMe8yv8>tn_aL#vHjD&QD$U+xvtIMq>lN9xTUb`BSw! 
zs)MlzuztnKb%#@n7!7s^bfB702O~ht9$n`ar62p`g=bA2utV z9{oJeG*@03k!Er2JVH$1)H0)zFoiQDe&Z>IJAfaV?HHhN2@DhtQAVQ$*HXe0qxAFr zPUrMU7sq}hZNapNtWp^qsa+vOlL+7M582r|4fvEktIU@jjn*~4#WOF)MD_eyYIyz?@3tKiu6c=6{^Qs4OO}=WumW0LaSTu@cLcQHPI%?3| zk)_;Vqa?&aM80;%lAW4N;-xw3pmWmihAWCbc1BvARqDs2uy#(jS8IrE8DC>{C6lVN ze-epb9{wrVCFnIfiD&rYgnQIAR?V9HQ@mvtk15m|r7p^4_Z*`hAn(w0P3OWd!jUzp zNpeT|{O}U@auM*7d)L3=J02eIfzOOj;6Z|&Kzi%F=$3|o&k6E94aHufqYjD7XOt~G z!n*;#eb9qwc=*Km8E?qca2OBX6wSlvuxgYClDzYlW&z$1G!MxV$hpfy%!r3gs1dLh z@Xx(bzNFGh&m(3GB}lNPWgooCSj%DK|M}^lEiIl*o52P`IN&jZJz+T19X@|<=Mzn* z-QCmd)or%50A%<0MF{T~T>%9@)SZ6>t4yqVLZPaM%>m0d%!RkK%NN|IPHs-@Fvt-C=J06yj#Sp5v?#ErZHmYf>c4Aq` zQ@D@+RfWTC|7ieZ?J4I0%cp&Una+kO`7@IVeLQVZYXVcN`(lI2&LQf7T(3&$6?x;r z&Drf(md)pWsoyeA!AQuQL2p~p;o})<;`}MlOmNh2lP0R-f}n3^<~`w5W5R(_3TdJ5 z)BRP0y|pm8%F4c?y>^1uQ28@qhYe#XNsm}eDIm+G3eTM4DULH^EJc9L2FtI;v=~N{ zjLGkjT_gDPMtdWql+Vv`(GNoXpuKBx9L!is@VS(^`*>OGp|gWW{mIJEXcaEA<~i_~ z7FUTR#iWjaw(aHu0cRd#PKe}raz8bypIjl;Z=V)~BB}__+AhKtdc@Dh5w3OL(Wci1 zfH=CG*%p$&$=7C^$XZ12eWz7+uFXc>UEQ{dW{?$eS0{Z zT+2X|sOA({Sm>E?fwFVfbZSsofG8*YwETI#pKH9Ks)eB@8^6-g(LtuM5or#HsG>ed zRcim_ay?Vqgt&Ba>YqE7&H<7vQ?IT57LzeCmu`ZmUt!pBC^HXGk6rGtBFcl7>olkj zorRs>@~_~AD0C}s|2(~TR4G5P{Q(z3%pR0L=HX~miugeW>;9U%IUiJ0c2mE!)03jc zsU;spK!j{ld#h~$|8?fwW0W!wDEvksPLl+dxWLI95C$Hge9ifSe)yN`fCq!S=gong z3yTwv4{;A~A^kaP)iUReo#iC;INNbg_K@%{k2-*)yB@>{+E9}+4u<*MA!<3x{f(1| zC`*T6xe9dCB!Dz;ux3q|tFjvFK%TPsmBi&7^@pfts}G+hQ<6w~x`oXs{|CG^x8K)^ zY;S@K2MKlXw)WCjAA*Ljgr1FNs{=ZYwf)XqcoLOW=i9Fx$=84!Q%QvkVH2$6q=0kH zGTWjVuYK@<#?71BKKs3MHD|DEu{{;v<@er`I?BdU!Ydw=x~Nc4tfCUG?(h z#S_|jnH?R^esX|%;SnorOscCz2X7c%4n5V2OowX0EIw$zdp3+>~5ot&_;Ku ztODpc7alGHkEemlkQuUK)ZlNa1^93`q!u?8LrYTOv{O}Us@!Vf*S1F+uL9K23PRj9 zsk)|>8kbsA_ZETS0yd09E9`pV?$EPK9fveqV;T(*fv5^m=t9jpCSV~{t5MZTn|Y?N zU=_6a$AOdIXWu0b`<~-I@$fY8@Oa_7hf@W7rDh!Tx+v7_Vp^&&@9EByA;{=J4AxlhB0$DK3cr!>~7f7 z@lNU0t7QjU=#ROk!#@l7Fq=T`OTx;#tRFu!eP7CYtsmRw{kUDA(afgWI`0P`>o%o& z*4y9%769Ze+PAAZAI+5c6Wg>%wwZ?BdcEJI66{)D^u6=^C6ruWfKR=}Ja_Gb$BPqT 
zRmk&88!B!o9;sutMD;+;zhM^KRj58v7Jh8D@@bP*7YtBY0FVk!T5k49*nBnB>Gj0i zGB3W^UYM5#LY5~?u7wJOCKNtL;Fci7UbTeWzQ;Tsp$1IMAY{3Dl@`cp=1d;(RLG$L zz@i||XlS|E#bZ@kR|2lV@_nNX5fZIK$0vrT&-_3C_rE2I7Y0-@kL~Ew^gdmeypBO0 z7|H+|=|ab$3mlF+cDtSsW7U2IqEz5vT64cw0H!v9kCz1SCNQ+BxMp6@WE_=YG=|IQ zL7E{UA+-X-RWq6~l`8DidlAs$)s!C|AGv$nalGs4I%g)qAjSwwOV}$Q#Lsy<5P6CD z|D5gp7LfPHb2Pu{_g?c;`|+0VuUT`Nz%#eMMxvHLN}gKYg)`+cjGTg3GY@-DpADI! z10Fl+g+QYeQWEa=J-a@!+Xa5;0*_B8PL~lyNnN1t0*6D-*WY~Nn{U41>#sl7eXG@{ zUx+C%oCb#B;zdEV#of63mzj}ErsR=v7|~G)AuR&iUFnpwdIqZsUcAs;)t0M4jD+ZF zi6T%;CPDk@JfIRLUNXDSR@$iUdNpUfvX#5Fw#<%BO((&A5-^boga}Ml40l^Dh*U*K zfH4b-@^BjX)Ax_u9}nzON4Vd!i(#sJB#0;QsDgPCqg1W4YE9qX1%!@ZMi+#k0lZ>p zam#$;BZkn6kivoeu4BL3ArhS*ngDLjo z9j%@JW5LHHmr8qnCZEkkJi$X>Awm> zWIbWNpne(y-9FDviKP-(ZuaXsTZPi9e8!4Iw=cZDrnza&i@FJmOT_FIROfW31>wDN~>8`_jBN&)Zvk$JfUj=(u%`4~+u~H*5RYN|G8cQQtwEONoD1 zTjUIvb%3Z%cV3$X%{B(i?Q6UNkiG^;8u6GPu?Be#LVo-@ZXjfv{?^RaAVk*tMG(T~ z{k#f5sNS|UNKg%nTD2Bcxb+7(@TLZ%2QS2_S&6X*1cv5jp~M*LTAW4&#E4Odsl&>E zD5#$Jzy9t2!TsHyPoF*_AMZFnK2a_gPQ#gQm)NI0$8L{^@pL)?aKGPix8HNO-_doE zzV9iTr@-=Nb!Y*wEq3i$mw+IIO6aWwKoJd4po~f$l;Pq&(U%(d8YDUh5fJgLF{T5# zX%$UDy@IW4bVs5X=kuA%<-*K}Bx&|V%pwkw~t84j32=VXL zx=jH59nbwS?Jq6VKi94*q2K7%`{!Xz=XK*;_>zWkxjupfN79N5=N8G*k*#wfTMVVv zmB=G0bc|&rSfJkpk_s^%*`>sB-}CwO$faarN*oV+?myh|%{O21;eJO-pmrgJtygQ>^qp6y&pCVI z6%nkK2}mJO1R@5}tqBEX$YYJ+s0m)wee{zB4t?aqu_uqaO2;n5pu{LH*o!Q9%^ahE zF4pq&f2ma2)A(AoOt*R-!Q3@=zuR#<9@y=TL*LP*M3g|1=oYRlT*A zu-nD!RQy};Fbkfjo2Wu+;%tD_1|J=RVmK-^*CAtC92KNX_nN_F%|JUw)yS>&ur}|{36mCWm z=m!9_!94~kl~vWvs>o18nA<=jF>Y))W$n2R56e2Fbtz_#|jcbgEj4{&DgE_QlhurW`lA>RK~I8!In}Lr6`uAVqK~;>9F_DG3TLq z5YV84(&yKxdAF!rYhnggv98TICyb-x>_vV$?e%Mp$o<&KxIeMRKjyT&?r^BSe~yn_ z_hUbQpM&-8&p1Czz;pinN4)cZcq&<;?rkwvrkV(u+#rJj%&5}9%4p`XfSIk6`IM0I z1!*SST0u?&E|&?<&sRJ>4VW((<7fs_9?aue%gwr&mx{Giti>FV)=UnyV9w$oV2NSQ zO9Zh*&G@#@I$=Z;6o+F~Q6w#BVBD61Z|jPeTfy%yFIX!(Qz9ef5jiDuJ|y~xRMb{c ztJS11yGirmj_}np)HeWmnxK@hHpbVR;Oin7lM!Ilns7zL&D^k1 
z_Lx2}qztC0eIaNfXilg#fod~&lp7?>$st;vRcPMgR~aKGT*id%akw=jO&z;vlYve;ikOEUxGIX%7i>-q-@>W zm!;r#D+a-9iqgbjwB%Wjz4M{lee5$1AAY0i(77KlkIsRCx1#tFVBOmdMC?X4cE9Vi zBY!yr>%BpTeR@AscQnnSuW|y#J9!PYU^-adeQ*DE4oLT$*b008AaH~$yhvQ4SaKZU`~hiR40K;Z@coh}#_iSp0xa}W6F zJN+k*`-n<-rMn-GA8==4J4Tuy<+M9Qd`<10&Ph{9(oWGUP24~=_|;a-|ML~c`;e}E)b_(?{cag{ zZ^XZE*#Fph&p)4`Pw!vjTp)Wk3Le284(C<9&v^Wsj7L$)y%VAetgS(o73*3sFfe9d z8cb){E=UI2Q8--Ij6zC;OpK8RkT6nCm@fn7%Ybo8NEu+3UR1lT4!df__Ef7@6yGc> zE-DrXbGezkuF2U|Ax)ifR-7KYqEzqa(OS=bEX6Vclm#qmhU&Kk-)=Xo6=tY<5GAJp zl-O%Z-1(^0G3)P;A^@)%Q|IHN86r8GCP%*~FOptTLt{X1z;C}V`0wcjTnAi6Ky)x2 zUCr=MU?>`t9Ri(<;677=ve;K5WKKv(R)eCRd2q2wku6pOWEl*#fk`k8j89J^nl=cr zosWzPoyD2bJb-hTwN2!8BAx@$sE;#QO)zG#AthWdBc3h;#xdbCB_#5oqgoaU4nxjF zpltV@2CHdYmld~L!BQG(MX%v&uybt43GNvuGjM|m>mHtKD3=HC?~Tmj_YSiO^!!U0 zV(+o@p?Zw?|2sJ!=j_L$@oH9fr-Sy+$7|=(>xhKLC`BJ=o(cF}g=;ey;d z6(0aZU|STYblJb$5!usr`hBuFADpZa6p_3ubpO;Huy@QQz@qsC&R$rx*XLvc1gf2W z(ch2>bKP@Do00@zJlfwux%Q=RK~?bb@&`UWXRC)$E2{kb=@YWA2?mgp_Qp~aur9ChK%}nqNZH4kkNYgVfLDhr zI0?MCu>q}X#ceTq&y5iaPTT3nLQtI*=6ud5mTh94&_T(emhfl!=dR1QWX&Gy{xi<^ z+VSU){51~9+YZO0`*`o1XDW;P`#jCv1Lx{rZy3Bi;{KW&-VH`LLE128;Ce|I2LdUB zbw?!*XZH((jx7S8I3s1oJV&h-d7CT?YAr=j))lptPUpIPnX}&9&>|>UECsR!q=Hgb zotepqB5D&9Y1SpZHY~MaSu2`T-Ry- zNLj&@y?HC4B?hbY(xQM(IWIf`UI%D**P|qII5B{b_xcFkxyyWL1QZ_IeCH9e{--%0*y#5oT&B>2^Rcg~J_ZY}PC z0I!4Sk%I1k+qiJxiwgF z9Hv=Dy{^Ivlf@(KmR3ES59b+;G^eN4pg5;q1D0dL-kz0%H=a!R6I)FTQAYGI4WSK~ z@Tk%}CBAjD4ScDRQ5`n_Bxo~G={2V@d;ofVzEw`2TlJW}TK*4QtE5iz_i63KM>{r;^C)x*5`e)<_*}GSA zz|VmZ5*Pob9_EjP1kS(Q^RFU`0{+g^JT(i&KTJOW36e=X0;*!vDFwR z3uaLw+?EBk-f+7albL`a$`E2SwwEUah&F^thcJNQmMPI;Z|?Kv=Y1;v-eRBWz4*8v zdyyRukl%;IJ^1>tDI<40+`%Wt#xUt^4&0X5YA!B{%oH%Tk+cLB0^v+S!Y=OH372ErUAlMv~_Z+W71|%RKKgogoQ=@yo zx>LZq{Vj|bQ`+Yn4grEUjfWl?+Cl)wbM)Uo`uledjkMzNrQE-u{jPCS)jes8SX+Ec z)Xey(3pt$g!TTfH;0uQDJ0B3|B6m962?-;Q$PHL8YzA4hJLZPe3J_ILTefb#4UlF~ zm?~ls5K$CSYw*-=6uZ4gmD;<6JeTy01R~nqvap> z3g(dz{l1mBH*~uaIGfXYytc`h?-$^ASpuAX{-JZeehs_N_cFNe&)2=PaQf8Q4^%iL 
za67=d8q@wGr*l4fx7+XJm5AVbb4I7{UCTUhfUI#~tY>4AGlj1nLQ z{J;2qF1DAO^@0%B4eP2X)!FJ4&cqQ^G zzlh{^`d(P$KPaG8z)Q32O*1l&={jJ#6@Y*#8_lkQz>Ptg4c5i>jRRt>g_fQvCGB_JiJ1Vdp(@l3UlFtE7e$!>ky_}2h+r4{0 zG`>H+v(q^U5wv;)Up>g8oeu3t4g}qBWU-D8$a~}70kPG0xX2lk`p<$Vi5T=JrSDit zK=LCfWg2mrCk#wTnZOVvCJc#i84|A3 zfX`3QXstjY)~`dFXE--jCr+t1s%ouTRIyetF~%ulDFykKG0iK+JYb$iTrU$YmkBws zLjo=9L%W^@R){G5c})o48=3~KJh}V|7(}X~d10w2VO=ZMwW6%1%q#+IY%i}lGpC6^ zdF8gwb2b{%%XP-(GGQ17hY)ag#?8r6ywBu;RiEZ2&L=12Kk_@a?8pszcmjo;^?#kT#H- z%vP)UAZJE=zK;0x{DjLqVa(ac5SnF9%w%-73Wu&mYS(I2=AkFvF! z5&X}z$<*g@=X`Wyn+^e(u!AB{OH6mc+&!z|pS-c}mLq=^)N6Ap-f+}qb3Q%-zXBfA zfSetSJFoJeH1@R~-~Tdq-s2mJ`(H7{1=ok~$&Ey?cb5=LckTd?@owkKh#Oua!MG7gdr1#%$SCRDVtMb z-N75?!H#*lO!#!U;Q7-9t&Lb-zCxt-i~v#EMV#1JQj$$@s}-P(rh;!T1w_{QJpExQ z=gU0d`T2t9=O;|lh?Ei%5fZ_h_9+ZxB)�)gOiddFx_ZN94~N@soWlI9RHjB_|lF z7B7OW-YC->GgAK)cCPSl#ZXwj+ONUP7>0~#%6NLZ;(EPc7)PX>Hm9C;isYw>k9St` z8!{oBkdG*&?>2rFM}?v1b9}!dwEH;z`=O(=8$Tka`IMr%zsCKnA3oC^qwROEWi%e1 ze;37m@7Vo#L`w1K`8Rl0eA@*%$l;zu4x2JVf1U=bIq#X2k#P^&;lf!eT=K^}9; zfU1#D)CQG`UVA}NcL;#YpiZ7iu?&dSX@JNXuNq2OQEI{3Dr#L(%4)qps`M;~w1(bg zLe=4tR;!RW0jPZ*jg<-gVLrMXqees#0Zj&LKpVApwT>)^+JRmhwq-sHnj_fTN9u6S z=;(c`;vLGXWlAceri3vMXtjd<^PE}_+-!9E1XjY3252S}R6xxsi)cJ4cbKQ3VH`#% z2#K2o6k7p_A|YcQ8S|QvbOA%LcyF74q6a)0?o^QnzX*bXfy+4H%kveVpRO3(L2uPE zV-Rl|sOYu`I9Y*xVw5U)S*-SBt(K`)4H)(gLjTI>u=nm5{&hB7-@pF;C7chl91##m zV6%hF`7lQ$Xkw1zn2Gqo8~aP0kB^@FXotW4ciB51-*Mj$9T3teB8^DI@dl1g%IlCV zGpyd9lyih?KQ^9tlo(CnaqQ1S#E9NK`+Ewa-j36JS7F?5tg|y` z3c+yx`+0oCfU=zv=naMZ9N`!Sq~z{8r)RPEL`W%FHl-_JdrD5J8^fJa(bFyk|&M+(7wbo7eK=Act z!Eaw*aQXe4bwVEo3@IU}1Wp#uO3B6X4qnzFf*edX-k6Hm`lb+fzty}X_It6K6)B5# zN*62a3|PokNrRpSCbgo=Y0TE_stTmkuw3jV!dPum9;{*FJXym*4QQ4?dI#bteg!^8 znDB_r1~n@GcIr;NH+ZNBG0JdF$T3R~gzV-=4GcIUhogb~pd1 zM&{m-xj*Nz_QEJ+yQ1t4h#b$c&sLpZ6GA8Gu5s7va(}zi<9%45h`MaKBiunmVV#Cm zy>4u$Gng%;!Pe;$tu?Hbz$^|CNgibA4~ixZ&FWu3g0VE&9#Cj&JyVdg%Qw$>ZuOW9 zLW0#;862!N>)aj;RkaqBS`6yh#GI01FfHp{l%TK-LA)KCTS0Qt3hDE?!I%+IHo7nH 
zrNevI&S(|JZ@`eO*Xuq2;&asHwCmTCj*cNPO5wtUH8F;kaI1=?7?i0s#X=2pBBUNQ zXRoESV|swk>*UPGb5=nZL`mIQAhv>aB0$Ng*G*1shBA?5!KwgDQB1>x>Dz*@-){K2 zR*+_3si?hoS2GG-0w@6_4O0d_T_-$WCQJhZ(x6`R(W=3&EKnBCM+WBsN{qD%Zfn8I z+HhL~CL0vH@rLi^!rpK-)8S|jJ#s=G!Pg)B>Tr#IE%b%t7d$y1x*gYbb`Rk84Ox_y~7i?{ujrZg8ZR_-rye57$H~aY@uO+6#JV-#M-e^F$xApro?e<;h&F{^A z0I#rMnK0Z=6UOfAydXQ#0=DBUfA9&I}t#x;X$K86U6dx;fS1`-JKresJ4wfj=Z#FFl96ty(1-~f9wnh8b;sdB1U7p!MkIZY)m09i(*By!o@JMx>4%)|K7C! z*|8TG#L$zm_+E5ji|9;J715fH$-7;|+5><^k91gr|AL z^L55$o^ZKN7zaj9M&2+C38gGH{~BZn+#`{-HESvsUDzoZeXTO;9v4T9m$l*B@`B&L z-7pRVF5`&HG-4XZu1IsD&Dgh4;#TbH&c`@dpOBohL(6tAx(71|DP;_!bxzJ66@)g= z=-Bxqt7Ma`u1EY0a z#3>xxd#vGp??$-ebj0@>{(ax|D;!R%xMv2ta|GOqhzOi4~XEba)>GseU&kNVOX5Hc^LM4LP}C#@6enHk4XW){14Z?1yDj zf+|;fSNGN`h!gTKfb-xDLK{+2P~EZ-IVV(MzhCKw;(p)6x_G>v>~}_`%bUX~YMs&D zFNE9bZY?E>hhS;k`ahl&y>_15Z<+GXHM25 zBnSwILD^*nHQ%_}gQ)||0Z_M1G7&~%AY}~Af=g*>FsfVt)nIi50yUtxL0_x%^=m;S zE1>`KG~qfXBoZho8furfRvexH0u%|vCU0vK+?IluTeY4aYx5wN*M8^*JBxlzcYDiU zD!JHt-|0Af45wv$_t}FJ;x-Ymb3QzHy0Lj~@*`lmeRcJVJ-)~7Uf;G0_wu&OlRJ5F z?vqG|R2R zA5cvn9$R`xdt~~NgYqgiTFvmrM`|u;i+I?NCk3nur9YnvG=V_}C**G9yray{&d0rS zmxo;UEn9C0J0sJs)`P4#&y3cM*~p?Eg62*|O$-sX0z4QC0~>L<-(}GGZ5YrV z;o)R#PRSbTz3_@<6{s|bSYx6dWq4n|KOTL*e!zwHO>ahl+#67M>)q?_>+RdIudTvm z|DE9VH_YJV3>#fQiz#U$T&5Afe7@q#mrr=U&Ul(fcp-Xi4LN0~0;MUG@_qsxrwx==u`D;Nxu8r9msv0jX>(?s#!)?`*rR#T8Fy$H zhK8IorkpTj#}D&^cSMXK58l=%IWtRgXJJ?LF3t=Gb*!^?3StFeDXTkxij*>@X~HxO z7>DdEd(On-=Mxds&XQ&9E_d(pv=%mp@qUd2?D5VJK5()V40m@QWZ92%AB_8p9j7r2 z<E!_2xM~GTw4R=)QpSQD$>bc5ru&58lN7BX1e1$Dd!n&R?SOLdNcYb^6&N z-=2-8O;)Rlvksb!0JLZB)@(8$G8@vU_p^}h*lZ3dDIm?541uTzV5Dp{QvnIR3>h+6 zW|En$FGs5=rJz-Dr^DFl;@vSb#&HDa3`!Yk7y(WwRiIj}%ez=%x}c&o@}SlR8M3vw zn-gr#Y1F8gqajdAJ@6C31haXK-HKeMsRT)f@eL|P(7@K{o?Ymn4q&p3!`kr2w*{(- zWy%;Gel-mvK;eW~vsbGt+tyiPlXW@guA_+4C>G2xOYw99C4H&M?{*&0w$OatTTI^l9Lq7#KVAnNs>3Zr^f^?od> z&?;zU!CEVBw~Cj=n^rdIwG?jic6(8s*ZGnCvKi!r$VTn|*x$iV=;x#1810>p_;)xT zx*6PoCiz#5aGbiKue$)vD@M}ZVfewT9-I%v7htSdyRVM~WA-Dz7CqQQugERr$J2f^ 
z+<(Tnb27w?|3}V7Pm`U0@Bh^Y8Io78*&{5c-`j;CqTgK*|7e_@kd9@yo%eyyA2T0! zitD6QOu6mSh#CfMM$X)Ytp;Uk5W^De-xS84J9RLYlG|FqoY0b5)Sayn5p%NKPDp>m z4;woR!TiFfaop0=T!EFKRjj$(;i2&1wjtXUV@tkcfWcxQL7>O4*&nCq%C_bneL(wh z`_Vx*>PpBXdO@C-Je zSSQDGusi&3J0S0g*|W1?4%#u}(e9#aC)s{73VuxGZc55edH3gY@pl-Lhu?%DVrTj8 zyEx0nSB`q{^W%9xXV(Ah=m|rQYQ6^d2^~v>jy(rp1QNvyzDtEvhaRWonY`*fZ!C~h zi=q^}P7`ywTU^ucr?~^JunhGy4H(9Ram<*;ggFz|F{4~&%=3(#GoaY|>i9e{S;NCY z1`rQ0$WAsEBm%^HNby<$BpAi7lVebVuB}TS0Ehc@Pa|wpn~wwhHS7g%ZYNGb?(jQr7)eB%-I^pPQC)*NN>6uu}Vk@O@XC0v~LB=a>E~qU6yBzdE5ef%pR!Oz{a5i za!MG+slPjdy^D|^FSnb`ubPouJsaE|Gp_@ZW^mQIRJ5wdO!)kK!53y+pFe?fh63J4 zC1{r`T5DF;F)$OqOx4k`e zo%_1|iIANK2ZVOgpgnvq-!rBHJ@_=4+Dl_z%ZKQy{DK~m_bI(T*L%3phA9} zm5&wPZod=qEQmz(+1+^BpuTD(7bmz_ZGV-agH80=~(c=`4fx7!WNtsopXqJ-)V z`=T*mDPlCY8qR5~nRsM}AU114mzgk61;dyy&jae1P&3d*Hh5KQn4B7%$Z8BzP8i1# z(pC%uKw!*yKuRN08lcQrn-wUR>M)%KjG9ojqO^uV35o zJrwWL$vkBDvP8DPU+>BzKotVPvJ6BSBacR5?wQ^5!renw#LpqBSc{;bVS&*khwe^l z9l>ooo7NX~$43FHduc|+n+7DOHCFY)c`wRbmSt-mYpua7Q=W{u53NF5>j9TU3D?UN zm&=Unb;dZ3cz$|D8Z5hAnnCLna8|N{S{2K!;P*d%$GY4gRY2M9hZq0}O0#B%Wi8g% zWvRG1)w1=GVJL!#hK`Hfe-I6t+r5h!YL|A7r8C`aUPu4OqwPnhJ+_w^{hjTd4p6VL z3Ap{<@gYBF*es7XeeBikzTtZQHF)LSeTQRwe$4$f!tUDzK_XE=4?m?n%iilX4x9Y} zf~IGh?U+NycM1xf%?@o6vScSmJ~Zq)d7s+8hZRuoa&2N{6UzM&XfMECbHUyoAIY9* z{O_bJVhFIif150m8VpQv%S02cF%5}-zmCh?tK(NK&OZ#x74Kym=oqP0mz_ay)YCFx|#tj5)JKqF8tBY z8Ie8a@%($*`~?8vljbh<{&H$h!mgfVUnezY5#cC>VX(&;3X!AQtV7!xq% zgzJ=XohLla6Q0HiOo}O6-9#e6hn zRgi`O8 z)7Bw7UH?bXEXe4iK~VO&*eUN;2`UEL$l0J-sS;{3MykksHVIgq7shfz&I!aLn1OX! 
zAregt1?%m_u;rfh;cVTXd!HLNr(_tC2N)E^8yM!SnA7NniV5Ab5fYTuF;vmn`9;*% zsWq7twfYo>QG%wZx1!XasjJgdF`uykZG#=25p=;#+Cl80FWH^{@yg%%!uRj9^M^7 zF3SBq!t{u^vfJanqnP>Z1Ol5ZI%JaW=Gil!U9IXtr-aQD;$77tN%5Ra00!j5U=lDC z<+fs37ObTJU^fcA{z4U{3R;8J7})LvT(hgQ(ghP0Cwq6LDu@J)6@>(uoK4G{&Zd?z zMAje;$b;f~C6v;TRl$@n&KC^B1W-m50ksC=W}F*Tky66Qj3R{W{XxdTjMxPYtQA_r z2!rS7-AH$x6wX|Kw`u`0Lsa-4*5(i*tM$;t$lzWCPXyHB6ywOqDWht=TR-t#B^pD9 zSr$eoJ6n|FqYx`y%!n_=?!|Re%J!*RZfMm$PcRlNTcJQH70kdeBuvwQY0k(*7_t?l z=aeu27;?hsL7k_kE9QB^bvEY0>omDMRn!cmY(cTLGQPbKZmM{ZikELUeEVt+PZ|=y zPV6CqHF~;u)?=yF8cWuO)_lQI0>DliyhZ2_wF8KHkloz+f7xfiWQe)}OuZH=*63lU z-j&qa{%m40#GIZji?C(LoJB5Vj0cd2(9s#)VBhWS?WP-ogyzi37r}%5)4eu&1I(Qx z(m$y_V|(WV0malFL_3{uYaZ>6CR#Wha7dbfWVc)%2OVwe2IP|l@ zowR*0cJ(X|i_rW1(|ygw`R~UG`fEnuyp`POdl12Ul;Mcai3^~+hJ|M@K-I`Vp*Q3bcPL(i zWo?~qAN|n<#hXj^=xoCyBM(1B;`c=$GDDdib8L}*V|*eN3`}@_n(^s+!KdqlUq4;( zG|zatOc>dEc?>ynjfy-lMu$J7#7JaKpcDkK)n??N*L6rzDFV@gR1?-!&?-7=j2Xj}kp}LpAU%={-Mr(HcVGxYdXqa*DebZ`i&%5&~ri5~40E54uiYmpT%4*HchzIK_LNI|# z3Xm0pQq@i=Y{n<`v~o%bk@Y+;q%9k|1uD9uOWQHVw8Q<`KpTF4@Z#9Rk^?q5qsG{D zSZeMa*!~kY^tYSqP1^p%=+6?4I?%H<{W@nuosNdSo#PmNJCF0T9f!I@LLFxuJCd-I zzqCPq_V@lh`l#0^kSSXB$Vv~^?4B$#A^V0~?_KK=Jc58@(@TUyu{)+4BK}5|0e7HN zP8bH;A1Z=nx#8vO3;yx+8~*eE|4;n(-@l=%btK;$4cVYsy@!s!C$)O1{qr#3XCWLG86DhJO#g_d5NU4(nJ3oY(m5d5ujv`yFxIxWy1DAn;8G*l zCZR(-0wUH3^>Upt%>$rgXbv}WnHi=3x4~3#ee0{OAa1lc_Q4AoQJ7-5q@|%gCvz z&kdosA$M!&2X6AtuG{wAJGQ9nB$1Gl`cQU-+zjW;dI6n-HRyx_=dT|bIWGL_y?;=k z|9Fw$$Hs=!3;4l(G5Y>K&Yu%i?8`T3r)1u`X1ru3+B`k_fS32Ph5}6mL|Zg;%fN)t z^~g(CV6?N$9Wq9tIE_k?lP|Vj$nL^O$qWO5!Ip-M40Zz~n7gDK7;!nk9F4fQU>z~~ zMJp_4CNE~Q8WB;CE|3X;#EdC3bW+Qt2=$HEM2xAzBMPkHgjkIVfCJ2m^yrvbq{S60 zZFtUevHm{73FwwJ@%h`-n`uFU;bPejVPTL87=$q;q-0DqV@{Yx#?xiQFQ1?AnMXETAS2N{PG6tF+3H9!?N&*r>(*|yYS}T@Y!OM$L9Oqn7s^Xh7 zlYITUnopQkC|i_@V34g^>@%v0JOHgWkTSaCt70`9DPfv3rg_9T4ams~J;fnA>T}4n zZ|HYy>=&_Di%1bvn;Dx;LA79-CNFx;m}i3y#9CdJ1x;FK2O?r;;j$n@tBPC=*-g@* z)r~~~+S*VZMqr|EFMJas%GFx)XyeA2f3E#feH{3>UrR?P1TaIBh=5WyMMAW7x(+9c 
z&@uJ>KE*#vk7G@L*7@kyA##R6Gn&qg81HN0j!s(8ipII#o2!Ro7a-XpmJtMr`#gVS zJ0|Av&iJP=$9MM*nDY@)(a4I3W#8^IARGJ3BNY-+$^PsQG9+ii{R}Vd*%6qGBpB1& zI2w_{JY8^^X8=GcD{i+1fBWSZe7aun|NYPZiU0gx{|idMAmk7%xZ^=qE7ioCsBKU~ zb8PMUIV&PmtQ{P^QUJ_(||Ov^@$o1Mi2%kX?gEgV$3uLNw_zr)RsHruNugL4evUTePkP%WcK&rD3^MP-a7(MFFoXum(R>a+#5r!l0J0kW+O6J@|jYR?*?2n>2UCk(R zynP77>@R>hVe|;^IF1enHDz3DgSG~d=GhO$x0e?*Rd5@?RlwAc$OK#!H)_O;o|G9> zwkUn#1ZB8$YgvaVa*r&?7Q4glIkzl`1n4J@{(LUPmTVxTuTXX8p$Oq&~7)d`v z=Dwn1BMTD$-j9@zs^jiH4z!U!b|aC22xIcF#=;SiO$P2i*E2&(z*#oxQT6VfPxv|! zUn{jm0e8TyHhr*H!1oOiHzy-9AaPalDEJ-ZdG`#bvw=Vu5@Vc3TrLx?*9*pR>{-Tf zNEn!L87IusjLY=``hWgk{P!PUu{zsbQ^44ZU@%QoG_UiLX3g|W92<1QvX*KMP_y+R zx{L$nal|xw!FJl!LiKM3`nZXhDp()j>B;DG$QjKf5n z6m(3&UOAkzBPs%MCI8y_6I*k=z+s>N9>wVUaZX{qdEa=nA@A?~bcKU*6EVCYWar&> zHl1_U!#=Otx`3vH9@z^evo+Q+uAsrwnpv;)Y!A_&izbc9wuiRNwc5|q&I7Qu2I65GSsY7An z9DB?=pWZ`ky7+vYGbcuLgwLFu-6>&AMrnAxT1VM0pPumRmn%M9CtRih<1kMiCDH#Tq2#M_am&=Ij(*@UgFj~fDJpeTRCl*y* z+v-pNtLsT@Mk~Q!6ak+i#5J2mji(Wm5~jLr&H#(jxiML8D>Oxa1t6yZvIDbL@OMroEhVgahWE}ml?xgHBBicNHuEL zVay<{$j>vF{)X08T>tAI`2E`pmZeyqrDkLmIs(hG;&xk6N(<@l@7c&PhJkUJM?61G z`1CyEX`XPIG7?$##E2LYhMbXdHkecr&_vK??_!?Kxv5pKmI8s*3f5-*GuE}b+$v~Q zy-8}rx)!YKV*QG$H8BhWH>Pv1NzJLWMc(OY)iUWZ$Cl|pUsI+N2wqx(tYj=;0JJHn z8twOzSML`T@A$0q?Q3t8SqF3Mir}q_xu`QAvSOSvuGbOMz{rjQ$f|f6GBA($qb^8^ z@$@v{Z-4&<|M5S5#pln@xLjwbDi$>+E3FMmgF9Fa;!xF~>}F;Jt-@NW^?_;?r3o4c zFSmlaK)u6AUw zdL+ky2t4@vW9~-tTLI^7O>6~gg(xmHc_WSQTMz*<#emwEq`)eL3xx2jQE@StUyCz3Z zO|ZW+Apd`i1M-paR+@iHJ-vQREWEuFvOk`7;c-z*`Tn)}kB<#;b|72hR_n6AFD1TP zB%EwMq{&t+P5=Tb;?dRG8mPcJ>XKm0&bR?FT0@7y0H*Y9{aPa?+nBZi^DtPkI|OC5 zqT>YxoESsgY_cdhHDiXNaj?#YUNH>8m5)A8dxA#dz;De*qd;+VtqzyaSu`KDo{;+%n^cylicaf!C|!q z4%g;0BFu4~yR`X7vL$RxJobL>>o?9YulsR=8OeS9Mvw(;xcFdDdt>|w;Ta0KslZ6< zUaK`Z-MfZUv5VqKyti;bHYZH=C_7@@XSp-fF6zOaC41z+?Z@`Fo%1n{xXcsg z$(m=TWV%OM^Ju0+Z9KbtxeWOGufL$ItLZ9`?|)-FY~ld{2Jmh1Yy=~V09h~%3DZ2{ z=`!NWrwg7g6Q&^}XX<(~z7W}ehvFrynXF$~=@@q3$w>mAj zs#m#g;a}eyJ8FFo3=(@~qO;IhgsK3lKvlmvlVXDkLwSR1?q(ZMt6ytmnJ)IVT&SAv 
zD*nu!*yZi45ABoTbN%=q;Ag1`U$3x56EXFNY! z){6*OOF>;;OzwKe_(JIXw<6Y+zLwULk*W@B6LAJIL1~JYTfrY+E55!|+*UymD-eg* z3N_VER%{Jyt+q~D!07_Qc0V((&BYy5>77Ycg`pIDpEeO=O8lI_4lY+w8A#pZCu`5$F9LU-*mC;RN;pBevGI3Rn^_tnkshKT6v zS&%qaI3arS(t8%utU`H8ehIL3uoG=XuB*~*HRwJN$Tkf^Y z$6>e=vZKvSjGiu!`?J68?pI%vi}ozTV4cUuF=I%KPveBk$hgcImoeeX=NZq>Gp3M!i(KucpvskP1$b zc}hDk&oWq56%wCOMU!fgRP|^LN$Ldd62;c>F7g@gY_EKe0UJJKS?B{!A9v^2Wg~)`oShNSYX=@VPX03!c_HzL%Vjp_ zBd1<}(^|E9DBaUCH&9AQnQ^^N`26J=O14kdwZbS;2P3cGWU$DFgHy%{nC6V9%ZSg< z6XtnD&J5LxQpE0=6B-l^iqwqpDDF9i^%=2wn5M~`hIPeS#p?Bl(1eg%L#krYd#_n& z;aJ&8tsY4?XGArc=Xzaa1P95wj9a8yfz}L~DFPBfN>Q7%H@sYIHG>Bmxr51RVnYfj z6Gm@n%ueapeWS>N;b_|uwpzvQI5jzzGv;~3I9tO%RtsnuIScS~neg;{!7sml#;?CV z<8mG0T_UlpD{e0@`1Z$FOw$PBgypuPmK7omvG;1TYEnJ578iVerT2oayY%Lp$0@MyRJ%DN#-cQE!t1 z#;03>d=r3DMuTE$f+|LjSejrJ>53$>Q=-u+9Uuc@xPxP`CIzszilsH&s^C@(nvmep zblrp%I=j0?904aHAi`3L!pinjH=hh0^TF}VI33ARz*_(8wja7}E zDtb_0oZvWR*@`0F0F?$%Q#8sDVIWnYS<|jstO@3AL3i?=pP$?isz}VvP;_!29z=__ z-ya3C9rt$TA;3a=XC-7$&!(J$es5)T;~E{ta74WxM#&J3soViM-PUVt zNsq@S2j0Ff3wEFdtJ*QTEvI*+ZfI{ZoDa^&qq%wHaC}7BZaTxVwHxGae~li?zdLW* zIUkqHgq%|+QfX4Hd7OyVfppnx+3>`SJY-xh7w@$p0Ik*oL;wK;LhOKgFu|5^IPbRQzK zBE9EGN(?k{WOb~}1NARuUw;W@16$KyR{`lE4nKB7kN!$aS;54OOqAN+yA z?;dtE_W#V-XFsBT@GkoiK_+s&r#qzkBV!HnL%{k2kUz z-Ig-8D5LhM#1@7KlgA(a)in4vKC;kW6ZX2t;xfEZ*u7Ek=pN)uF+vf^2*I>PBkj*- z07tVUU^Ad2%JU-uy!SVr_DKEySI5t^IGNvR+&)>v&kJ6Sm1=`Wg@r2J7~+Ey5)VZl znPU*3&O8#>T&qm;-U8PB+=5v?Pg{5a3PJ|;NR_G?yH@mT>D#q7aI=P5ttCuT277OY z%yxeYK~9Q!u*h(&2Bk4$5s=>WB$G>%|39`=qlym_sHZ!gB`)C6cCl%*I$ zNp(XmxcOnm{!*%Cqp&&CQHZ7hpw4Jh&DpHVR_qNyBZlXhTchOw6MyU_z!0kk$Vr zX4G2IT0zPQ%n5TgXqHiOlM7Mr+^axNf}AUclu)Q4IqpEk4AbOBxUfQ4AkqE28O}WR zz`7JHW5JNE>w4vC00au7CmM62uX**Ro92vdTUeSkfaK)+>3+AzwzHlEfJH)EY{pok z_K(!zI|1d<(^>Bop29W-9nN?9d#wNdT>EpFL8BoLje_lPju0P44{esnS+=!ytZzBv z!1wcE&WssLs!-EtZViCkUBP< zyf#c^dk@B?82I{xD$R?`EeN0hC?yC98WNB)1QklLMx+ylkSY=sIc4M>LzTFZ#GXAy zVxVO+RGGGSqE4={@BXG1ND;RML`^1D5-?|!+K{|a>V_`djMaVpl+Cjj_lE2yXYGR# 
zY#6)W1Zc7*u(une6pXVujhD*>^JGq^sGzRZ)@p0G-OTw|mlc=s0;!61DfsqnL8;b= zG7l4o5|&lbS^-f4rDQ>%CRl1PB(8Y5tyoGy5$$V7)ksO!W_>Yd>OD9mOfV1qJxgnko3+MUj(u7f4m-vnsz+8_d8 zS#H|(hdKlv@i@B?;@%q0D(KA`u-(0#=3#q&?wS9P$2~(H&H`^NZ~c@S@JGwu8Onqp zUidBuwn+~SdSmA!MY`Jqy6dMDnU7t-Hk|ZBBS%&2?fyfC=G7Td4{-unv2ipXvIsr) zxhY$N2`J9qdCK0A6)M;<$%d5Y;Uca+s&%Icy)gOYe)~2Qo}QFg`MWd$VOz z%H0?evHlux%rS6?3^MI$;Wj6v4+{LCZ+LXdB`BgUq#zH5^<`(E+M~V+02(ZH?hZ-_ zqLh+PE1|U2Ba*iEhLnsIOUM)nj2;n5F@nI9(GsCHa##`2S_sV61zxa6_5a-2E}?vA z`w{3&8=S20I=c_di{Ct@VE1lWsG4o&$iOhxDTYvEBzmhm5H(nC*>6Gcaiz+Zw^HqkTjOt%e~z;wK=(nAj||1uY)!)uI2~c%TxaNUyX_-v@$Vw{4oE!HKGUqbQ{UbCw#g6I z3Iine{^tB2m8WW7{deo)N5(#r95?OS!|GK$dz&~JOhRGCJ#R&!$(wAB(}-!BkaHBf zyXuzaP{sZHs0T?lsFG?}Nn71Xa>$;F0A$Sob9UCbal+&^Ci6Uj(^kt8yNM_xf>Pqw zjqD!5VVE8yILu2DB+hmoRqNjVV#e>wTCh}&r2|R~%Gnw2?48@6v=>vWS}}j3koV|) zOPV%7$l3d>jsar5FQDPY*!iMxU1NS@t%+wW1SqY6O9Ll^Hx1eA8+YqdcXP7EGBvb} zktTS(WMV=}1BkPCfw$VdoPhZ{;_3NSj zwitvr74^)BU|EaRs-*-0P@7;WMoRE)S#c``#i@}k^I@{QXF6I$zoWSR2y2YdchB7( zJ>?}&KgS$~HKP*d>1+GYqbOkjdoV8glkFXnq+~`j6xz`3jLk2gU1Qh#tnH&)2>H|t z(V>WbL=Q*T6(1X2M!sU?Z^yarjt#=-)WIRwjM49$-94Ts3h}K^%@!2dJ0Ip?pWs)b z-}6`x$D0tWGro(N4bT0Ck^Cd6weKBgC**!Pzy7)Yc@^s+;hztIu^$!{c7yFNcC7yF z=!!3tFBA}lKz4L;HV)AO07oa}4Bv9aW8u0U;gPaJDI@1>YoFOOCSihxqHu;4Hvp{& z(>8kogPq@9%!9D*xj7>zV{5T29J>{33NS^ZrN$`D8IFp0&45_9Z$dXL_HL0L3_Lm< z5t$!SD}o;q8wKLPc)m{f@;u}DYG3n^a2*F^0=RSZryj|%?yKUEkLC`NpwwmsfTdw6 z6)(5dl#p8Hri#IcEX8fnxSjz79Cs^S`1t>isc;1(65-0565JVUp?LfHaRWmSyD5w* zFLW=88=Oft-)=;t+_VJO+o+9O;JK@dtF5rfAH-_IDr0)hfgaj<1ARHQY_)ve_A08iZNcIX$ zH4KEq5u1!u+&LiUa}m*<0}?tWvGpA?CpMI$Jaj-FK`O!k48sr-2I4)$is&o_esJ7% zmp{%>6?Ma1y@tuMS7B6IgFSkQ1U4b7VTj4Z}w|N&31rd2_le5Lt}} zDFv!SC?BOD==PWO!EaP58fxGq19>- zTd^#LNW-#P=W%i9T4{nNWC1L%8ML5{L40pZL9Gqo1Y$-N(>=8*mZhRp>wOa0&!$e( z+q`Kp$f3TivwNMl%A=hVb!{#Qd$yua`FWmn>-pm+cyQKjbVf1n=h7k=6nEo+w*5W{ zV?2WGHFb(-n*ar-a|Im?g0c}YhoEYA6yL_8g;O}=Me?V zuE_4!&&9?r^32FaSx{sEA>GEeyBo3}-+R0ZKv)LK0O0B8-* zKKz)tKcFc-pStU`*T}!E1DKj8kPf)*8SPn94w9M z;A8lApPm-sG-tcIM@ixj-n8|t-Uic$H@Q?!C? 
zGnf!0Uiyy9<8T|J8w`Yx4pw0 zeedA_C_Q3-T`>f3j0n*{EO$9vK<@1Po>>cE!*zzw=LSByY=7bouQ#uueHTp;`;~_f70JI`#CKa;?L8-3QGeRL~W)$vZb*X3> z2URS~M3z0Znjx*fGPT1gDIIR;L(%{*?$-~2bSo|lOc5zf00FgVJz-fhPIJWJa6(G6 z2BHQ4u@()|rKB{d!X|dbgtb&)RfWSUDza0UEKppquG&;p7@#EJl93I7=faKJPguno-zXf{Qp?J-0F2;Dxifel}i-Tv)>#m2sF_m>8C z_Sp~gXZ?FX$9SZ+2tNQ6;{@sp7k7jIXV9_Uy0-oCDEsmVo%J-5Q_K42A1$JN7UYi( z`p5S&8n4Iq<*(UEHm_~*_|^gG>R3jxq7FRVkoFG@h689t=DvJI2zgbdlNj-!L;Q|+ zOIi~`U{H(zMU(rB@G>0N71urwY|iU$EC@)5pftg=4Iu_ZQVM_<$9Xi$H!kO<_BsWN zY}>jFL9-agX~JPncs`x*<@p)U&j&mmBMy^bN`xTV2v3M?0qYoq8o*>ezum9P)M~v; z!L1bB)`DfpDmPiGDN{K$P&M1tM@C>VsM=GgemH|rexac(5y!!Z4ibO ztT`5N%M7_Lh$L-Jko9Z}d?O|qA5p3r7=3czVmEe<%kCq-F*ryJOj{G& zHq(J3C8N!Izi3@g-JSK(9@Y_n=!mx0)ETg>maVmsq~%dV@Adv3Sj*v z#*Fv=(&Y`lam&`26Yh~d--11Z^vJ&MzsKSK=683TZ;z_N=rpl8=_y9UN$Z-XNwdZ* zjHOoOb!k+F7$fGC@N9}#KIgkPqH8!vUmL<~jYw^us;aOgAaE`q7DNh|V*estiljfBvLvgM6~`*0Fc@U zwbAxjTccjT0zvOwEa~O*5UTa?VB}Ji zIp7&0SmxX)v)W)#Onp(XmfBUX>JV%IKtb>4o;iWdKU{3zn2oM9H_iq-> z#=#x&zG!=<)*Wbb!p&B;LT%Byk2|n5d=Fs=+|nTV?0%5==V^Ce*qqaCu)n^2&q#^) zyX&?(?Cwjy%h8E3ONc;mGwa|?pI$q?1zK)bN59rHLXY0lo>2ovJ7D=cj%u&-w8bfIV zuHjS!SaNBN<|YJC>`SyS7N>JXB+<<~1??TNmV(tbOy@58=Hdn?+CobNL<$Hbh#}&5 zm`$7ggr~z1PsalebHX$+0u=~r2UxwAq>*_qKrAZxsJ%V(*>Dv#_N8L2jHT)(v97ui z6f5p)l8%8okPAx38V!#c04@6NvUol(x>4#g#rVqhvpY_Kr~yja{M$b#W>fun*?Je< zpbCq2dkKudf?Nfuu#KzLAsb?eQLlHJqW+#$qIrsh!;}zWP(Au319U}-z?=f+q*p#q z6H;k{->mw6xAo10~WUOAyhfiB#dq;=AUR z%&ia~BH+97O0>=Mn^d5NWhb;H#NEA33@Gl5n<~;ZSG%p}w`e!LHiT&#H~@8JAOm}SvPZy6kFDV)j`)xbwVvF7k;^{ z)}KS#^9tsSF*9<>YU^q>V5jwAk;X=v7HNmqog~DljYU)R&gVL>rx3A5mHv$+Ago}4 zIe{wIZgTIbar)b%oT7)099IRVKY;eH~%~7Hsn8x+c~&t4M~aUfr@_T9w+C zbHQ~fxGh;#6vYKu%wAr7@b*z+w6I zG6l6JOJ`9oRqj#eoT~zeoU7J;)QVfJn)N8$dU7bODwwS9C=&@(=^_igz>zy2lYp^1 zy|!@)=n4y_2xHMcP%q3D>EKfx#$gpucTzg=(IQSk027%!f!3N-EA-UuTAii93h>1M zrWDa8U#yBYYAB03=Hwje3UI}gfKP`5zI;C6cu@`HdE z9{y(cy4f{##B|dZ!9c^Ky9R3GbF+>So9_{9`@i`6YOQKp(xk=Qvno)fm^AapTJv?g zUa>9<2x3k85{{=6e){PPo=ztP1gtcv-4`2>tgU-^&a2f3i1oOzU;KhJ2M75ssH=WPtraC#ti;-IkOUO9 
z_AVE%8Dv#1bk$K`bJp8wErO*mGHY|mwXC2Rthk)9ShGO3dctuMiJw~(E{M&45o&cG zC2PG$Hn38xIc;TkR^JGW*mXX7t&PVAaoj+?4s0J7*qf;e@1G(jLr)vK%>YQavpyv3 z!XiMWZ5`U56)^wk=zw6`4;B>-t@`|p4aj|_x}QwKig8~)Ea_GtUj)&Gt=a!@B8ee2S2J=4@DT}plW z)LP8}C5-98%~c9n^xH&(f+>=?oFB~4GOSp;LP!wNoorQgPzN>!hWMr`&`l8pu}J%r z0&*gd08>!)5iNKQm}lKQ=V`(;Ma(gPxgv$2orjYkh6+G?XrLYz^_ei!8$wD|U_TxDLEacLv9(4+Z6p4dbj*g&uh_U3@c6 z@TO7zEUkwMIT+bYAJNw=130q5TF(lHoa8^79uDcIuDH=;ysd0Ngp|%D46p(ThJZPO zYX-PtjX)H^m(R~FQabj>P#I{eyB6b(R8L z6FwNfwnc_UqiY)j1FhITAOL~iqz3)jheyzq?&3~Qcj6yqH#^wao@pDf8Rp@`_upT~ zT~M$UFQbiJ<18!6L8A&bs;F;zkH2seBYm(@JJo8u_ifgXz&l&Hv2*QDKd%N<$LFi+ zeaGVglhsws^Mq+iO4l>$;Bvj1%IlX#!=NS*f)G&Fg4gqcfBxrJoX=O}T+I%v zqSQQTEkjIEi?>CoVU<;BDTqqqGIJ9N@`y1*o6;t8Xmq}a{n5Q5r3mH$h#Uw@R-2WK zt~R9sNU|X3jFK~!szugS0`gjs%c^}e+J33)w_7~V00+#|j1&{rlJRoBL9&ez)UK3L z0sA+L*fk8ZaW_@RAGH>+z%~#ZOk$j&^&GWSUDu2rwm<}fp3E!yX3Euz`;Qo*PQ zQE7KefxcGlS=2s{{J&nvFMwtw2-e?2yU;5DQMJXEofp8{=&EhUk0pheE${%3gXlL0 z5_UZ->LYlBpT^e}bm-jX2TSTP9ras4$kwy@F$3YYwSC7sKCbUa9=y~0;C2r8No2Ub zN8qA;k7pEzY>U&3@Aiyt9{+HVt$C1-d9e3r$uFSM724&tckWJdk6d>zf5Wju!oZsk z=FuUBlDFsZEC{S~XQvN6{{DXvM7i6!cacP3hvGX2)@`6iHE3`&-C+Xyoj70HqZaLJ z$)bMBErEB=bXt`g>lg@>BD_;IH5oVi9E|3M0i9PnPe6Mnjl*eoBM{}rmJFeUNdhcD z){QC)B$;YcVdX_z#w7(poFit5;5j0ZQX-N$mtz(rO{(G)M34&rQ>?2VLd zB88;%)iD~He}>>x zGo8W8DK02nk-6eHC(J1z24Et>tuoej$7g5Rmo>nr3dF#o$D$9S@LXVmM4dNES$7%VcR!nqc*3 zkpfPK8MSKkek~P@r~o9d))>*7>GH6z>Agqmn=;Qw5D_ldE3QjM5`eR1z~Qvm+G(@a znAZapeGTX#uz75@Pi1i#0opXM2(VU$6h?JbFv}tdbp=U?2IdwEXl1J_tSq3Q;h&rt zt7YCbSl8VNE&nr>B+^2ABC?`)%V3p?T)G6WmXCzKR?W8E`${zY%T~;8sk+ZKIF7?Q zrQM%x&DqH6|D4e_)ZyiKIy7gBE>!lfW(5J+idUN!VKGGyVKzJ0Ri20BYb3| z-UlgT#sYxV754pXe^$3Y7{*M6_MD11)Fcf+%i+id=oskphupI$7VO+8XnFSVG5g4= zc<4%>sMP&Me{}3;vU2MYI;XEWR{oa#cRqmY z?kA|9QZ|LT zb`^~)%TTAl1MRA4rOrq+xms`pqqZ?Jzq9@E-zkXEY=e-;%we?&x;%VySW3Tkn39!l z9!3d)Drqt4H3rr!g+{V1olB}GKiMXgOj$;xN52Dr6bVELC2NH1oC}UmCrk&8@&y3` z!;1Kna#qCB+*SzAUi@x7De|gOIIcitLg9cSv0cALmO6L2NJRm+jaMLpH zv&NxK&n~qH6KRAW6dL$;!vwO1Oo$37ssb`tCZOqsLuqDYJ#{Fm!K~2xiO}dAnPlY{ 
zRd&X3VFp{#YOWcjB%Dr1JUtyDDPXO@WyyG23({1ONc);Z3YbztoDy;c)|IW!15LiQ z)Cvj#^L&Jm7Ef}q=&^O5k7;fIG(}bG5}Sw9G(ormtd0$Q{00S>210arnhgrjtea<8 z+OxCADFZmzJzTW3J1p?9MJ?Dw;vd>>X={+XhqQ~VZU5mLz4PKlToj=@j3EV(gqD(z ztq>Kv^OE_IUf0!LZP)9~_|iP@ZeMqQZ+6=WGW~wH9Bz-ChrNkx?{D=Rv<37V3PFQR zgZHwzF264=5C$~(a+p;%`f!+9t;aN(Fh(x8-EO#CE?8I1QiMQw{`8Ee(-V%z1CH~d zonLB209m$5L1MJ}3axe19zfTd_0Cwd9%EfImSsgTN@9o!^YIC_7Q9|Ac(UK67%|NW zLNZ>oldLR4VK3qn8j>wa(0iQrp#@m68DhQqpIU*9A2IDM!?(=Q_>@ z#CgUH;J|_qCv~Vx)(o|1txL|ib&?MhaF~xcJwGGO2b5B9T^5vA&6Gk^V!l{5M<1GQ z3REgGDQz{*_##&)LlNiF)_7z<#Y{mB}CX`yOrjW7b z+`8N|uV4;f36_PoJ;V%?`Ujycs1^`nQLvUX@>;MK??EMSx#LG!#b9fqjl~^+7{P-D ztxP&r>osURVe_&4`Ip`14&Hk*ln`6gABH3l@x$U3m%2t_mIiQ zDIfxZedib`+DCNu0gQ2}&r*r;=^?=S|xi-JYe3y%H@&YUCKCc``MqIy)6@nOSX2tvoU%fF z;=8fk7T)UTe1mmr1c27msbO?MYlaku#_z7sA)*nTV)YmVb6kXx984z7dZeJywAi}! z5>#_uoiAYOag4X>{+xZM_nK=}Om6ON~&I@?j_y!STIYw1)@cV0Dmt)xb< z(qh(D_n`n}Syxka?@ z@BfY8zFzV2^%YAls0eBYtdB$rgeeK8Icc3sQ}$9?P^_B$Wl<_nsf=~asHOKCa7S>O zwDWpOcJ4H3Gec;Ni3f^OiYQzF(aa5-qYelPRm7lbQBtd_dZo$iJxuyGLS`FhuXSOo zT{K0m!|{X=0+waLx~^JhCX7G{6tuF-*KV!SXlJmX0SHKxa8Diz8wXgaT;_8m!DTHd zl>k;C>b8~{3Pu_YSL@a)Tdhr1iDK>7;zij`?cMI*-e49Mpd<*SGICv2jWOp|o5)qP zc;Dj}J3Da~@E=wf08^dZ?m-*hX}6d7K}1T!1X=^*_YS4RVq;%N8!PPdP1?Bo6`CUh zAU-~MzpCa%wLQgczO=J_a7g>_-Cf(bX#0Rpw{!6EuD{t3*WgRIKP$UuFaS>%%NVIw z?Q(di{op=l{r!F1_LuT_zxwF-ZUE9YU!NU*WB+2ud;}q0;Pt&A#Er*@lIe{HuTE~4 zq5R)<+)s{R++=|6B8}TmViVl}21m*ONaN81*1w`!hN?RUdblC-apdlh4)4*|bRpzY zbc5jjymgbVC{ZxEs*@po2v9R4g#eZcjshU6tgAJd6ZB_XEV|}KRl9@-#5m!6zT$ko z;q?536eDVJ8AIn6YcyEFh05s)lSK_Y4K71ON&%7>xRR#$V~gFLO8Gs&%-pLA9zu!)dAjU!S_Vw-0+6RP(dmX;F1s1;TJKuhT z7#NM1=Na>y@N_z0K3E?Old}~O+-^54%Yxs2`xWc5VxDI_e|pC8a6qsogDEAmQ+Wdv z$2BG_`V0(kmQrxJUh#Ul;C8#>dbz5ToGTWS#-$L!6133$w&3M_0V6a)k13_50_cHN zwOzpVcE#7PFZhrD_!a;CUoXf7>d*@zP8ukH)zGCFahSA8XB&eU!7P|2cW@#~sHh7N zg%AS+QIs_y+I%ms8S7H8q=;#n5vK!E>KUIos(a#qMOD54Dc+O9fFTJAF>292X)1^* zBTW;8fN1op5TbQg50DfGFkv&>ve>0)MxYeU{)P|`#pozOhR><}BO205O{> 
ztR2vEH2|XYt71X3?IJTG&;bG_gqucRYKBEIu7vu<0W$nMkboM=>2Z_2Qv~1o7->ruL@mP4i zWe>UZd9T@P@1Z@o{{7$s_)Y+_zbWUc|5Zl=AzH|RV4LFi0)!r&czYmQ=79fqfe>BB z9yRUR4>GkMHI(mT`0f~O-={wcLP+pbrbxDN)bGA`Qj|vHW4<|`8J))0WI<+=VT@US z&(b=J#~3kH-ISs=vkNfIcB)84e}{QSq-fFafXK!PXD`51Y6T%!1V3OcKrXHN#IEK}jh;IHb zuQ!yn;^{PNWNxjvE*Vd!115{Ws(65M=j$M`m{LNBgmqo@Io%eldBw75hjcNOC?W=G z!Lk&91XGWZv51XKC}I`q!ch8CY(+O91jx?^JHp0)FSpF1k_UT+kgKP zwN|4OKH+#e0s`$T<5weS4kci8IojGpZ;r{#xZQ4eeLdsl)d0x#ic(bZO@yq5VL}KK za;|uJIpgK)8Mo_-)9Hl6LF-ed$yJ*)JF(=7^W}<{^BG@XUU9yxc)cxvBt!&Iv}`hz zf*VPYqRIx(lNZ}hh*3djsDzXXmfNZhZLQY3$C|6QfQxUX0l;5nyb{W5aT!25Qf~CB&{Q-m%>)n1^{8)kdmxXraPLYh<^M=Z+L3IR1!qMf0;WF9R2*|K*Q*d7=B z+s|fr`l3AMX2fFLJUdUNw&qSl4Ukmdhz&&8{EuX{CES~R>B;??t!~8Vc~n_>w-o>~ zP=Htqit*1&0%n@ER);hSPX=BXR%i)S$W^3U|ox4<}6yL-XKcWLiK5$5ee2@Qh-P(RhwZ}CahTyiWRA*)Ut;y znhl?`_cnmJqLi$~`n!!=`2M|ngAX`>?7@}s`KK7|d1%%nglG=21H1c}UkgHP0E2-a zQaMjy2C+H;0nq3=b8E;dL3Ydxzc*@g8{{;j?R`EDJbavq5Rqml8kMlstvsqpaj-oC zL;aV_6x&bRd$2RAt4PcZ^{F~D0^T&`!F4hNhLGfsyYQmggq zV9cU(jRK)^L5j)lTP0tItGBulrAEEHUhsOp;(Wg1a=Br#hMYh}2tf+QeP*63iNP|5iGRkeW4&7oI zcC}?Z1XF5N+Y&4zTgweO7sL?tycT5+F+do|x!|@eC{+LnRuiEVw|P2X%@x8f6P&cB zjH^K*rA#uDseh?M6Vue>f!)3<5D~4L6V1ePu2|L!yeTqRX;lw{4?IYsieQ>&q-jF( zx*5+UQ_o!bdzovKZ!RpRp5<8r1!5tPHDY}n@^2Ya+C~9{0xHP8Yz!fkyK?{vJ|1~c zL>gd;j|4ygvh=vsb=yyv9AgSh% zZGovTd{;_}F#)9@Sfsf}<(;?bi)V~tZWrfu&B$DF%LQ|aI82i&UZn_)iv`^n{9L4I z=v*}7UQ2~=#;qt-r&cx2rHZOsu|RSGLTLr!45u$xqbRKsH6t2VY)varoiEkh=)250_~!%%`p4A4qA}BJqQ@GIk5475U8<_87Khr zid+MdY1->%so$TX6_XqIAgC0sZ%?@rWLY)Z9|VcC;o)(vILtHVNt;hb>u5iwh)XS2 zXsQ69T8cX*bFg95z7Ua8r`Kp-gMfY&P<1o7<7)uEa?_Akn+TIN-8<4wd;4*A(${zE zUXNa28bd@fWhRP`a6JDyxyY_>bv92pF*9%@=&v<#c;(Ezgb49IIY+4!fJOh}(mGJeqSN!(d zE3TIn>mp!YP>bMpE4E~HKPU!3SYUM;g31Ac2&Qb#AbLhlptU1`Fh}j1l#)6y0uSb> z+EgtHxN4DB4-j42I;9qwcggbR{J_?e*(skh;vmKxR5qK+&9-*iu1}kF?s7$_XqhS| z&A`MM5R+3~sPzvj)jFi3*G+JRsu@bXi~d$qDMJNF%EoT)qs#pWP3?{CIz`wT*b2m? 
zZyyD70Z$5;dSPW0sfbYnYB5CX0j1`KP1UN_)nvzkwUqwdqO@NK`o2Yu>F*s^&)$aJ zKzPuwACLE+_I^B$NfY&ewfimOAv>o$hWkK9e+II-0(IE8;A8yEdxx`Mn%}#7`#nu` z%Tf@_KI|@b^BxBuzDn%83h+IWx|IHah}215XgYNN9-7>sJ0#kyvt>KNKz za>ZJUMkWlTv;_)4EV^Y(cvRQjc4B~mKpL`X8X4E<8A18VdT3gI&nb!Y$LG|j)&uAm zw@rk7?2T@Q;aUfyRqZ<7zMKwt+x?L3WSUddj=8MkQMrIYNP&&xO{g4HsU*g(S{A#K z3`1#cr4$et*#Jl`jAbo2ObJt>Mho%cVHQ<(DcNWsTtHFJ9a6w?nvJd%FwYZCrxT8+ zBces4eXeBez@zb< z#idUlfI+N>hZOqSlO@7jR{S z0MyB9jVLw&ikw$mF0Y&Gs3N%C7F^C}yu7^P^|D}Hof1b7NN5ZVb3rZu)qvY=#r3k{ za=zkvEv>j6=163 zkg&`BR%o~Hh#CU|#kS6CwdYDfGl?Nsb3@SpQ|OHzi;G`~SU`&q7}jeD5ECzf#U6CN zjf!{{G=;nC+-#l#Eqb)5^xmwHyozRfDp#ZmR101S6Dm6(%$rP%N6Vcq)xt*-+OrxH z98MP=RtK01w#WWD-v=PTV|JAeaZn)C?1k+^11M_Wvh&jwi;l(ScEv7_Opn3AyT><} z?nl9o_l_-4vTOWK@Zt4{jnX@ucMm=kSU9L~`m~c7I~&CnFGnGaKa0iV$L|6lcUcf~ zM*g}(%t@7&1^K^t_yPn5N(Ic3w}n@@f~p|K`JFR;d}x+bV;Kwpm0JThvJ6Nx8_F^u zZ~y!t@vQIHj=6sQt%Z&m={M~$fWk2K4A(YS5K&k##Q;tb+$j9o$SJSIWGok~rC^#8 zay9*P<0zwv{E=PHkcEM?4&VKk(u?XJT5kkgHv8VNvR19r7C zI4bB|OT`>DBCASPdfv+FP-N5Y4}lOJPGuC=TrbAS_bf?7@0R^18b|xS6jjzVkWv_;U4tRx%?IYg46YS96qH&tt!xR9+Lgf;b4m;GDZ-8wM_a$dJi`Elurl=Uk5T7|#Jyu6)bC=pa80 zL%Qvz3w|Gzu&5&&gU6~dgnzAGVyHEFlm&SUU4Peu+z!{{<1Jc_tJW|BRPB3h6Nsh? 
z1?6}at_?6)SpRPO9xx{mMkaG@Mw(m5YH;Ko z&e=u;(p0o~CPBQXxqi%fv}81_ezE&&-y5&(>w&^%Z9S|rf)(dW?Hc;S_odj>(i=!} zE~f)S^}(H*7g_|7Hfwi?N)1L8uH3buy5MdD<*J5GFH65560AXF*6zmhjFh6*5`hq^ zw9FB~YEzgIivE6{A`VkRs)86<2^6L(!dxIya5_#nOkO~I#Fw9b!Zh`biJ4WlG#8Yd z6?misr@|?{=7w4;QcO6Wju@Lj&GU>919H*A&b;V7PR7rURwvR%$9MeQF-EtXqdQU; ziO?vf1O~A-or_UvR{;#v+IlDuRC=0%hGDC9(3TV-RX6ttLP!&`WK^&f!)D}Kkdkuz zQ%oqL;KKn+3OgRS!_6Lyw!nkRItvRRUJnpZOhR=($NJCI3hslA$!0Fz*~oi)f;Q`W zRFpg{jF9($!wjk z8Q0sY9neF>;dlZC&wMCFa9uJ40eQ_@ceZMzepw1Gw}RUmkZVM>*g&-V;!<(ByyAMj zVa+S@vTEk$a6*dOpG0e!7M?4*mSQp02 z%LQLw&bTZ$wXSZ0*g%On>I!k~NlV|0#q|6*s`Ri@G4;1mDr(Sm5@JGNQV=9s*q+ja zWnB~~K@e@!OUbQ*8hbG~OO9So?2QAj~n*io8XPFB~3 zPG7cy^j?Q+V5li(anIai*jHTQ+1G|g;&~4u-abC0EWuaf&Vg|I7Q4{J_I2BrK!_ft zH?a*@)HJ=Jz8^N=$@g}Zwlu=vk<9+3cfsnm$61rY7@}sFePhR9djvy22!yxq@lqPO{U^Gd%^EKK{Stb%te=2xD+P_if30qc9 zK@+w4)dg?6DxnAazvZ~|f2GfmZ_0pl&e_(v9@AQHes=fspAiHTG#|(i{n#j`z2n_; z^y23(yZP|D-bI=BAmn?GAAvc=ioYcWhf-~E3&b?x86*ZkgxmMFb@qUJbLS}RuhteX zk#1x$036&v)V}dQI-g~1VhCX4vkPe!L)h2Q$K%>EE>#Qca%IHKSW>m3LoH-wW^n0E zp6G5dZKF8u08&GUW-3MhX6f&d=8na-W4v|c+oJZmlcYWnECB77?fEfDjVEHg_o8+k#yN z!UH(2c*n%1@Iu{z*nybgZ0#@IYpHR!_s9FTme5@=*aYl_sSavV3uZMjux9tBL!{f_k z@Nqx#hd(;Li-PsYxpoBpM;-4!bofMbXU_-yI|s(4cwI(ze~kC|J+&Zz^dWuk;ggC6 zAf|tJni_MksFyX}>Z_zJ*70cfyZxr!_YRxGK&2r^);@OVV0KjWQjklAv=b7nmO)fu zN$lSC7bp8H0`<3W738YXZO^W_=61KX^kJG!@5791lRu;uA@JfwvD1;CS3lhT+E0UO zsy=Og&=v^22Rt7Ao^ek>9OrCHC%5}@ywR#bTBK&UbUhFH`}*(i0ZRYg?X|wW2KsXs z5!2rn)F?I=w8%U{(4sk>aiWYV0P~bEPXUqwrsIUeQ^J=|PdJ?p`11J)$HM`Kc}9p) z8@T0yWm#JBtT#%V=NU3-mStHpR#P`x*MbldmZgB`ij>oU)^*%V)eMRk6Zf@NF-;0S z#wK@bb z0AN{GOjE{mIGUsKU=jmOS#6vP1Xy5*+U$tQvZe&kPEALP+s8>`Ss$-G*Z`bi?P6R9 zwvo%Q0^Lb74~sS*5m#j9rNIoa%-luNhyz zzNmsz(fK*eGf0YN2uWpKquJP83Tn~shf#5q>QXmPj#|O4Ht=KsxiV1oNv@fjnqn!8 zB{Oo?`m9nahytdh<2Ow+4u=z-KRqD?LM|D%+oFnImn%xin5T%RrxSku`3pWhA26qc zY;|9O1St{b6p>lb6kb_(DG|0u-EEw*AUbuo5ugZ4jU=lz8?$OA{X2z#Tp2>zCny+g zli4EMrmVDg7v1Mjs+%AqgC&@)R61r0I1C%CPALWIZDyMfGZyJfn}fZcqd%`P$kT&F 
zn`_)U0)ze6?;y+MhcI4?LHOOCbXQ(aw4Fb=_ji2n@j!xSz{jOYpyp^tC_ zJWkJbV-%yBy8>$%H$)WtAPEmMk9L*qa}m9fVN)sjo@XP_O_2$Npphysyr+KcTW4sz z&-hVQ&T)cAXY zOlWs!()knZGEJk#&RO9mIg(P%4u^yxjd))l$ewJ7aMsd%-h z7Cr=6)~q!}L@2pnSu#qkfc07+1O*tD4)-0D!5asah%EBidj)tQY*W&DU^hn2vPa?i za7v>Gh7d4MGbU3af-oxFN#?dT^;}(UvDS)`b91Pbb`+6I#+q0Ay#grho1(QesZ`B2 zO_LV$?|_kBcmEw*@W3O~6ts0^G|AA|xbb~1Xu<4u?ga9J$uauZog`^4fMqA7|7J_W z?ZL7b1Lk?g@p#lRoF|+PN0rYsaMD(Aqw`g;=q4XUdvD}iu`C(a+k%(#6}Rh(+ieBW z4O2RR4N%{f6>F}z-7+dKU{dMK!{LBhGioVduDIP+T+R}qN;+m zFhUTdDF7iN%@gM10WnUfTyVWE`1uO4h zf@Fdb+*HDJ_uwif_yI_>n>&Y2NU8pA=DbGH=1dI4PZuGwcJWHaI&y9K+JmRNC^nmrOZ~!`FCV#)L zW%gzOAD$@t5l72@G#`mt6KAgP@A*CZ>(5M^V6q7cytfT+6J9jfwGKX-5QMy?PlnM< z|Ip)I0OF2xced}5w$2M{N=9!jknK6g<3|D(v1r1G@io%@=sNm)_Se%X68En?`uC&b zt^53FfsF5a_+I&jMbV-HYADBu>*)9h%OLwoZR378Uf7S;J>YC^se=rQUdE{88QP55 z8;H7d>GT|8saZ2d+T&q}lZsVdyy4snJaI#tYx3`DE;98oclDrVG`L4Cnss%@b5L%v z{Q3}O+KrmXW@t$sfzt(w3E5N#8*GKg#-lyw zwgK9B*rwgw!nd+C#>0Lb@o%N-cWZ=5Hzlo!5bH&t*GZx@wt+p2vJwCD~fl~4+qj(P^m&47b9Ok z1Ug@Hu1#^s!H{3`*eSR1$OWi3&7$N|S{BDQP`4|iu!NI$1R4Yh6c7?x##KTGT0RS7 zP)7_bZSx-~PaZ+_=&-+5YccT=Qhsx@;cSJ|-T8@ukfwy=@rdI=DOM>Z9H+@XqkUxr zUc*u$Qo!O(1A|2!33*j&-fzFY;_KH7mg|CL$+#_F0SWjl0dY#G95kE3gzGJ1DPOUy z6;Dr301JvWgXmjssbi+ijd=k070Ara^VKHN@0Xx7XE%CN?}jj&bwXd5NJo~_*-ZL9w*;tEXK;B>RyumCBw4&w?K?(O^*aM0%3 z+|D=oa0_<%h?97rA((iEgAp|Pm0lm)J@p2iZCf7v(w1eqyT*H;%E6HL3mU;kLq>bg zXLFA29^ULHZ(s9lgWFUO#JqR-eYBvJQy*DtNJ{0Fv1q=XIW;3#bw9+P$yRc=gOEo) zuYaC)bG+{@e+-#?QwGFQ);^NnE-+{p@{!~A*Bn0h=-%+X=M3QSz%|?90OZeteeWFZ z*s0S%gORi2{b0v%o^CKb0(tkIagqk=f)L;O{0w|e)__u%i8UOg@ zGmZzPJ>(^8f$pl*qGef82I)^`LCJz;RmsoQvIBm}DJG;TB26~D#hrDy+q*f1GkP&syIxg#bu}QyP3PQ-?keYhPzZ+Xz2dj8FF2oXChM(#gc2lbmL(U)S^$=SQc1IH<&4`+Gk7IuEXx9?*_2$YM~C+7 zC`D^4a#1L+UC(2Cb}DQU-S>+iALGV)qb3M{fY09t`H;sa75$dtem5h2>7HEHI= zZDw9g&bnweu&L>wz_U!j=5?A9VhT8%PB@)Th|`4YZNaiEIG?XLpD$Q$D^et!4hdgA zKjG)UJ>m27geeiq%7`SGCSB9VBjf3G#C2J5TeTkRwiYaF)!M#LAeoJlqrncr=vHh61J6UlhtVft_UWmqQ&aVszvoG)4P>2eJkS*bf;geP@W~X#=m!L1pkbEu7!7Y 
ze$*p&$;Zy$y|Zw?OdXIYC428~lLf7WL$jHC3fBE)P^VfA_dEVf7L@YKQyF&8Kb_{H1)i~#<{d&BfM*>otaoYPpXh+!{&<8UM_n`pj7Oq-1 z188kdn*+!giZ&k`!QKm@7r*hKXyj|8-_zEa-dyGfYv}Ro<2AXKi2~_F8o@ox0iT~H z{QTvBUw;0CfBgLy{QS4icsflWDOhedoX;1$yqvMzGM05kDS8hcWn>oQ0^_k)?O$Mm z2NVKQj5r*QnC4_-f!!Y+<7mH7+CXd$*?7blF=ndn#%r8&*d@dF+z6NqrD#^8mI7`y zD7k@0r#gy9-?(p}yds!!@rs91dHG#5;%BIvnR(&TCtH zk&NC&DknTm2_c#!q)4NE>Giwfmd#5(Y$z%$T1?NP6v>nnba#thR z(SehyE~&HM7~Ghrh!`iNNgEh~2t6Qx=KNfj6<_hHJ&{U9=ky1oZ|OY-2ADO%Um_4I zkP=WOgV>|;)>P0l95C)WwLFn3S4|Pq6mdMxs>(J`KpD)$Bj*8AajB5bsGTEmd)TczT=qwnMZT!WdD0= zKqN@etjGr^+GiUc{wyBs@_>p8*C-88Gome{X#o)pK-P!uf}ej>4;;Vy?R{osMxcX_ z?>M^c@pF#f(5YAL`fLwycEzK8zJs-4z6^nvyKC5GuYr4^6T#bxwT}+Zer(Pm^1K`9)6T^6KA6Bq(w+;zWvbhz`q0U#`(AJDMxUw!DU z4EMs)q`g}D*8u}FIP?zkM9@Y59TSXvGjgYN7;Az#u~CiJgR&95VsCsm{@Q*X1ImF2 z6cgG_5O6vUJx$1+3$!TcnzPF7?xX9WD|3VxO>!@6io@I?C?z82*ql}~CE7nB0=1g@ zg^z(pBF4x=OUu1??5+O;5aPdU8^#cAvFqy5f=*fSSC{}|eqi#(t8hmZO5-3jsa_#K zI5fa1_8bDe)&2LaJKT-O28bHyAOaznx1dFdF#_`x@%%L7m!F^TZ@)a_@4tM(m(R~Q z%n5m2@%r)`&Mz-mmKCqB8F^*oQq0-b-Zko23vyNddSTr_*c>@)r3nPvD$~GveL?E2ysdXsmuH}NB6-bAb2Fc{~Vy%j8ZEKRGllus1eo_ zt#=3;(5ip~IC3YH6a-N)4!u)WJaba@I)^o_*CBw7hX+4gN5591TyaG)p1BUu0J2G& z1m;?>mW@MkE7o!obV<4AhFeWR$go7$12y+eij53)colQ%ZNDI=Jq?(c?69!aHQBmi8f-Z=Tc?Xy!CJ98 zDVBlSN8cU*9(^YPS|-F#pscqp9)ZCR`PU!rBlO?wvkJOi-VZQuqV4EA(YOz~e;>?X z##SHV66ESLfK(b;X>cB7c%x>*#?Xa4?yh^>Uiuxv16$;_eFPsK=l5YLukwM^`gip2688-^=V>tr z`|lX3f8(&p(-*3k(=?pvJB&$Ns6JVL@58q75Ob&ktMkx6#xM@T0uD-P(M`5<*1!3> z+obh-X6?Pw)OVEIIimP}o$hE2=d)jUhN49S0yR|^eG6lr(y$m+Qjy@PP{tGqOV$Q; zEec`hjRrmfDgBB9A;s9ELC~mJ3K3JF20$2UNNTCAi!PfaXZG(IfO!A!`=SJoqEFlP zvXA79Acy*RN*fN1f%|h7SIlpN{OBrBVX1bn1qp;8Xhuz&)f!PvdL5BN5TExAn8N2q zUnlC#%{=+ zb3R7rrfEXuV|xuK0BKe;gn((Da6D>f@aT0e)B&p)oypaSJO~j(Y8xPlARP|cHzgO; zS{!2{hm2IT)hzKI3l6iwk5sxgA0t^X#v-PN;^m|88AgVnpd~>f@ zOjW9KReO+96BK}Da-4te^)<_qk=Ls9s$5W^fsYVx4KRoWCD`W1G*5aNF#wcWU6vo| zFkzY!=4nQn>~)b=6b~rnX)?)R3JE3aeOymBoaZZ+SRf2kHfJ54&D8NIRe|`H91vC zZ!9-jOAEs4=X@p0=5-OW$TK%*5$NY?s!PLK+8_5#Yj=l8o8EqGKJn=D58brBmiu@P 
zY?9zR$QH0X+S9jZ_t)9Z<=%atm%7&gWC~<%FMPhs#%6ABRzDv=cyOO&{@?v8=3jIg zA+)Dqhkrl3k3V=29Ox0!e(G(%u_H|ZH2voPvyZU@6B|P!E&B6| z-KVl-_jn#9pY}KTO>tG){-gCfjA##kk2*2jhhL{YOxc~39=wK4V?I4MYZPZxgpr-J zAYRA8diBPIe(ubQ#YO;JIDQlny{W6|&>A*hzW|JLoJ7Zy$_0SKxvS)ht zYYYS(P;bs+8=wHaSB5B$)-To_er}SB*16st=yqDx9iUx@3>gy#g|PIv1dJZ!WFliaa;;QJb{VK@$I%j3*}WdvZ)Ax zO#Zfya|+4og~|X>>_6t&xZSYT+ngIWvq=O}1^=J9Yh7>LMzW_0ASK!EBs2T|Pq^P) z(y=X(1W@~gP8RhtEMhZ(;NlLtu=6- zgY9Zr%YOfffD>ar&C!sG)>U)an_$Wl&X)@=m$LzO7!|p0LO0?)DyLlf9Q^(oWzeW% zqO1%>N##{agp_sP)mlMGA_7utK@~=p29X5NWT2y>HW)*fDfZAR$ylTYLIv}Px7O6K zV5O2J(wY=aHQG>0!Mbhg)#qeqj*Km=q7>byr_-#xW%3PeRij(X(plFHO;+S7W6m?C zoFb<^PZ^iX$?9oxq&bd*nUS+b-%?H*jf>1{oGg|g> z@OI{w-p*k^q`d$6*g4zjX~*}ERKt(6Kn7>Y@Uu?3y#M)iwac7KXK6ejkbPv>v?|mzaXiwl}Uzx3jcw&4VVKf>;y< zD8u_~G?U+>Jvs8|^wdt3?TKCA+B6FuMr&M~!Wsk9@@+57;dA>O``=~^nJG*CrPTdJCxQ2`Uw_Y+%+($KDJLC)1!oN$;!Rj69Gq4d|r3b0$4)+#{O zOp#oW`%dU-gP)D&(V3=(QTOoc4Q<3$1*IEahWK^q(l@j>9teESBc}|$}tj3)c zQ@fXQOds=FtdZfR%bvcU5NqxAZ`!q7L{QrXu{m54J6X1`(4+qS)$BPP6u*@%oLNH5G|$j_gG2))W=+@{ra9q!nep^=(MT5q z+gh=%H{7l_+-^72t%8Ix<;iwhK}FSQuQ_L0wfPx|1RPvj-;`!vMhAA=R<*8zfi=~L zPE`A9O>jD$RJJASU`?9xm6$M}CcL~n;rVjLl-Qyi3guz40Ya!-0W2!nb0cLiw2fpS zCnbugld^S5(`c=b)^ru6AGTOrH>1KP80KBX^u))*L4;T&Rq@1UdeNDgtRbS+CjO{C z|K{sWKlL9EX`t9Y>(Qg_5k47zcJQ)$uX^A8492!;3~e_~1Bt=iQ{3H8PyHSl^)%a{ z7Ts&o+;En?5%-PZDNGMjoW%6}PL}LmVX{U-=N|_PP~6FTps^MBUe$XJ@PTe$j@OLT zaA#5+{^oO`DWVUaeXNc9U>y6(1Q;IY>Q5XyY+!5hV*msP1KNzR-GtbM6Q`i-`>V!y zz!Zqw8}hECfH-UL5Vf=x{Q1h)Tz?n{JOUx#fB!%j%)b|eG=cDbv$?1dD7YYzK*b}7 z96`uOBk_G-1NG{NdNurTnIYkYejW-O=icpQ4r713r1PBkF9g(-<33r#B5}`qa}R3k zp>{Hl!*~fxsZtQ4hct!I65G=p#+BZuzLyh2n4sQkLIq)#gi-`stwtM3QC}doufjm{ z{TCBp-}s=?++8!g=6?7?rQtVX)Ugf}1do&eA3SBryh3E4FL46Hy^HJSfMmqnr2M1r4cm(SX1soVaLA$w(j%0 znYLVeGH|U?3rAsiXp|fq5jp@-8;Oi5uAif=Y=AUO^Nf=SChJ|nL|U`3uG*u*6w#x> z%my?PrkpUB9PeQN6Jf6*1AB`fZ+A(_D3Q8>s=xfDo zxnfzaC?x~9Sh2g(B}%p)m)7%-UUC!9_vO`Xcg<|t#Ht$;jPe~l)V+uqX{$1!?w zA>zFQ!C;)vXS_T=;e0+pAXt_a%XP)JmPiL{7KH@~a?V!!1>Ntq?JAG@ZvbfarN!NP 
zFWj_NaNV>(n+PD*D2TEJ)`2!!>p7gPK?I;CpiDTQX1u&S3_Jd`83=YU} zS-Bi>Rc4WG3YkCbL(z0c_x!uV6o+&2>3nzu+V=cib`7~x^zk(*UPIA4>@M>3!Mdo! zM^lUweLwiaTLVs2(bI^`z*XM4+OS0lPmVx916&35M?Js z_uZG{J`a%)&YqJkAZoj<6Kf|D={J!o2cQWdlk)4YRZ|Dt+wM#)t&T#T+y>o&fV+F7 z!RUW52(Xm5p8u{3Yen3)BH@u6-@8M36?vY(=8b2F_Tj*N!lbwohD415+4WM&c3)5Qt;Ji?yg-H4N69b90k>orjygXg-<>eVqml=t*_?QXEc>+;_2#igs>UuL! z1XxN@qsxj?tMp^?XhwZ)G`&QP_dGF(CgjYq_F~T2B>k-EgJSU1sT^WABAGGPD0JB{ zfFXJ$P3$;5&d7i=SOjO!fu9OXB)7}!L6s*evK&(lmHoRl2 zHy|4F=VRU+6xrOE(%0IN4Xpu*yW9=NJiAU6D*zWwQ3V)No>41syKPvPg4R}~S=XZ$ ziH_&332xT~?{8PUzrUl_6{mT^-~RqL{POcJIGxWRqUaUmDWt0WEe)(`WXx@vFutG$ zelHaD`W*#ur<2y<4NDj>A^;1foWNj|wn0jRCO{0I?f+7j zOYW`I8X^KquX!AGqs~16NC1hkQzdnlXVVR6V9q$rGcNOloKz2ZYE$CDaUvA`4m5qx zok&hO?M#b=L1 zg$dWshKK0-d!#=nMsEedyOIMEtSa7Z+LeoM%G#7!B&8|(4vcGk1R=aTM19sC-G3Km zUN>MV6^FI-?z{e#l6Ore^J=ORbB}Lp)p8=$s+GFgJnQp=(*8ctQb*E5*o$hBPOogjHauCV zTDOtSTiv{|ou|rl4;&?q&csBT`<)EfwNfCZf|61&pC-cjoRHY^!1HW-B!R$?*1hHP z>4XLW^Ht~C=hrr^u4QQ5ouh(?4Akg4y}i9*D+`{U&Ukt{<9t4wN87xHWHn2vpL-Dn z$OPbgnjm$<<#NF^TbJ}IxLy~$z2C6aX3?tD*8<%GEF#F1QAGeMNNMymXf8W*#e~jL_6F6!TQPygZ||imzXP!`H8`02ohCPaszLsL=|- ziIOoKX)P8JS@SksOK&upQr^jO5Me7?+}>J4^7CZT1vEyI2{WJYzTEJ(ZJT_i6Mp*nXZ-Zj7tE*0K=hbGt4wUwyqS;BuYHG~V+n@}NHFJw z%hLtVFE1Jy62h`AC`Iu0cEi`Nzv246Slu<9uQeRft7UeLrn$ zfe1y9l#mwxYD?oMuD(Dj$lca1C=k?&I2wrhAOI z7!a&>UcBj{J5bo_uA+(E(H{C>gz*mi1w?|l>}TS!JT0G7PFyX5mQ3S99a=ALoIj`drtwpc;?ptCJ@EiJUmZ# zunq=$XwLq3CS!CP=Vq`Ab$tfg%59jGB8x%9fK_u{&30zZ;gJ$B*c*CuJ_a*BJo84= zLDz1tjrYckwryc+DhQ!n-H$%g+r;gj9HW#4yCqnD=|*l?q}{!TyzJoX6KC&y>oD|dqOq1VH&V+w)db$d`K zq6{j29Wfx$05uN%5+;NrHD(wi2+{rShikky&)3!qN^1xWL_M!ky;7ggrFzm+5Aq67 z%b5Fq3?(KF0PjGD%?k~TT8};}r~sAm3F0vk!U$cw&S9FZdvenvN!I)b^;jeVQpQv> zrjjwu-e;nD|5tcJMZf<9fH+~R1+A$k$U&k%hYUYI8q$>R7_cd4j$Cp#h5*r&C4Wx< zO*5bk5Hwb=FcTwzHD8-8Iwg!bFAS?fK;s^(ck+s+Po71wOMg7MIr8^>nRv7 z0FoW6o<=aAW~2n|1p*2MmRQ+;nDgL#>CfnO8YyYoRZb~>r_ZjAi>XbT?~nWaqt|h| z+lEp$dvnR=mHz?(n3CtC(61J`3dR_5$T{WsigwiUO+k*G^8?M(ynePS% 
z)${Wi&o39ua|SWcL~9t-0cHknE!LsDL1dqze3b3Y&VcG#rPa(i;dDBIlh$3Jk);~0 zxZTuHU)N&kL0zV0(e%)7Iky1QbDY4*X^6#)YTdKrz8O_~FxUddsA%2DtEh7#HB7`f zO&Qa~cAh+?<%)bQ|NQYIj^6tSI4~-jNP*_m zWdM<-R%)$-?Ccjo(AYhYosY6UR1Z`Cj{TfR?-_I49iMIlAA6hW_m8k){r?AJ{VvO+ zjk3@(9h}^O25SBfJ|{kJ08$Qqm5r}My8bzWW_`(=8a#12tO;T+k8FqIX9It3^kesV zw08pMC`SIy3xq)rJT`=e{r5*kL4NOWML$MCBIO8fKp(sV%#c#7SWyi|FC>DoseJyK zfP064(c{rKtw$4lD)k-?-~o>h^>Ku;`oUC(FopE)#rFAPhNB;rSg%h&$aq~?3GNPr zcp6IFl0Ju0x+-nvN1njp;(A}`htiqtJ%1X999*V!@D{0YNdDQ{fnc{MS(>F%aTL32 zA)_0pmWm|3)`7_iW$CFbd-{&LLfN+(fjk7onm3HOQ6? z%WcEIzy5~nwcxgF*tQKw-4G5?DptpLFfQVC5gG8p^n__DXiXW`KxEOO4;BVMs||CW z00G!PCn!qwylkZ)L33ke2SPl$XiQeg&yGd-%wR}qLduL(Qy8J!wxMzjPj)y^e4|+3 zkCc*ivQFS+QB*f5g}{{1L=DF_?jNX^UeA|CveYx^II2OB-XnS0?C&~n?r=yc<8ppN zt%B?Iir;?w4bwEi={VCcwbkn(hPvCxZPF-`iYVA86d&sX~N}6YlIrAo`ccb2(LR)I%NIN z<>r1l`ic`^3}-nf%=3iH<&1e&CZ|#x+SW9J_Ikx{ukUz&Td)=dIYJ2$rm(bEYGUiq zKRb*-GrE|w>N-34HHpgHC`PZLYc!R}&ylBkl7bOoNEI|GNGzBwI(VLE%=2mIsg_cL zW_Mi*-rujdUa#N4mamqi0AjTK^+P z+#j)}f`9*eeSZ;D8F<|B(e*zYwd%euMSBu?v*_`2M#ngqBSr0SLxYoa4?d0=7=Pv% z=d?e+AYV=s)@8Glh3`i}0t^MT_939~b{$dUUpac{$yo{>Bd5I=AAx=&G+XNrMnUck zncZK9RIENcgu6!)I{;#V)v< z*$QzprmTHeyg^)ahZU`3DBFguR=mBvnfE;5=``b~=O>)!Nojd~FIg=`DZu-6#XtY` z6<6~H-)<|GwP+Xgs*#_`LmAS~JTYrMRBT-{vg~BQ^4E!+Zn+GpDSnJULI2dy#8NI1 zxH@zKn?jJLbLZ5^eccAZ#f7&ZU0SLs`TkltmJQ}WvUL;IpbsKEJ!Hrdu zg$dU8B!RhwV*ntHU=TWmCRyFbl#p`@;C38dyj(rq!i+y6!Zgh|pBlDp!|is(x~^E) z0wPV}Se6a9+lHs7GcK14PUktghpT72p%#@n>(bEb3gQiQD?uniEVx})ZBkhqq^+n` z8Kth*1@G4@iU=rDMlwz(fRL=#MI%MeFSGVU0koigF!UbkK@xL<_Xt-98CY78;-L^AXWhm*pmxKZ>Y-Hk=0B9WKYWs}87%fe;Is$aJN?Z=jR#!xZT3oQedI~$k93$O}i2Yv@Bqw*YiE;3r41#h&4^3 zjkIiIp(U_0!quZP{7oF_d^X%P`*<`H2uw=DNhbM1g@&Qljn*+mDLf*^jP)=Ia(u41 ztB&{>3CI)-2vn+uoINT-kg5j3S_*_L z3~z>|UPPPI2~}ImP8``IN5f(By#ST=r0x;`>I@O`w3iBzYKlVC(h-;iuO1QQ7_Z^w zVDr5B`gyuYc(majT+7L9H9Xub)&n2_r|*XjK!}urlkGa5JDn3FPYF}XNTlB2#H_4Q zCNOJbRAH>Op|XJzOMOd&ayH#y#y3Z&(q#L!`u>!GUX&!d@jy zg<+fZLJ_ay(LE&soUGv*P%&kz#}Fv}XQ|jokQ3oNC7e$gQ_9GR3``KDR){cKOOTdu 
z+Zc`f+zV!_HSoHg%W1}(rzqkR(E{-VZRVJyVZKaQ*A?dz1U&5<%&jYk=ouwph=D*7GqNBNP*1 z%B+!$*08Os$%Cv7)WnT+>JBSvCN~>|)gY`UE1H#RmReCnX;!Hzx4S@DxL|;jKm$N> znE=lTw2)V45@Pc>YF(0s^0%UO2!0iY0P`IEci3t&+CW(ibR(oUQZw?0a9kPHF4U+w zpH4WPW_Z&wonwMD?cJf#WMxV$ah9Yl8bf%dS_8i|K(wVUo4j|QFQ3769em-7iZPmrqnMT(|`U9UIX zmQ9HV8Ud6e{W4@QxkJqeBLdLc-Xd7ey!918HxQLr!aIs=S1S!!3L(KV=WbcjEE#)QG=HQH|yj`32 zsawa45VE(rtb8;c8I1c>hr)fn$FMm~VRM!;GG4jyP90!~ zMdN&)x|s(Eb((QBG_f^u`_E<6AO}Ai0scN2cYE$|Ne3VCI`TaS( zI3C!B9)2)}XY5Q`4hwoX_yB(NrgxX|NJaAh_t$@lu8(VId5wZdEsJov9RRe7TD2I~ zcLIl@+hXH>E3Iy?kR|TlM;OS><^qJhuLtXLTa9pHNNTvc{wxYIb+IM4QUh5Y78q&6KX|mgbf62nj%q3 z#VTH;E$~J`Vhx%I)^(iyo*9jS)b{&ML|ZR;gyp#HRM_^Seb13?tyoCgdu1TNN2k06 zPLl!)RFo{`D2V{`!Wu z_jjyY#afH@%dk+rQ!Bee!EY?UAjAN>Kt#X5heSG6N?Is=nl-&9yn?t9a;$!<+BNBB6r3t)C zRgfTlfXM`}Iq(ZHNQ-?%Off3JtwjkSQUa7+NSo?u;l4hb)N0DljJswAiwIh>^e3a? zO)Swx+fzlE8Ryg4CfY zH!QcCb~3L8Ai^|fZMyjUglV1!<|LD4LJ`4p=ZV*Bxcuv61Qlb7NO+bBXb0D&7tb<@ z8EX@!oUPc|0KKSVrfk}$rWP&EZx9G~GKV9p6Qtjxl7N^@CeIECOUAyb?j9X*6#_Jh z$Qhu0Wn6Cwnils`({xyy>uE|j%}FC5*`GzDF|}5#+lFmhl`Md?%lql1psO`y65Ohw ztP8HU1-ILVW!bQ78;aL3n$f5Z7B)|@1dA5T0C)Wrl(UAyLiKa^?9BH*QR4}6 zCO~uJ3*R|SWA6;3cs61T}o zJ5ja=V+h;s7(gD+frVB)d`|NKB(ic6Nhf8&v%hT1n zUH>`Qcr?BRJ|5Yg$KQ=nDk9G)(2;-s@`8W=>o+XR72!1&86#bv+#Gho_QRWWW9Sn* z{ZAjU*CoFDg!ID#c&Azn&*jk^&!tlVPrC&@0*3u(+qHMV#{Xn}CsNwIMGe$=QU4x- zlXi#h$A%hx0-#xUZ(k#|lhx4BLPQS0VI4xg3w*>UnF00a^#to5VuIa^)q78pJ^J=} z6G3O_V73&G$s!4Y%p3qqTGNiuZrGD(=>j}g*shbb_Jd78p63bKa+aOhC{G&w$R@Zw z4Tu@Y#K@Q+Eul7VOjl4^!;}g0%#clmG|T{SfKK8?@3se6kVMnHIsnn=0E-%fQ3r#) zLL(?k3>@@%7`qloDVl!AT4$124S^{s@R&23;iz+{2D>R-5lvCD^bKb10)3e?5)-!D z4XswBNhvY&lrfzqzzlvsl|>prk8Z%;WJ>E&lzC~&YS3wY1mXGl89%)|<1|eg#k8)_ z*Y^cqe|y8PUtjU-*H^s0U2$7h)J9Pc08+vSO^F*6#{fQblS{}zHGWCEP@BhYn$uuZ ziUB7Lmvd6~Cxqv4zvqbv=kpoUG+7Fl8sDqcBh=a+2>Wip`Z0ImPUOJj5K1>0aWs+0 zz=49+ed6OZPvDf%(6E&aMDEYEJt`D))I7aMxYsCww9kh1;c2yEX_n&W&J<$`@_Tn{ zBU1HBMMaA=F{h>_sK@ktI_q{3t)*EOON|x5x)xloD+m_d0(Iv=>P57^%pin!mr2ji zx~|yDhE^KVoN+my@$`JbX`V2jPUh7Vh*JR{SBesd%m^C^M%)Qw89=rgYTz9yTzxbJ 
z18n^}bPt+WHO@=38Yy*fl&Z~7y;xV=5&s=xdQ8)Nl!onexJovmTu?;rS=GtdP|U+= zQ8jK>{LK+XpQlNiF=lf@K*De>rD9!IWjQ1Qr39W6PV*TlWz-5_E4ZyU+-@tb?>Ahp zE0#sijy=inP%2-Fb$l<8uFC<6B=Uaryts~MG-P44rc|;do(-WSBX>rZbUVm_%N?EX zN5EtFyRXIGn5Ny|$H9YawHMv*OWW^kdYhA8uScSNi%K~5*!F?55*`P!Wi1$hY|Q~4 zBHH8zdPpMbpYtI~dNh1L@6e#n)qU;9P9A=jY4pb!q#jiRFhth$`VMYCOO^N6yH0R> zbZme)f>{5|k3hv(c>b;CwtxB{Pd&f?Q~vVh1z%pC@b6z=4K>=985|%Vj)P(S;pf{V zvMKhz41>6kcn}2%Fy+R*{r*uDM4EA;e%pHY_y=ep?;+y=3 z=c8##!~lrYOW6#7XmKjsAPe9zxNE*SjvkD|Ux_ULzQuEmU(F14_uS}EkdTrBBrT}- zG%T`?%bFwY3{`4`8E625vU@<4Hd1K;#f%c|;o;tI1t37qy(o{(i=%xHU>&=)w*!eP zt*sWMtzpUlctXyCZ8N&U=7OYqSfWR-9l_D_A&tRCD5F+}?U`DwJ93{-_#XAhNv&wL zsK*oPm7JK7vPO8O%s3e@zd%zaH3HDjL3sSd>TgUTr<}oJ&r~a>$(ydtIGv`Qx(PYp z9$FD-%9eW2yj!+EwTRA1&KXZnPk4TQ0>Qk#Yr$<%`r6mmcf7v6OPW&Kb@; z7RbTRxdBC=L@D;fqXJK(cPFMiA0y(Y!MeCC!|1^*Tc#NeJRdT$pv+-Y&K*SAyl9g_FI26{uxBh%(QNOq2Ob&04^M%J<3lt-fR6wn>L&=J zqDoIy0)Pa8ur?wjYFblA6mlV@q@M7BKGf@fhzWU`Y~3{COvnHU%1!&P^!l!XQpe)j zZt@Z|<6^7T?%54`j>uuGkL?MuACst4EN5fYtK9+VQI-4o>c49uq0fGF+^x?6h#ddM zg8}Z%74KeWnuVZgkFGn_iH2^_Ydkt+zo%gD?hm8v>oU-zAD-Q@OA%%Q8*OjY`u&K# za%XOI`FwQp{AUi?-#7Hk4pGNpPm()>zHwhZkK49s51+xwO*Ew5`gQpp7!DUR0Mcs| ze{jbC(9vyZzk|M@d72V_`P&!#??3;ExAz4=2C;^+k9+yJxZX9CWFJlV>?X!y%K!Q> z`Q%ZM@!tH8qp>t1O@=8}TU?mod_SwbTq# z@&cp0v-F=@JG+g8O);<0XCHmMFM1%j{dI{MNGZUEgFQ_V+h5BSedMjSh6;eln^Re@ z74Z}UGuB`Ro_Oj}R`9}B$B~puRfW1lGdz!guz3{wt=Q)q{A&;t0G?vSaF!gc?VzgH z%$cwiZI(6W7(1Bph{iZm?$l0}$|$7y*R5$G_|Z#Miy4t3#!96Hw>c#+C1lp_!Sg&> zw`Q&9V2ijENc3rNC8YJ?%1pIx1>0uL+uV>C6lKmCr)fe?Ih32Yx77UL?3BRb=0fMw zhefAI3$`$y6wCmE%^J)p8_|OI>w@cT#r3AFSL;^P6K9CQ|?+L zWY#&)md?|xks?uV45fRsfHvi{&(aN$C?GV?V%Z8x-shM1YnW?>e2=FvkZtD!(0$s0 z>u7lSJol*C*k@%tdnu=Qs~hTi*886BXMq4>Oj&dBr%5A1nNqN@B<9Y@0+m_K>V{NS zcCLRBK~4$NJoln=D?s-><@wk8RnbsGW#uX1>G^^@WwZ*szP=+jK`srg0#lyVFoHlE z)n)B-!iIL<~R&x1|(72#K|}E@h3F5Hq%|VOa{Uw*~Ll6}MZ# zx@z=C&ygF$9bgnu9acrKiPokuc|C)`J<@D7_@>g#Tdy@;u!tNpv_`7k0|egzqt5?G z&y8#SqvMfrJ=!Yz{b1L``+Auzn@l4TlfymxL4WRq7(3^yIXW5)EILco*r7bC`up$H 
zJksbZGh(0J&0!npj+>=-jUvwGtvgcob2EO|XOA7Wd)>}ye6Rz*XB-_5V#=8?MmP>m z<2zfk+c6Ngvuc13w^=x->wB8nc%86MK|OsQy?Lht5Qt|q{U>hfA0NB*80@#vY3}+Z zFSje6pD+0P-@oAf{athLC50gQ){r(RbC-4>+*IfM{)+$z_J@B=!y;#$k|C;dSa879 zjnxmxa6tWUAI_TiU;TP)p0Y0R)!^p+CdB~R22YRfdwcKDf91#K0sxxTS(uUFbf4%4 zL;ni3kg?xTkFR+j&CzGSSJd!>2q@Fa?phLx!F(wI3=9CnytxMwU~Xo#nWsG)v(J#z z%OK>G2L`5b1&yZJ8+6&%Xh=6{KqIT+(3FE_`$dg)^O$qOG*6&X)DviFV>vNurZCXP zgSypXX%>1f`-YhWLhdPZq>;RVNK5x;ttMLyBwF++C2g!YCFSBjpJ$v-XY2XE;Z0kS zvDO8Ip2|XKs9vYBZc2N(uBc_QXwlHzi77x!+!v{bOtm;cS<6fpm;&ru!{68}qNS%` z-4>Lp&GeSrie*u1-P^KZSqs*!VyglK4Ku~%1O^Ojf5vGf1En54%bEhlq^WGo-7QgD z*N7uim3ytrJYlL0jkVyqrv~WW)6U+y7S0IeYwt!HlLOaeX*c6J+Mm692ZR9#@$R_8 zXV{-LwXCzGP1!qAw=TZDP}%F7EaDLWjmT0vG~G(~u*5l-QFPRe)Ru0yEifk9E2Q4^ID+RlwR5*Z4RCvMLNdEzq5UY z{d|01%pdMw^UWc{(F!qjIL{UtI%W?YKc9^Wv{^5kbzMhF7o2Ww8)Mq_%je< zudum)aN@pmGW@^*p6BYUOE7jCO)(O}HwEWj3m(V4S3-YnUO`|m+J-Bt>qQR}lSQ{X zAW*>L9@g$X_FVh~L1yK%(7T|BaB!0D8H?L7HZ1mT+eDSL#QGM=8Y&+sB99 z!3U8kIP~??=^#v#_HOZ%m?Oimvc99Bd)SU%~Dewd*Pn$V* zq8{hx_c&jF=75$$?9q>7#9rSgvFAr! 
z%pW;Q2cWSt;)k!#9IVIa$9+2MpJ#o$oPu~7>is&6d(8ohKWnKSe7M0LuebgEC_?(^ znVjH5#$E7Y z*Nx90HsRt0X6C_9hdv(HR-OEXvfQAIzO{&kdVqQ3_Q$E#8=trV$XgC^8khbYd%dZK^;$ zZ6GDBn;=to>$ahk4W%#&m6#8uJv2&FW6+_UrE9@j)|85306=r$3G<{WG^fls&l#7~ zgwshqlErENns;cHF!lg_PBONhR;#8zy=Pp`b96ms&awAvtw@=`(ky~vN-#EWiyDcp zbgI=Ju`!KlBmn5$k*~KKR!cwGil&1V10Pxdt{|jgG?edwuh$279|tCFkeIUq#uNs- zuHPNdYyO#f?P`q^Lm@Z+o8N^qNFhEm%uXx%(Ov@%A`D0jhMPyjdQ{>5thZ)H1`m-% zGp>DHo-QEL+Ku@%BQrtM#C)ziearjpw7c%mqnGrNa_j~nhvA}K*%z&SDOCxhTMIz6 zdv{ptYT$_|m=g1RI%iXphIwW%D_at$gmtT^#SA2=x+hrobtw{OqBeC@WSS7oI+=_u zNtCRi4f1@H{fD_9YjqS*AB_dryz1BJ1y;ueX|E|LE}!NZIeWAQF^9!_1 zK0e!4Q8pzNX%^-3dx_LGX4Iy8q*Or>Z6xSz+L=Wu+?yyE2~{Ss|6Sj*HJjvW?;*KQ zJB#O`9r!q=8MSXM#CMG2p6V$$*x4LppyLr(B57b@BoQQwh9~Rqqz+Z=HjO~nvO@dy zY~JM3LfUcm4e>@a-)tKP=S-db%A*&Dr-_=yk%s$2z&t+FgNVT0YaYGs4;s2(2+f)9 z4xx{`h?vcfh}BmaL)!>GLN(mURs$u6Ylhw!<@BHOw14dQKL7v#|Nq>*>3SqdZY_9V zi+eZPaFDLo z4cFHfeERea{`sGOz>h!vf~_>uQ_nYkhmK(79~=7<;1PaEPh9BkJQaO;UgN3*Hu#L10B|z(Y`(AIel7x zuN?e5so z1fVL4#nu&{zXZ!)fJLV-+GKUZ}n$2=HZ%#(2$4!V;Bn&hM{i{q|c7&efxE8q9A?ZzsK8r&k2EE zpKt1o*z*WrZR&>5{kyp#_VzJCqk300Sfw!%JSRdoQxZkpD>Kjt2!d>UPE9oPs=D`^ z@}t8!^W$Chf4$iPb*E=WN&F*0vt0YV99aP{p(nc;nCfI`p(2wT4xu~1k+jxE;D!)b zBO%dq69@!0#Sj?HDuRVj8Ri6pE{W+IL5zYJRrw_XNKvaN$2g1_vPG4Gx-P;Mbr*mH z=hFo#CZrIthlof4r4-$e`;1naNMq^|s!~iz3WUl6w1yA_c}cjOGfs=rrPd|maz1MT zKx?zg&180U;n%`yQ47}jxFkY|s(7V|+k}^mfnQ$H8u0vd!fA~v6U^$M#Ht%UrkItk z6JC!6U`r|o6bXO+biw6(!MZL0sH3!c}I0uL-ioN5qEJ#Z-$6U~g)*I=HNi@*v z;1&)5!2*#DL;;t}N%OCOZaArdt`*d;S9O*fYaS7E1u?L14O)cM^58s&sZ&Sdpjq~3 zxY_4~9j4HHCkIAk-U|gYr80ml%BAO~p!Ijwb+!5zD*an()%>S2Id5k4`+O1+q`Zv% zfaI`FGPx10PYE{$?TdX~B6Xk}E84y9MX%pqemLN_QF?6`gIy%Ti}8_qIXWg9I&d;0 z-2e~_0Pvb1Ap+aJ_k1Z=N$UnbB{ghC?B5aF-mq;OuKR{kD=-G|%C$GiV-t&W%q0{cQva6X4Pv zToFW4fq*3`7?rcqFWrMM0$vA;dMycImC3gEX(r9Awpr*7jvLGoV=Z!dW`S1;H|Bt& z_3n6xgB!<#>>Wc0S|IVR{bpcb`kd%`8T#5Gxc%rY;5`lT{<8y2BszmE7TbN+uC!*t zB)YiWy#{>Tdg0b)GTNo+f+!{9KLm<*cul_{0349QFDID?8WZ&I z9wd^H3(_M9)88HqqS?Wky-&JXkO!3Z;I{8LA#R+@VG`Uf^wTK&gKg+xKS!Y}DiYE` 
z3BwoBCR#;YzjRX;z|=uLE;~54p;L(ZD~ja>vTx$R>`wWHOBC_e;Zzp@pcKG?AQTM$ z(8*6h6;fJjkje-_3#x^7jIO92I`t%6$59wjsXuD#+r}tJNf1~$wyJ0q;A@9x9EnLj z`l8CLMqu4`?39pl#P)BPa$-Hd9Ba5kVTGGzN*l%z;(W>wUcs6AIPcfC`Nl?# zY1IlNf{{|h>7*63HS$57XUrOuf%!ggGnoO6Ak5{#;fe5$ND9bMz7!K;ia4*QVJvuJ zq#K*IIp{vf2tkdphlg$0+K;IlsXYoHev%oS)}(AGEDIZ`AS2pf-;0z^6BCwhY>qWz z^vkiIMqjz}ak*T&^YKX6|L(QKX*c5@!JK8Co7|%+0MwA^9C3{>xWus1@vOL(q4&O2 z)OlExrYL1UlApUW|GfI-O4YB49mbFrRU~?#MZ#l!lkqHh{FY`Tbp5X(BQ(oO5^a7~5Zb3mQ4@b}Ufg2!NXG5v2zT)XPC`F~S z_pM-G1E`fQsr*RY$n)67yLLFr?gv#@N(n*4)8+Pt~b_Xrn|*Gj(J1w(r2^kk~re<+tS+W$y3m&XnK# z)?PcbjQ{gXK!N?h8wK49aE-rGTZKu-F@i0ev z_dM>Mk1)~0brGK(CqEJxxL!B>_SNa-N0yrfvkpZ+#Mpuq83-`D$fBKzY`hiD$SF6r*&uFca?&z)p+}R zHv*YQu{>@Kje%qx=jb+|^(1hD<=}+7$h$kohvy|Ah6U6ekWv35h6E8q8cT!IWMd@6 zn04-y(vd>nlbSF>t=P?QO?kzV*%Z>k!B`&i-VEgTmOvTj_x*!MG;oPVi;PO^ZFSU0 zxph|99J}2j1RgP_Kpo8o2p_Rad{1*@vKKMq;ehDX+!&(6J~JN51A`qaRiN^H5}RqfEj$sUdSOrh)^0(szypHD^gEUWqgHZ(92W|YXb+1 z!cJh;9pWpZkzbRkcLVYkJe`$`zSPbiIc^O#0H7OtiO4}L7tK%#TV*ZmDI(Bd?zpGc zHS(G)+2~h(j?U={fUDBp_ENBwYG9r8dMZ5m6eBXqeIHfq>BugdE<7=;sA}hV>$85X6LNq4!3hx@Hqo&OzaIs0 z5kqL3*MtZGS;+DS%u&$> z>S%yU9r|Kp38p~bfL*FJB{N2rtcIJRoy#)nb*S8E1CgM$fM#@`^H~$ipPo*5x}34* zge65RS(Cb@0kss{;2Xevm;B$`ky$xt{qJJ_mm4=q`Fhe3d11Wwhf#Z z>uI&k{E@RbzNeh9tP9TP6E5cqvN^C?Xa-N(zt0P zYu|U2z4XF(O2w+Za=J(Pl&YXcPC^90y?|a5RotU>HN-F60bjF`Aue!0p14f?oDgD{ zan&mGjK&4RY|dx0`K-Agn!MaVRU>Q7XbmDj45b@vDJQGs9q)}BIs!`m{n08n9KFFD z(CaUVk!-zTZO2xu9nsNNG2>&~Hf-CDQmPq;_YtnUDA_yX*+mEXe2{4~q2oUD5EKi5 zQdWi*tLJ%?QgOX%_40jBNK|$Hn(^+@#PQtJ=SdUYbKsQDAht$Fl*)MBuK4Zsioy+* z1tBCP^4#;OO^li*(jZMxg>mcqbb1{CAwi@xeg0TS=hy&;Mt<|SZMP4Bwhw$;kL*Nq zh?7Me#MuLAN~LsqU;s`h6&_hn8K-pxOM};}(T6e0WNX-K!QLwNS|FnPi!f<1-0A3% zGG-79D2}3hfz%hoaCGy`Sef+6wHW*503tsFK-aQeJ8$m%;=FK zoJYsy;b#cl;p+GDe(_C?Vd@x)XfeD}vX13A$!yLD(_5q52kGs*-X0xokGjxn!r{!l zmx+F$R3exmFTplpa}=0?YbhwTYW3F`_1%Td0j`tbLI5)=-4;A@RU$w{9o1^+W6uE* z!LlrX&6&g8cJ(;-8N7RIHxnt=MzMfNAHHA zMMnz1ekm-CACw3y+T5J;=sYsGMVhaoP|`sPq}y|$}! 
z@hIg5h-f(6L<_PNbJPd}X=1E75lTDK#Mx{#FkwOEf&9O9I1jj)fgYn4k-VK#N|6F` z&N!dXczSxm`Fujkp>u9$^t`nlM3&raxg5b28=#Sd7^6lrYPI?&0YiHVsI96pka$Go zW)3rYp*S}%TS!V@zYyXWYqlJxSixr&*s;H9KzE_C#Ba?~?C@of_#esqf zB-E;odP=5Z7)?!*kZA#+ZLi%>dn8NO{D2U}y5~sEoWTWue+3{P8{7klKUx=a1Z8UQ z`@WdLgK0lJ{QGd9dz(fGMzt2WC`t@LmFrs7eVj%fZivA^hT-JQAjaIk&48R%he9S9 z3*-P;-`HC80lV@xlJXJC?qU6Xr%!s_C4o`b7%AtZN)yc3uCMry|M=hd_WN(~{QL=d zxq|m$`1L|wf83!Q4=s0N;O}zyqT$Jx&dvU)Bj8bx3HScfpAYzRym`N#8}dg9NgD4W zqM>vcht_E~=4|B{km&fiPD2WHFvE084JtP%$%Gs8~^! zQVYb+2{Cr#fTnwfsHiLqEuN7FvFj+B%P7sAPcsa*g47~PQe!hmmEF~XlLbp8jS86T zuMl@9#5d{?BA6A~1km##LKt$Kdj8Q+hyhqn3Fq^Qr>7Nb&brwUA(2vdgezK68PBe% z-;8>gB8FD~pa`|bM!QpETZfAA(>qKrn|g_e2T5J!h@z-n>qtA=7-3$(~g!^rFDaC zNX2;e^D{6s!scjto2UeUtif2U>r` zH|!Xrs+mM%sC5A_RDZO6 z%}Fvz;cY7@rS)QWUS}nR0E!7sfW201wV~R3RtLaX018Lv132t;>by00{iyyN6T~kB zQ2jDh{C@A7oDz{c!|wMSAx^Y6Zdmh*r}Go?x&TDjHm$KEToK6fXKKT;B%If!gE5Qk zDYN70Yi(%_rLlf5&7y~zsIPmeJ0HWDZLI5D%Grc3a<6=D38Hrh{zs1<0r-mZk0|`_ zvzz*LFrt-ysGmJ3sb6|N#X>xvv3+O`&QS%QOLO#-&2b(;|80!vANd%JlC)%{OVsOz zmzNj(_~YO3!~gLYeDlpSo}WLX{Pgmnh{uO-%|k0%xo`YC7R5jR=mx>uhwnIBx0@k= zu84Jrg6PTr5bro2z9^Da^zI9DfNIFwqU<2ntrOC!QZLdMq9Dijrmu2fJ?aJv;odnR zhT7^gRBkx+p=kuk;U;niNMI2a>0hm*K-~AdCKaJYv`A=hpWn@5H<0iSUJFknl>|DA~O=u9U%on^`#YowFE^F zYSVuOqhoZCVHyfrcSVi1yl8}?%WqDJeU9J`CxQFeW0Q0x0dkCpF=Ndc=kp26l5jeC zGO?S0?eGllaEV<{D@-@by)YFvfkg(6t6*hTbI*nGzrLxz4uizafL6jN@- zyFS;=??F9*yR~Nb8xcSvb%d5>!D1A%j>K{^m9eqLMG%rX1CClcjh3LuosO^Lrss%# zkcgPl6j7N9(2%(BnDoSI`D6?mC`i@pIXCMPWx z0TUrwq(;Dq@QTeE@rywzfVCEI5X9iL1y69k?GJAe{pL9|N(ZdHD5Uc0qdJJLK4$&R zLA==ZU0+8ctJ1!7l{|NXOD%^e1%yBytQbWJ_fmSYwoA#=oQu@C_2!U#VIK-aR%F!; zr}29U8j1FDb&sfAV*LVZTbd#_WpB8?Zb}zp6$T-TR{40XM(3-g;Pv%YE1wt5bJ3`W zIu(t9eK(qrMYwCz3hAJxib8X~!hCe4LyKM*m^Esr_X^e~BJ5 z>;(atN*9?LA9d$NEUKN5BjNe^8K=_;1Yj=(;09@`n5dGwgq#8{=M$cvFIZ0*YjO}q zL&pxLwW^ZGd#fniu$791#ohQAE@tbcv!>cPM_-M5;Jr!PNKkNncG4|<}0kFNdwaz8$L+|sk= zS}!RjrATahLD@HyZNo1={e&e&eEaDcFQ2d2Y6Eo%zd!4#<^l8N!S$bgbhLtoaXWv1 
zbUbuIMDA_uc_sgY&*+!?;}<*9GUoTt_cAUiiE`|T?js?!A{%n;i_QF;K4JX`V5n-* zS1by)n7Enn$O&;n3%+27poI%P3Ia2LrVx3lZL}lNR18fgbc|h?v3I73t91;tj+web zlY=!ul)j;~2cT9#i9iWJsR6YCCjl0a<)KXZAiUeZBbY0;?TVa(B^eU5DmX)$8bN^I zbUNetX~pyN8JEimOEv@5-d&m^*85(paGz6}{Qdm?0yInF?}m#f3tHP+G$lYo>n_X4 zqGFc1XRB;;W=5rGwuonM=sj61XC^PId?eHjI-QH?2wY{LH9=*sh+ZuhK{?J^F`AH5 z!nzvGAX!3kR}z_qzC{?+bg(f}8`vp0)m26M0KrT@@@vEt4b=}*NL@qj+O`ar`t>M< zfA+ly$Qb&HdV8)GVQ}K-=@Wt)!FfrD(Nu6Ik5AQ~N>`=|0xTjucZ)($dF5#axZ^!I zyVh>3NbC|8czuxD>g}pv1xa1Oh(^8Uc}+6Hgl531+=h=wtr z_5NYpOGb}$NDy-*Z1E$EJOOh);08h@uHV?;71#24Fs7g&Gm&nipX6hXB%i4WfO z=(PAF*#;~yii^%TU7wZ8N36zl41^FP7^D)l+|2gS=j*Di{nlbtgu`Scj%F06;lLe@ z(H4prYnwH0AOV_DCm3ol)I`V}&?2EitMfJlG^%J2L^B|hT~}(-6yO$V$BJWqB@LFNkoWX+c%VA6eehtLag3PKmZG2uMMp!l6Kvc zQqz=wq@n*l&M*-6KnO@^XhumQw|>CPxb8b*2&mk!wyYs}L$w02Rvuq6nh3U{^#-k1-%GtIFECA>;LCM3dzKfd-%$9*tYFI&m>XaBwd0;M0>B{fSg< zX^u)Lst60taaZLD0a%xWr^|}Vc|}SA6u2wm0O}y;pkw2D-LP-3sIB$Fi-1>Wk6p=& zT?Nk8g(`aeYd>}yEA2A>zL(Iwaf_m%uY-^DVMz9%>W5ZWx zN)hm0**y%02hmZrW}nOIv{A6Ufo4vECab5I4fSmhC{j)q{R@GxEDKNos!IKVMeLnF z9ZchYMkGZv|A>WvR&_oUhAe`dRDq0xOMM4*T$4uYbPfvGsB!`^m9B`^U{OSkyw<8} zUi(&Yy;f}3rt_p~B5E+%U{6-BY|3L|d#(_?mYLY%>`~-{Xa$VHXC60HJ`rGEl3EU@%a&itAReT{Wq`84z$foe*M1 z6KGVKwSG~lnj~EeSP@ccA=d^F8`TcbXw0CnjtHUdafHw#-LAbj;}5fA;;K&W)TxZB zM%Q%hoAaXR-Jn19{}Z%sib?v5t~-$kF#;(NF6R@LWyL8cEg(n)ZUwb$Xk|wLBSlU6 z4nY&mFQ*l!yeMr=7;Lm?Zc6nlm9bmxp4u3D73>XYtfDPs#bI=gs?pm@(I}_Uw^P*d ztN?_qeWJT#Fd2HE(;kKMG1|=L(s|AZwr(oeY55iI#N5~lUP}Vgp976??A!FgkuV#y zyD5`>*7G^02#SiW2*h%0nt^h@<_8?}Qvfo`;IMzH?K!&t`WTu%BB75M-$+zbY5Rj3erNVBo`1kT2ZE7kIto(fRqfx9<&Zs84r_!tERfpyzm9 zkg+I-j%cYD;5ilONNxWG#86BCEMRD~BEIpyz0LY!F|JDhTA!Fs12|4H9i3{0dPIxJ zU`Xb%_+b9nR1C3S-+vwtN;du_dc;BrZd^}3-nH73*uuON%stjmgI$S_gVja<25Q@(`Lq#Pyn>j!vYwo)0T3HH)Z z_K^<(&_R{S7oHd*D4{RLYA1|)X#;44s4=4zm3CH&N(6!zQ`Cf7mosLr-9WltE4EUx z@0!5sNOohd3!od|vZ$iV$qaCa6T9pYKa>n_xc8O-6 zGY86$Q%#yu`x-aIpwaJTSwPDILV)!5c7xvLU>Eu@hiU3(?SrMyA))}P4n|l6lY%Z??TYp15wu|OP=K>mY5FXr`J(j 
z()(!Y6E%7w2!cUI@;`fVLha4j0b#CVJr%~am;%_U5vX;ISaL*&Mwc_WU2j8fqrFGI z&iK;%sdL+!*q8=HB19?#>Kug_(5lfPc?2zV-eNMo^>-7%i${y`;WxGiNZ-Pzy?i=b z9e2IA@1Zed1k&ipwHCZ=1+QC$WXmfvr=gaLvO5)0jr+VLbv_tY8GR&=dj;xlz!AE0 zArQc2e%lE-(1c}OUAOUmSaIEqPw6&& z-r6eRZr`?^bnwvsAn5zU$h8P}(UWnyQ?JGv?>5CNxDzs6i#>66ezwV&Vsu`3A8-~HtgTv&0&6RR6(SlO;l5x`*K;St>0P(SSeWmR z1NE&t#M6_TL8nNHiU1N|5t6s2-COPERFwHV+s&rjjm7{q^aa}Am0_W30SJyT6IugW zC2(tqfvm=WQ^AG?lKEnV0hNZL-vuHS5Kj_xG{&3{(sor}PzDNS zT>MfB_F6iMr|2u)Vg{&3$Gdi~6gQ2zz0?yzT%2F=*1a z*M$%fTwiyTR@EsgMXSBL?4a?GbB>z)m~9g@IZt6fmr<%N+|?*ZrPeM9t8Jr+m9076 zt2JK4d}~q6RU;F8wIg- z$!I;1be*&DAA*Z|Iyf`ltsCb807ok8?YSLmf>>1kjW)W)fZ!BP@sOVF4}?+~zr9@X z^KUPBS~9|A!LmljhBU%q6jus_99K|O!2sE{e$b%R)t=<74!O?b$@ZPz*MxLBElTbm z{Jq}qaQgI;{!hEs?2vx=ATnpyMPH=plri zYsS;l8Rzq=`E@Y@Z1f*-=R@a5W2Ls1UT`oNb=jRMrTVgg7aje#^(rm8Ud1V|1{i%X zz~%cQgZ&FrbcsAdej8c%9fwE8GD?x;*3g`3cXL3*_@Fw(7{@kq`aYn_SbZ;=RX8dI@j>N|2)sF z4{RBm)ly3fZcU>EJ%{a+1G~MI@+pB53>wuTWbDI;wq`G78<=Nc6?-$=i zM4cdePF>KT(u-;lf9N{1@iZoyMN?pom1vlnNa#k{NfOj*`8q=xHqguS?=o%34|ZPgT1%(X0E8IR;BQkvAj>1_ksVLeHkt^I^U>d7jVp4>Mq}&T2_3^# zm5st45K1swRRkeI#OM>GPUs_fc2COo2uL78im(V^1Q`lgcC9y2O{UV7{#Kd}|Za_CU4JukxwLkCN!+jq8G0~ex z1}EewyZh+4NAHiFO(yj1k+(pq5V$M}=hKSwd9iV>_tQM4ic+A`6#ub2^saOVJ{6;l)B6|PPEQX!%%S2vQQL!A5tT;M;~nOcPGUG7Lm~g z^LmEq_wIbqBxn6c4xf*mYKP-xzvO%Xwr@}4Mu6@d&FlA}FKsV66!M%v6Ct|GAvri7 z)B5dpXl*0%@%KF3@jN&mw_`aF0x6|G#Mq1rr-0xnA^n&8JxkPOXkHA1kPp`}KOBqjcfh5utnSW-4Q*3cK%+H~_` z5!F#Q1hNWuM5~!GU5&y~iiY9$(s11iiqVwxTqD~EAVCgDPEVW-K^5QBs(}PSB>-Th zfmr^7%Tred8mSr)SKuWIat=n-sD=as#GXS^I&JD;(D+7Ww6kCowAuuF+0EEB{<}*> zT78GwYl_&$U}6gZcV|N#*kSmy2*_%;q!?8(%*TOqp6yxs*e33X;JhKv$DC)To4JBO zE{Ncc=RE!$6p1Fg3xU07HQJOLq3TF*(<7;^zpz_#sOnl@+CYxi}Y zloA3O>pyjcAE!`Eb5u)DAB|b4W4dfR;!D7`5%!&WF)onOJwPho=`ysMo2B&N7z0vH zCS9vi+9AcB+{q%iUUw^YXH-QHT4trS84@2tK+%_nqA<;qcz2I|c*6nTcz zfvz0rR|!N|a>DtPaX#z!)4KHh6}2OEEpWtJ9rhtw;Eo$uRQ}kENSBFjlR@OjPZyJz z9AWJ;7shCo2S9Mj?c7hzsWCF>M7q$SM**Co8jSeuJ4WdLLZA1{0`9e@l-jdxjNg9S 
z@auWQx-3e^4O%I^Y&&XGfr=D0534mnDVh|$?-ko#P>UL5V6pW`L5|A4aM@vf>&v>= zk@4q&_rtVPjrN$^aNgJN9k(ypAdmKFUlX)m2YFl_0lFWY){OI#ur3ixPBtcO?E+Bt z4cFHzw(Hf@@gh#=1*g-ZkzvaNYurpBOe_b>auP)MgwiwuQX8YN8{I}H^oX{2E|fl_ zM)K?)W^npDb-H$U*u`ozIR)%UAM)|5V=)}Y*=_sz?>tI!f82cLrf=pwEZjIBbA5I7n!>=#`pM-G*%8O}C_0u#D--8vulHuaHiHNVe%{88uI zi}&8G@6*470kXl=Cq2sBDg$D~F#CLJ%t$FC=L)V>lSZv$K7alV%W}eZ-+zby{U1M~ z)T<37^J+iv(DMsF?l2mL?i&qsJ0QMj#DCzNFFrTMCKJEH9lv#!dL+aL;1`{RM_Akk zPPqNu^vEA-57LW_#~VhrbICPoF`N+h>^0%8XhKqq@Kwr6S03)~2x7m*JN^2Wfu>H3E?)Tss9p+h}miwKj);WEcb9sGe*rkdX`v0-_pJ90{$e0XiZV zy3y5UisFz4Gg z8PS2VYvf}q4X@W7m5u5l)*kSJVg!$7c=5Fyqb30c0Fq18;i6ts6 zDx_42*Q*wTYoaK&8vf`8WwFrz*tC2T0$`|{K)K4L38d~F2^?W_Mc%!dd}~U}amSSi zme3C4C3D}nX2&+Cq_nrF`2oJj4loAhfcVG^b`70GW&>iT`po$ssTc23NnDS(x`FQ- zmd_JUey^>8n!qa>x8`!J0h;4OsKse3p8qoBt34d93Ft~;O3`9Dj&=pB{tQK4)faI! zDx1T1GcerEVS*;p#}u$`2`?|NxL#ka61qkTLdaN_tYQoMt}@H^zFtUlc0edab5Ma= zDz4XpebYRTp{lm%HImb`EFswqqcBG`(TfW<9Td8Lvx74N2m5s+jmi>ZYgcX%s0hIM zyx@~55UnRgqsRKBbRvRQ;wGyj)2q4@b;X!gYezw~0B5x|SLdeLYd2eqJ203O`-~EL zSbzy$G}A#Cma$%p!l$#L22FrgOR8UOHrpG9;i0uVA`mhl6}1RHzZCrZ%PVq9U;%Oh z>b|4Zihzh56S!FcLyvlFJFeFa+qEc|W8*wUEx3sWq()QT%M5r;bB=%j6>u1Us?F0w zBhCQSIC@)64p+az8~f~i!~?26XW&~58kY{RwPwkx)Vry^+K+$AS1?i!;$JNamG zTh1+U!`oz9$C=^bK2PKxj_v#H*C|>dJm{F)A8zPRqAY}-V`84727eP&_2r&SuMZ`oV5}{&@O2+^a6PQg(71V*hZj4%jb*Wa} zmNf~qR+Z)T=*M0($-7xnxIj$l#SLugbF{l+A`sTdm!n`X!#)6(2M~<2jza`RphnJK z3SM4yk6V^SlkeDy5B6LV|BOG6E#79-Hx|5JH|(WCq*)GuMKdf1ruF1E8EEH(G*8=Q z-&`Q*N#PU%*3*iVvQn|!#=gJ}SKVZ5Z3uD9EAO`$R7FPV60qvy-18y4 z;;kloN4U5^u>xsoJDWpc3H4%h35XhGnuK2!Iqv*!qX={wRz%JV05kSeM6yzPVoomH z(3p_O`8aIglkp1ZhLo424#<)K%U5DiBbx}rFd$Qcs&zOX=JYzn0nBKd^>GtyTfyt= zjg(qBzEDOlvnk+~4naF1qy6AuHzoxu?2{6JTQ3UFbB^LfSd z^94^&C#)L|TpnF=NpE%aC__N`c9PIDr3A|9@JQ06iNXc$t* z^N-K+1>kG+usUl5grIZDqqE)?zRD>Oz~CshmIjDKlqz_6-SKmJwR$kXX^D_pbnggS zfxGMltySz+u=0A{u#DmfS@Z5Oh&f@I@2*zAt^j|0PfoH z#`(C5GELy-gIlTtf#LYgDy8$aT%bgAj7E{xTW96bb9(f>wZXyw=rGcJp3e6@R(03U 
z&J<5~JpZd~g4unx*Lc3FD^#%}s7y}oWZpD$RJ6lL+D{P4qH@au1%vA0*{aRC@U?;m_wLxkTOeE;>wM8RO`3Pm3|9z{VQGGK!LM#pjvUIKag!??)!GVp4(OXbe~vWUJ3WDI%NZel%xGjXOH29Y`nl6i$H2aq0-+$sYEJ=8KQLVv?tCFj`WN=btr6d7F+&$;)6s?{Ve*|f`tEi249RPsN zm(yv5*K^sdGOj?^mk=UyO2{djjH5?mhD>&Ag6qD4C|J^YQH8IfZLMmA)Qha8EQ;91 zj3ooY8}@w%g3C$T=1CDL0xfAfsc6AO<#>R8wo)}|JO*4YCw%w)Cw%kGGuBhq_P$LX z6&s80e2^NJJ&IyRO05;wz2LfSxb9AoX<`Qp3w|0_&Of~zzl{NQL!OXfROGJ*S+E8wP0O00j~9((D&KimfjpPT6=C2Y22NaFYWn$+&Uk3pAmO| zF>J2+zOXSv$0X|cn4OPXbuJ2Ho%Z+a(P=Z;o9Am74K++)+3kIto#E6gXO)b3J4Y_Z zyZu)jw~kqNJi-H|R2$=gOr6m3KZK=&X|wYoK92yT>vhAnZMa-c$XO*a*LB6`&o9`@ zj_~q=Uw-`+|N5_g#g9MziqEfmua5bl7x}{9`}BP$3UW*1`aQ?jf65mlHBtkKDS_M? zq0_UyahD!R=`7Fp?mAHSPC5C?r@sTb4sCX0%uRRp#dSW-UO<&rx`T+$vsESr0i?)D zZI;sk=jq>Eiv&vSk?x@Qm&mP3&9v}O@6yrN5R_`=%2tP=L*2I5`U<8iL1t(KXdMZl zCuze}gNEbD{rv9C*!%ZB5ll@wjLR@(l(GmCht91QUtq*?4T2bl-19E9(HdK=3kh9? zEC&LIs4~9HAXCO}tP~bS$^_Nq6B|QoeAs5B2Cnj{OHc4-s|`ZkP!Z#5dcRbw!x72k zPctC|*2q#c=L42E6bK6}h8e zMHxNXA*@aX2?T11lZfU^gxKYZQ%;7cTOqg{k&_AmM6WSHWU{T6oSh;Bp|4Zbc-7jU zhV-;%HDKJ4h9w*-4_+knF=;a$4jpQrDRoTiSL`ro=u&SXrc(i;hzmV z?RGbwUE(=}fYa&Rjpg=wRVNn=qGrkdaZEf%0^sPriVLt*lwxSR%d{HJ1l+KR0N6nY zSQe8GjSDzaSImkLkf?JnCNip)(H5K5Wavfk{!fPO)S`uN~Es<3;C#Iw;x!9q*vy zKIG9wF=NytkJHko@9FC)`4Z*E*aO4J40_eXXlQNJ3~-R4HpX=;`0aHA5pZ6jJ|`E{ zy%_yWw92^&GFZ$-fdkg4Q6TBHmr{&K$ta0@bc&?E#4vy`fnjb)4@^k^CRQrI`>jiJ zTieZGzwfnV|L)EQq#LiDBM(@zE|4N&%@L>5g3~FRV8Mup5rQea6_mPT+jpfdl2Nu+ zt4l;iA8ZY!HB&fjT0f=BGL%-NDuOG*abxqQlX;w~^)Wpa1OV zI=Dda@ppusMzd<97`Cq7M1Mj`Lus(v9Tv@qWH9OgHRT7S`vl ztZyg&dU}ZRm zF+{i>Rhu#reBVCaS9>_s(r|rw#hMfHvS7PzIG@joCe?!1S50&x0N;G`2|xb)EB5-i z7u))xL47kIZ(o$*hT({UJPK<8!w~&F5t1*eEI$4eJV}*x#M+?pi`^&^Oor+FtaHfi zA$M^)G@)yF!FsKm+uXF#ZF|sUNZMOW-Q=t7@uC!rr|yQgjE@K`dT@vW*^hE_rWfXnSaZC+f-O#gO6ZC{J_a3LuKMF${#+jbCO$VF0oNjkJi> z5|I-j#h}PP1B5n=9~fu8Q?r!c90RE%;BD9yoRq@HR(nKjx7ShTVQ5CAV5pHde1M1M zVcDM# zF(Yw@aOqn6y{IK|-^4gDh%JA|s8S&W%QdKIK{r1~^0_r6)B6vW7a~H)3H|e21&s(H zioQqG@zj{Hm(ml@n<fJhrLU+U)OBW<~4Ofd00q67D^EUzr+g{88Qi@27 
zO*iN`ig|K#DQBBOM#*x-)p`6mWvt5yZEt9$p|qmZ!BUaZVg>f}UJ2;yuL#s}uVShR zx>k#8<~KoW1%Oz=x`xwQaXy`U@iblpFQ0!z4Xqbt^!yeQ7&@&8y3(1rgBs21@uZ|` zML7pNKVR_u_ut|9`GTCS<_{CFEB#SN!cpZ%M6}Sa0Iew6|9ai9?G>+^Ce^>b7VNuV z-(5^V)yxWuD(A(}RkRptH2ZgeAmm0f4adRekAS+dH7B2|lV$y8LKu(ONPz7hJpIv} zWSzwtsSQF(GbN?$Zgof86s(985Co{G-8glspi{&G1*EJUBc-Wv*L=UVmW=^fNKyXU z5R;(R*8L8*S{Q;rniLCCv3B)0+-THy5OaF`@46nE{b}leGm<|`nRQwXX|DwPa|j+` zW&~oS6tFHC=S6cq-1!J(OoBk&aci}qw2E!tb?*oPDQAR`v@h9Fc&)GWx)qZ#*YyIV zId~$v_PYeT=26LH!wip0odQ7&T9f3V^U=G0G92KM?q<7zMudfB6cMz0gbRQ*gz&-Z|Q@J!&`$r0)QBGKQyOW5J8@w zXc0}Puj%~K{lJU9c8V4a>EoV@X?uxU1M|4LvE6VV$+@Gaxx;i@)O#wgF#R~0iPD(? zoo*-h*AGMrP6{A_P)o&LbU*Yq_7VWLB7i|itu|b*SDeq9zyt!;^@O}!5qG5mTwgc* z_~S1)oi6z9yYF$`E9%QDJTd1>PRM(Qqy63pblX6G=VS1-brBA46zhBIaEI!nPRJPe z^r2>*jq$h7!2>V!>yMsH@ArM{^T9vtaMZ!9tr-9;Eu3Bab?3TM=$ zk%N?^=SJAc;NTmHqt%QYm13|g$&ktgL{allcCAOUZMzj)(-5`vYT>rA#MCzumo7|6 z6PHh?jFbV9io8Unv{no12+8-{51uJ#K8~!|q>eR`3>yH4w1VT5WbY}Ti>R>VZeNfXs!^fS+s@xqKQ&+PM^0!CGK zyT8aVSVaUWC7ezt&B>^mlT?c)E2pS)CL~n}%h{5MZGHucMz3KIX@YtqRgEb{a~4Ak z%7rf#8vwB2bUO95Am`lcYY-8N$(1?+?s*~L(DR5C1#ecIPIH*fmle;?Pgs{UCHU|BO@Dnnf&c_ZR8^e|s;v{ftIp98^$HN^s0k+xLQ!0+7r&mj+$1W!eYSaFv{p zh=8gTB4Qr<4G0_pa@08xZLV?CJgqzP!a=ENza{Z;|M(zMwA&3>m(cDO`Qh?|-lYh! 
zA<6dH7zpcJVrT<9W*pl}) ztm}egn)vIoU|Y2zUaznC+u!~JfBnDy2fqFGTm1CPuXr&BWQ1S;h!aA+WB#a-{GWCB z0*!z-i|4&{e2o*L9PR%C<&e%`I`>+ zrnxn94yKLLCcax*;!HFJ#!P>bV-?LrrSl_<&&yu&B`< zIa+mNrB#VJCZ#l7p?$!$!6j{H>X*wj>fgwmmaKFtQ*Rq{MU5C{LW~Q}=O+l0p|>jK zyf;g{jR7$-Qfg-K0Ym{gWD_|ci^Qmb5mQ5yfWXiQN>H@govdxsb^?IY>4eWOFS-s9 zp|xv&My*Mp=r6#Cq-doPQUK^0vYs-YpD+0I=^4wqOnE|M?bTwDW0JX{(^^HVO10W| zl}X+Af~_=^1{5Z(P;K(rwJ5cr)n-dg05#|U3tn;9om`J-OeiP`;PnP<46@Th#S9ic z8ZGX!3@_$(;n4pL${qCxfE%KK*H6&DH<)-yYuHM_QW|o|py>0%BhGFYu!4XANE!+6ck<$`_Ru`C&DNvI{G zlpVkR`ZGR%{srs#f^Wb1gwLNpqrGnUfKB?}{up@?bJEe*z4ynR5b57^qC!>2{^%i& zOuhN%!?VKBzxf2V4d_<+X8I)H6braVubcLJ9K{g@K`*A}(G$qy)T_I^pdjx!A#BEn zU`8^$A7czw(?li&1xrw+tLha`*^I!@rA1SS_;fkpa?1Gr zy9>Vg_JZf<6P9HGApy_=fX%|suP;~p_Umu>?ek}B+lKvhwQDAoX|+g3w(938CFH0& z`OBKIEDay6 z(L_~r8rP#S>4>gBQ>QnYLm`0?0tT{Lc912Cd;ODA3WVFZj~SX;yF+VWta-62;MpQg zN=YPAxyJM9gh-~|!!Ws3y|2=pn&#+4Ql~HmoEM`6vPJ1c$KbLoy`~j_?o@g{P-6y4 z(JIbKBV=2xrrpm&suPxP6AV6K0Fgi`AynOvbIv%OR($i#GtQS)8QVMr1Rw&>_aVTF zp&C!WTH<_Vt?6J2I@Of$N~zd3jRv(ws$f^OAYW@{ghb__8^djiVz?t=c}NUfd;CSz zfE;{xfr!`6Y26ti8-^moaL{oxttB##Q=418z6Jw`9S9PbYMwdQr3!*noN_{2vyK7W zZ;igMz`%`^qBeJ21bqMf_t^KV zdge<;*{|4l#AQBY8boBCOBF)d(7bsGr5N0?(iiF(C zDvxoeTy9U0_2+Dj```VR^Wv!InIe){j)3Qi5J15dk>=;uwh5%@aLf@Y04YGZ|0ZWl zMBrjn9ee*Kz{^$9(IsbG))gX})~_@Gqn;3KLt?w8_Du*0FyynVZA?a5bd(|tj4D7( zr(V^rlBoqnV&q5w3R)4{oPbCX%W1_oPZxau-4lNJ{u_Mze8$r$V?7bpRgKaZ7Z4(v z7%Ep9*0|u)<$GMOXS}|=AjgDKROQO4Q<^O9Ri)M8$XShU%@xo@xLZfLk5w{q(+#ra z4oI{Dck}oPQTf#@LxLBEuu2YK5Ml#jR55@S{y*y8wY!lcNftDZh|C1Ys*<{AX7B(1 zcYAjCo@uE_0GaXd{b2490g$YRr0(gPIuwf}kcf<@o4dKW%RJv-YDF-IQqjBzJkD=M zJ`B#YjiPwH?$~q1wr)sELPIbkgVBSMj}gu32Lk|9t0b=dnVb@)FyI?_sODTDQm}2F z2(DPe2{F)1#~cWHTU*T?EOEVA{oc6eF(B01i-(2~k-~zo2xeRs z^OwL_g=bT=2wqGc$>VFgJhZJNrBQ@9$tsH^q$Cvji2~G$b%}U7KjHcLxjRQB1e9D* z%YoJe`%!SavA)s1v&bs%v0Cm27i_bH-&`tl2q35`1R%}#dPbH?ZYU+AwmSONlJR8% zU?O0fQ$Pv?PPJt2ArL&6KObE0gNJBmdbOMjk5>6we$QFr(!%dmoacPFx4@Wl6FT_t 
zsH{78s^uFpUyRN^Z^L_nV7*NuozmKBMOCA72Rfsen`t>8?j({?Gt8qTE>GKYF7Stk^ocQQ5@0B%9M=nK*;!<1Td}5yJ%5ic|MORnV8-NMV_g4Lm$!$E*6+cM@83uD+q5S4 zOgI{Uq+C`A@k*nyl*Js+lxknkW`17D@DmFDHC? zqAahLjT^Xo*X=-YPrp!32A+Drz+!weS5)UJ$n#!0JfZXW-TmC?9d@}&wHxqv6{-cT zWhe@bGt=@FQ#A865G0uN<+%Tw+zcD18F=`WRgutuAZW)8D^^Af9Hhe6!CQbHqaR%8 z-Z4Yf}qO8@9yoM94LL>vHz}mBi&}-u`Vkax9C>LfiV$e1EK5XM7Cb+na6WHVL7q|A>V07MfjS*((4$C?W4=pn6=6vb zRFs-Ytvd=8uyz&--FM2V=F(6h=53`Cl#jQ|8qbljyLs#*qzbsEl+NDLX(rxnau!X2 zLc>Tbr*?vquE>J!4x|#5I8ui*>ZyL{MBh)&ZN?S_@Fi zFR^V`?Qfo&_T5Xn^GYy@;{+-&`I+HB1g4I~;q_?84k9Dh;=TAf&d|qaUtv)wH5zog}_3 zfxaWlaW)CuA;Thxva+PW}bPb+E`ajp-q-;)XKMR2j6VW@r32 zKDry>I@m0V(C4JDAwREGVUZMpD`Va{q0I@)=*!#w{gKGWUwJw0Y>xha1X6|s{k>ED zdv?1E9EmED3Yc&SV9y!9eSN|C)BlB_1xs6)FuF%%P`T%~fBXYapFiWvFJJKTdf@o& z1;ePC4|1PyKW8-igTIFa|EDi)+BZQ@mi}23@?%jD^uLS6ytyHMdvgBO*Lxtu+XbNK z#C!{c@O90q5=&2(d-GXSQYznMhd3Cf`!)#io3yteMAY-QNBi-uNXU4OQP7J@R29u= z82}!*C%kO!XOK_Xm38NFQP%Y^$I$r$rd6o!>los~L@on^mC?q7dW8 zyKx1h8>vU96fiS_&nsf;X6_TD(Y38qR$5{}3IS^hERy9_dQT^uPHV3WuBLR77B9l~ zjOX))(<$NUl&~Z*0FoF@iGm=kixO=AkRXu6c@;&G0|9ti6VOBYq%njPkRkz;`7Gf! 
zyVh#nu%U7`4G8D$G>2SX7M4e&D(DChJPi8*16K+%@0M1|2)6>&0-!;xay?TD#(@!S z4vQ|(;KtFAre&^68fUM(6$emlD8;?g0+;4>8j7mXYMg!`BZ)GE(H{T38&s+ajcU@S z1VK$n3m~YAy^mP$#SH#nb96p<{Df)uz^)cWEeCGb1Iv)2>=BGBQw#ZvcJ-bxIYLBe)M-+*8W2;!X-Bg-?pt7eca8|&yyknC)5_+1-u(^loK04% zq}h-)lQdOZSG588D9DMDyVyua@`~$C1W_W-gsNsRdNYPn4}!rBqD2Y}l!g$dm88Ns z)W5u~-gfR!1kf-gy3m10y7SIEXwkl}`_U6ZusMzHOg3_T9e(d~`>j1Chse`0vpF)+5x- z9T9EROX+s&^lUopjr9EdjO}zrEg4_G{?^fVN`c&1OCZddvX1}w$A4oxpYiGGjMJLT z_!u4zuKRDJ-5`8)(1%n}}F$Ktl_HpgfFHO15DG$3bs2>u10u3}MPUVjG;R*U$?u)20z(GAm3* zQFDwrN6-SR#GQbi?p$#`(ZFAqh&8de+G$<*wNbH!03`A0n;Bf9Sl5K}dBbUqNQ)o_ z=hR2zODq!6W&l zRrkUTBbRG4Oskcr#3;HoJcEo1@Y#Hh2ceo30UGIf1n&(YSUnU6A;D|#NLTslkK1bv zOzTK_U=wTyl%m*guh_2{x7&fI^Aony3EQ?9g^pF(pPvO1BP2%Lt~caNo^_{Tse3FL znHXMLk508d9-dR@{oB`}{k{dOr?Q>MyDu3KL9)n|4KL@62t*Xm&u4u8^33Q*3<$yH zIH$D+jG8O6XH;TW6bgvgZwIcI8(v>;c)e!at`*l?#p~t3^?G#BTa{~x^v*Co3vQS@ zg~;1`Z$LDg@9ehObKCHZvabCzMBjHved}H08&O;<6|GYq0|H1(JE~$oGEU3V{Wi^w z)8IJ=ZREJ9@AW`%6(?akgD8^tt}wUPdt&s-TZ`BQc06jRrub$Ki>cRP4k5osuz8dg z&f#d_eTiIO$z)p56#Gs&_I)q7UUwWh(_pi4V@g^wFp~^?tfcvwdR48>)Wk}!>r{&6 z2u|RGlqavA{_$A3GtHmD6&5|e@@KDisXmWW#z4nkMJ@806f^sRj+)x($mr{3>=ECc z$&;7gDbXrwpo2~zSAFw6x6`S;z^K`DUZuXYx0-AJO7JmDC_5!_0FZmTk7MNLmt~$# z`HZ#J$L;3c*#|rGZI4dti4`=V8;)Z~%{yMdzTo-!$+*A3wr<#O8LcYXQE~fv!TM#v z-@bgt_4SJ1UtYN?{ZLm$gH_J`E~-;J4!cQn)AOh!8Ft*`p$Fd~?7bH%{8l@D>@}ft zzug!_&kY!~jQRbK_ZSAPi8r-2Ds<1#k}VqO(LNKZ&^p>CfzN*5_6NlI@C;LH$2%So zxZEGo-aqs6*wG5ZWURSi$z8%qe>^~7ocY=iN;A3zpxS7>m_gjodLQ(&(;Q6AQac8X ziJ#4eyal}O{d5o_mVk}NJ+!ecAxV#Hs4{Wb(p>N!y@2aya2i$Ug`%g_`d_^-__KKK z%+$}KFlrsW@2wP4WtPaOM`**_?rJtoyFoM45Tc~F*PVHgg+MhXw;e~5D<{u9d0hO4*dIZ4>Dos~l zk+h&r=rDuL6lGXdo*{iThrzN$*19m=vJiqL-wTd>bnwyn|BjUG`MM$9$B}K8AZS$) zLhXRl3ko{tS;SSs%meEO*yOnaxQD56{YAjsBr}SAv`7)CK!2S_=!(&#ssS+{4CWB@ zYV4|(XjywvP)Gp)9A(GtsCc;?`11J~r_%|msWHM1_PDG#ovAw2U4=H>QCcfnu_Ac3 z4K$tQJ=;k74zPWeZQS>Yd^9Q-xq6Z3pvB07ekzE|#>8Xe#2=ZmtV}8f&;r+&SQU5Q zCILtP-Y~9*I@(oh#ZhPki%1U+bShcvGxF~8zI5M>pV{1=Y<$GzMr(sg1%g77z^!2g z_mNT;WD^2t5LEFq7AhEaV-ryQ 
zfQ}Qxv_C2Hfvlh1xOIiH>0Ng=ivXSC+|+A~%%HJPMEJsbJeGOR48M&Yr|WP z#RQq(I2Hy&;J#00;UPa<`UrGDVGFyFEjDl*$ZBRS{{_r$4S&?1_fSm*)+)+ba{F z)G07NU@|89Ph5Q-2s`j6-~NZLpNTA}D$tlH7$Mf__@Q?8L7D!22SU2{eOi3BAwGhR z`DK4abmWf&KAlU<^bLVD8B`MpBittOSgJyDHi=J5bn=bMHr}}jG!l7eNTo+>-XOmF zJaa_DlGA%3u^y@!&oKioohW4u6Rzw=vP{AY0(f<9@peu($76#3w2}BmYCxwV17PvF zb&04^%&2v*3!p8asa3pfov@J+Wu=Pp)dC5HNT1Ds4hd|D;@pDkjNUnA`a8GJbFlyrGiUR#$5g0g| zsXVIVFzP^CZQOX2dIVM`ubNIth}svdOz(A?5)JqiN2E?f2tWYj)*y}4Bpyhfj3}rD zW)Kii--Auk4;M=u?Vn>2FIAXCGJ%LrUS@u?-$Tgwh_323Hhey;JHX#g!)7C<%OI1WYy0n1-;@3Tt{v!8-H2h8yAH9R1uXV2G{l6wSzueJIz zk`QDRCgnP7mRrK2Y}>Y2!OIBM+Vf~So>2#25AwJj@2J)6A$3C=_bOGdQava|+Dp+# zD1z;c4aL9*E2N9n(czqH?s)OYfGX7Gq79fe)t+ZoCVI+SThk*qA8)|#y~n?^Ev$W{ zVMLXsnPJ;lG#6uLcTCC28&!aJiiYYj;iniBzGTGs3v>zMuF@6rpt?$%wqEfP&N#i{9?~n8= zKR4CJqJRGZTjblXc>JqVoV2gid+x-&;XAjL^*t!e(Au~c&>5Wl`$toIP_c&~OoLi} zjmuci{WNMv!S=dl?XSjI@G;VUc>9_4Z3E~1^*4>>_bwZ22N=La1Mh`?z5k~36}Zdg zrP3y8WPN{yPVIlEofcVSZk-_g{_=uzJ>k>S6BSidak*Zx9~tqq;dD9!*l|8JXt^LC z1^aEsvfS|HmoNDA=?TBRd_}(OusWv?odSGss=eDsW~jJi;lJDE)F@{lA-px<`{p=J zn64)9@Hz(}`cBkjzC@&}4sg&LWSTI46okwP*4C=;zsC$Ge+E@G`b2*Py;y-AHHYYy z5D{X6v^vc5@D>OvM!2&zW#aPBJeL`oVTw7w*GG?I^~hWG1KM6}o5t9H0Z@9g>vgjA z^OHO-NHH&m8-6~1wUTref|$HxG~b1Z;hIMkZrGU8Nw454m^|2inNhA}{w_h7*I|*4 z(;9HzRy;qQaM~h1bpWz3Ve-UE&&wL=Rho*6)vA4C>7iSr_K@ZlA*Rei@G%?{%=8%> zr>c9q=sn{)8)77;67jqsZ=du6hytVggt2z_gUQF$DR=M$Vf6XcXH~BT?>P-#k=zWI6byj8 zLn}~UyA57@hvURNq;*MHmlYurXq1xC52CK%Wr>@$QYyDtC1oB{7_zJGZD!kRV=joQ zC0+IhQe1XM(^+tE!Rd6u>2xBECGu=(3iI+-ga*iBbEQ)cVH6o_hdo~~wIx;UPLD>9 z0b0v_{fVGcExock%v;;E_4JmMVy_O*d;vu*^g?U%{m_lU!I8cPAYxGs=75ac76WmU z`WO4w98@B`@LI9j0XY-=u)@3>rZ8ovKI}Ala9`kzMd-XB>U&<)VNkqbFg)&ok7u6y z4I1Z9YtnNTf&j5mf}2xWtt7yKbxGKSeL#N#`-RS+2`D`5v0QG$F_A(*TLep~I9fw! 
ziYC$vNNQ`WrrzsApgSjYawL&9GWIR$PMn0(l5kpACWS8xBv~O-Fn6q!iv8G8a>MPq z3@4TCPr`E} zrbHm+9R7Ybxx?w+o_PbpKiYeoxXzv2dlfJ45D=T)|K8{&_zx0`SNO?vv3L6G2Gmh8y|2f+bpXqPfnx zdmE?ERe)5C25I!1e_V&;-PU#QaHrt3cb?7WEii4;K@c2G=*UTN05VsG_hiU@wn%sc8;sQ1v~z;KoOOFzpV+UHCf~%;px2M^Ya-` zrxoWlU|k~XH*5=nC}J?xln}Z%$Ek9pHCRC_@y#@JT8M~MQN<|u2T=nbG#0pbNcR|t z8Q-F#I26=8P2HR4(^kgxl`-!WmbBg!v4x3uo;p3uJ^rq$<&lZ`s^*a{rCQ=*>j%3| zhsn(G_rgu{*z`@>@_roXkUF*Fy6?E%XlN*4(V$_Fj=9F~ObiW79(2{04y1SxHgmW4 z+4%5}7^y}Sqp+gY35kS{)Lb>aR>?a>cbc_-bE00iAQngNJ`A^We-ZL5Oq^$;GZw%&&yoh|P>zTB|0^R12V z1R#tQS5bsI)TDxlC1f86X+a2z02e2iJ*z&??t#xduT}lL6vSmgYKo+aRt7-R?&O#a zNKc5ize(%hLu{XmiP7Sx(~5P8ea{Hda&rQ6#Ew1Va=GI5^@>^=_MJ67df`rqCW2uF z7SmL5T-?FL$VaxQs|!ttl5XV&G3CJr(5*H{+>vny7QCly?;HvAV&G$#1HQlh*fsW< zzk-!9?urR~wD0non+eE4+_e1gf0ccs$lym0UlEUE*sJ!vXgAm%`J(0XU)c1*y}rIQl)^642# zLWEaiK5YxWeE9|Yal^~Y3of6YEa8b7*4wrrAH_CA)*Co~VqwC6{Pq>OxaZ*qLqXrU z+&GZ-FmZIA=H8cYJu~z5M;5{xH2)SAz&#~R8Hv#EOe4ixuhIN@cOImN8;vMOV^U-o z;mBTT_8W(Ue})^3cb_fjoj(OA_|Y3*Q-eJ1||b zX2K^9jQE_^X7jWdW570n=}wc%1kCG4gC^bI=MEf#Ktda~1U#Kq`&#hyv|?KcLbfI1 zv@-O+tyFCZ-~mbQwR=nHiS8WcOvNNuiU9FI4!;vvbOe>R6jZVd?pkHn{ofiUCPwZ6(cfjE0595*dZ+#w) ziD`KpT~5~l-Ly}_in$8lb%NStP$mh8$tbI*4X1U%(|N;bUBMIxi>3jlcBZ)P2QJqe zE|)8Adq&A4PbVdtU(qPBmaioiO)EZMlujCgjL^Xa6i}_!H*}6vD@0go>ohG}4R0Kb zzHf?w4+&H|WWm+BXG1^N#_NYIq4SvPpf=4;y!Xz{yQ-?2{0nV z`tW+ET>8HKj#)bACqUn}L3(4z}^0skz z?R>8ff7Ygbc>ROoRSeQ=smwK11VgeNK&Rf0XW?Xeq`uA8p@`TxDV7xR<4`@hSj9$V@guD)4J_is=Od*U91JT4x%;rkYBslfrKJCR%4 z;7OKmChNm6Jgn_E?Ye)X{YxGp%UF1i*IeoSO^6nQ@=-Dn!DQJG$lX^=2Hk{uMsxu@ zz(p|c9d|AJ;5i&G1y|^4t+&;?#0}lMc6SgmMNqs=t-X29cUsgpsL`jx>v!jYarsvp zq*f~v_*ylCI&_LtFfip*IDr`P1$O|XDGzrV-M|_G&WqsrwBqS$!>6YW&rg(vT$hL? 
z1#G8;Z5ce^lop79Nqgq$t%XWmw;PLOl`<4uTD>m>Se1psx@Sc&eYfA)-v-sq5|=H^ zGM0$<=WO)ze*&L(ZcqZryw)rz2E-I?&N<(j2Ci?!06#Z`0O=|+4)po?Jd|oTp6C6E z(#8REwBH>C%u_Hh2hZIQBm}6iI9rGTN6L(}6t(&QFi{YrSs6_t#E5lG-D?`-OmXwR zVP{Ldr{46+z(J5A6XegQjd=}Av>6}K)`XT9DxKB$VK(O__oBP7JhR*4ap$f{?`yGrE0_akD9`c9 z7;SXvPN8B`v$#YGSXWYRU`8j5j)-Z^&A@byj-mGq=QpEj4LLVlZU?Tn1J_%|ZD&r* zaa61FZUxgyH6FQxH8a4bh-mY@qnTkm$-wzK2|Yha-g=ihc(Z0cFrB^|jl^Aj$CqArcwk7o}|zz zk3lH^93t(eTr;#xJt>@1rHQFtRcJH$9^0QN2GLO3wAR@1c(0a2|IyB+6q)-G#jQ3v z%kJK1rj3v4J@{xH(9dHbE->(DvnlttF z{;xKj`@V)d@NkNvwTt(cyLS1R;_q)3QLI`gkqFPZ`Ez#hAHN*HsX2*))#Q4x)uwlN zpNC+DEG^H?BT-&&sV^SSpL(=0%}KDNg#Y;a-|)9DUyzoxpw*0G`s0i7f3G3pa=GDr zzG7K6gb=YVE2AJ565RJ2uGd$bpFiPmfBzM?+kxv|dC*VB!k@jmPW`_T@aUIY^X~nB zFv{O>V)@HH7;ZGpUWHaGC$||AA&khvI4|rFos*@%1w#G>*SvUJYpAGTfw=cUNUasw z{uB|r#}Nv&&|H{dfC9TOJ4H-B^#c@gcRfHS>r+3#nghrA9e4YpQNQj47Azd>Ru{sc ze*DDco9X=XJaU{~YemGKYmx=A?Mewu+oY#c&7IE5`2eKyC zAxR63+TbU(1R%y9^{5Jz1{}HJddaxlb{u;qAJmPdMX^rR#OY0{^m1Cm6d1Yj4y+Y$ zRWzqzp{rIs9Byun01unzNahRySQj-t`_vsQPU#3Cb^r_*SepzO=J{1>+tk(UMb*!$^-}8=wSqec?+%p zG{C#%gEZ#QxL3beBBSR?c+4;ufC3r|V@4{j$;%B#DY)HArwcAiqJmEZLbODE2kbJ| zoQOLc?&na75it;yc>YGI89{($jaV0}Wg`Pzc@!6rp$eG{+v3t2&Yg5jMMV#yz*aVNP$!HI(U>K-QrP3k)xGI1+oJaSw z_pQI`dw(22K;aGp2sd<6$EusS$PmN^z(AfT>2KG4hs1U4iIez**amXf-bWi(b)tp1 zutJ@{htX(l&yhA2sf0jGE_Yi3o}N$mbUtB;tPCE->g-r2sEvEhip%xD<$B=d^@`h# zYHtmuN)-bn^4yN*Y+(HonzbXUs(l=}WC?JXvtnxq!6F~bGjW=#1O0J;JtE9;b-CSS z`=vVmdJnY7o7=d7@3~DrCEk1Od)JTJDSTac`1zf-bo+n+dr&R=JHPu`!-3%7W6<2( zg`@quMGp*oxD7bdZc8h6HnxXx`JcGl9(BsDiLXRL&id#iac7^=66YM25z+OR%foy9 zv(N`1rG)3FGk*E>2~iZO)`D92b=wu|%FQ>%1;=s2^?JqA(=*ofgtRQ!whgyqN7)N< z&Un3Cu$|8M^5qMD`^Q%t`E@vk{vyCIhj#uQL5O?5oaXU9ht^(efAf9&F$&T>W*`;7 zWn`xhGcU840U*>-wd-FGLLNVZPevfsAXdtZ;gRRMdo(hzg|6J z(Er!Wv+gq}^hg={Z|YRR7R*^;| z+v}7Fb8<*SV=Wjo%8JGq=_yyix>oFaf=EFOJiH*J(TGJ7+$>UPk6c5&h+0GvDt+Zj z;en%gQYMqBJ;Bqxl?s7q;KNy+9A97B01bkp0QFjN+zO7pVmlRV+lte|C{9e^SF}73 zlLz-4rBUx;%rzKTXthpx7L><6#TDzeusSs8gn${-w1@$;Mx`W$$&bT;W+0WVR_sSX 
zJ}O?XH@v>waJglCeWBrc-7}8d(2A{Qh0RNw%LvfGy*`+OAX?W{Hit{08I8!)>J*Bi z*3b9;Wm}+lM+jt|%l`8)JU*sk0bNYOUPECDcTP2ku_Qz|mSX&RQAN;-%WX$8FxidT z{s9J5YwMr)-d@}D5Cd#JGgqh^MYcB48Z?Mi_wv`X7Si+68DBm>;!O5u(g3sxvMV$`PzLPtxpJ*H~U3u^Yf)xfL+6Yfp+<%13b zQ@u|d!ptB9;JYX5ncLnc_n#w2&0d+2A; zn=_0~WOp32P7mZ=1;ak)$B2Cz?-lL$4Go{3p73A)@i&~-gxmFkbiG~8lNE}>hy(mU69LxS~K?B4VTLWpMUukUw-)o*S(-zcY2SiemI$b_VU7n zd_Mj0S^F1W@0hXEAJir%ukhyYps{2>44TQHpUg%!l~{~aV?0Xt5(nQr4de+d65{=eXZ+{G~aqg-i{CAkV*nN|f*~p6>WI+`T6-@~SywbMBI63Vef$3wb z{OKM#xqA;}80l&pYS6;*f^~hIRRAw4C}0KNl(20Hr#0f~6!CmYIB%rxY-_@@G7O)V z$fRUZj+uErj!c7l-w)hwJ8rj(V=vhE-1*J3!MoIe6dpp-Rv$qPWE4*>@6V?1gl+Ht z;pdtg?mh+Qxf=|ygcwjO4dHELVa6b#AJ%+U7Wt}{+&Dty=ItpBM=o7?sg#1FFqtqL zAaPzcm+qVaJt8-}w-qlmFIeXS!w+P>EyRGufOt%bw9p$L1NUC1BN2E^tIjc_0IiYJ z5~EAHx)kT2VL5PFmWXv@#p9X})B@(Q2NYL(L+9+~YzZP(ot? zw<>dPfaOPk>%Um!!^H)9u|*Y#MyQ6)#}_lWA*9D?v!rYQtrg^>;kFdqR@ST8wuog7 z21ck~4^unR2aS9&W8En%U9H9_hpX2y;OonQ*K5Y*a^Q9}R3Ns``{?S zB&505aL20o$>_iFO7NQ*v|%246`kl61$619Zzt=A8!eJ_y#40Svwp_L`CdsHX_ONvOA3pVXo(>RX*cSniSET?=(QRN8A zpk8}KyGH}>X2$?490oVsZQf0PzsoJqaeh3qVrGctuXC)`H`mD|dpv3sV9rNZtwWzn z>hD!=KYBlh5Iex2!;n#eTrzT-XjlD&{ua#pvFne58Q(*r(f>FlSOZ1Omb@FO$?@+I z|2aD6ubH1|jKTe9uS>+QzkJ5ufBk~8U-8?2|8Jz*zN59u(3>_~Za19NR7ZlaGWwqJ zE0;VZj*wk5RgvrUigi2V%jZvcy^3lQ2Q5LZz+WLzvuwO zC#f$kOepbT(TwCdUU&H}4NT6fYKF2)X9@qa6h61h z-}v6|L5_KW6(ceb(gBc;7wcr?c|oZlVqRNrT1?!ns31t{)EHaf9fa6^sMBiY8U0a1 z?;2v8r80V8;nN^uMj5~e7!TZSSy|U(Tkw2Z@U#g|t6*Cr)-7ULq7@dTaTP=89@456 zIXBd!;_^?JkQy5sV?W8aGihfOTB}Fagc;0a#(1263~laIe$j ztR2#iW5mYKI{4hPQ7wW}3PfnUuiJtcSzu8WC?zu@5^21)oKagwYlZg=LV4ZFXD_89 zghVfWtrq#9Dov@Bk!~VJClfoc%u6O}Mj;G*!01pBkZB*YZNLkqK>$m%^O@m2C zy&EvoYzE9XA*1j7U}Q!2;iwP*OH9}{3J|0i(6X)9256HWnujX1HX0d0u%v{NOE-RF zBVCC_%!5Uo0z&8e7qn8kG2fKlS7K$8CbxsWsi~|GRdhpNpq6kS5rj#%DI^;9BB({N zmkf~u*Oaj=38&M7(|N&mTCuGGt5FS`G%H-lHj6|{v=o$Vg%w@(tC}k)Mx&kqYQ-DG zJg$tQSWOZG(k!|qh{F0ON6EPD1=qdca;*d&FBz{d6_;DXQ3bUz;uIu6td2`lK`6!c zs?fm$DMq7y4G^IckPSGMGN}$)gl>u4w_S3#DfjI1y-Z1KxTI?$@or 
z9cOlCt9$kRtZc|inoDX%2bm%*`_XVz#iEu6qOK`y#;WBFxiK^xZEn;yDx8JHwV=w} z9*>$D2=%?44uJrP0b5G=^?Akf`GhY|XSfpxsV}WyzaDsbJ#cv~xLgk_(8&l( zN`59O5V*Idlq|nUm|v%gTni2p8*t;!a}R>?^gRifIcMhl$c=#LF86}_7i;s<@TVjy zBQGMPY{nQ6mPs*A3DBJK*a6x+-nO|oP2A^}h>(&N#H3uOTuB}3Z+&hc^!^7-7os^9 ztOHnBi)Ef>-CD4 zmlr&L`b2reZ9%OoO36g+IpcD9#V^199jEgd+jc>|+|aaIQrVwJAsq$(N#yv?x_EFZ zymF+<_w=wnzV1PYSIdNf5KkU_dx?92{t4Ku-}_Hq9vQJS4%Am7uNf66GGu4Hh5`@E zdl}Nd`r?MdERXki(C<8n9V`w;f>rC7ygIG*f?y#8lc~P<{M_I1opuE`&h9-({XGg+ z#g6`av9<-os7x%kgaqI=x%MJVOhU@LXepDvM zTD9E)p7?yEBvIAKa}~O>j{3j{R*{{IBj$8GQqtz<)(*Zr0utPFXH9RdBIg6z8qz`| z_%-vj7nc5MA6}v--R6!$C0(mgJUnTa9&~<21?ghm zjST{6f|98mmUG7S#>(dFw&HAGDGK7kITxZrLhDW*0iTILGoz_ujS(9JfNA@iffv*a z9`8eSDyNS<=hdxbT=pH8TPEG=b;s|&-Eh4ye`Plry zc5cu?l`ToT$8kpc{zvUsLZ6;%`_Pmo1BA>uK((68w-=DTGevCN?f!3jm_dm1dBf*V zXT&7<{p)YIyuKiXh~x!<2=GZQ*g-hDEeW*(`@ZAlr@Dd|Q|@a(2d@Q&1fZLea;KOz zZP@*HZ$I4)9PhgsDEP0n+5s8eE5Uq%Wr> zu;EP;lfQ;>xA4f)rz#^oWozG9$HrumGMoNioN(riLFR2-xL<;dOttRxu}JeiZfl-W5E;XLqVD z5YQ^eroB~ga!-3L1tNkaEr3=WZRafL0BZ7%iC~ElHAa+B?H(Ta4|ZQgYyfvTGjRPZ zvuKRd8W|C(<}uYyn;dh*sI0=dQk$Y2j8-lAz_Bm5Z3~tZv99KY#^C3BL`Q4u0s~M5 zwhd>H`lY?sJjVi1ivW2-eG?eH49KP7<+|hLa>eDA@p8H0<>kQ3%Yh?TEl~oH09585Ht}S7 zf<)?UOpcJBoO1n4bOT!_`eADZ`~@vm)F@a&K#D-lq&-pz?MFTbcY6lU5Y|=+V7->u zs0J7U+u^wg{#e0O&^}6}K8}Ld%LOm5uXw#&aN8S>9Bd54qWHF&1NXGFEOwTUO4^~C z;C7#$C%q5QTyJxAwdTCFy~ZPZnW!OVCen64*__7 zKI8f61g#l)zad6C29q|0_Jc4GVFK83K%DNQip%AK+rH!TLKT_SDt{iwfn4ZyyzN({ zWyATrVTr!UHCjb~!8LOM|K}5W-nncn{t5sBj6_&?G=%<4wS+t2XOXlo5|_I#*q0#n`q)oX12s242!*{6FCnZNU@34}aG9r|JY z&ChuII~sA5HyC4+6#3|V@3(CX(feMh1{TZ;F>10tng`vJ-{Ql35;O_) zV1J{ZYjcVj4K5m^bbdHcRr9K^D^>fpZ9@nF`@!6_r}LSBL>1+BAeTHPh4Ua`gu@F* zHAp)~)|Ex;IY)! 
zD1mFy+d;1g-9Tf=p08D*qKT@a6hLH16nlY5wMZNznl7bFLz3z40WBz!C^Rahz`U@^ z;T5mLVU#oYT=VB=JL|(^@^y@~x>}Jcth;JFOi64U1_|+ zfQEX4tXD{H=BKs11W!J3@S)X=Z1;4ByMFS0tqG{Y78wB%D4K*0LLA(a8uMWMeRq4c z5z#>vY8HvEp?k{Kv`1*!=d0J#)8<*Z;Q8EBmM?7LaPO_3H-45Lw@QjFNP&Gz|6gL5cW@nEqr^@58s zs=^C!*QcG*X@9%R(U`|#8vVEX-1`88f6+c4R7uZ%@4nx=%yj+c5j%qq?s-$a9(sx1 zv@=AGDe2m5W2tmZ)lzz-(x2Yxi2u`*a!9e>vgP^D~~GpK<*6?<@`ljOf6h90#8Oj|oUW^1k`{BLL>{J#RXF`jHjm z%~$3It)CINf8BSH=wyuWz^_)oNbX+ajRE z55uB=-rX|<^MujxOQuWCu(%zZI;++Uu4vD#z|4#99}alsVcRsqV~$MkzOkTs|LI6} z4&HvuV`6zD0e9~OoS)3YtL6r+Y6duITi#xI_EhCOH0%Z{`xtg$drvb^i(H9%wcapQMT+QxTv!^;dVt&pHt7aqJ{zI?`i{^x(- zbUvY$138z`kHWeN`)$W@?0`@J!3(Ui+BU~9#)RELhiX?t;q?{}Ga5wBKP%^g1=bp{ z?-?QP?E7W8JKuuXm-S7p6}8mKJ1+>v&38vbwFJ=lx%!-(a;K>E&q998#c1YXUnxr* z1+5$%)XYXk2&8Sq+ORAMtybh_!X=i-&8qSP96%ddF7&>G%X~?RYugRZmJtzvH6k{u zFCKegziAsbQFrKAKZ3az5JrUD;|%b8spj}$VJ!lbV89t*bfnaZQh_}elI%wS6q$*ITM^i0=82GgvIt0 zKngId9)1N&Q6BT)vva&0x5m97L*!Mazl+kiIp@w z!JB>11p|`T#oVn&)z0qsQ)cdo~#w`q4fV(I_cYI*HC&W1IJNMvEVtn&^K11wn zdH*6^s7;{Y?u_us_^nIEx#~b5q-E?U+R(B`R*Xv3tgn>4Ff$UmL*&6Z)P+jQtWY`Z zh&Q&zzv!B68408wE}sA6&+vg6tg6U+MlN~u{bU2uk!WLUN_1@+jwAsH%!&Q4|N0NC zYr=6{@bdB-%6`L_rzb2+M1pxVM5wTFI-Rg4P7caxQthO+hS%2{&QDL>bDNeG%eo-f zjM@tFe#7l{!Mbet`>(&^_4Sq13d||ff4)lY3A<|5tzpIbJYo@XWCc(WG83q!FNeqIn!Ev#PKWT+R;Q3r4%Li| z5RoFVE(@NY&-ncL6P}-+QOkkb?FMblA~9y9n5tDSOl~cqS~cNfUO+GO>3M9F6;ilL z(_{2Q&?y70na3DCaWA7nWJjwFXMiJocW^Iztq6ezezdO;n52tVQCsG$7Nfvf!a6BjU@~`ek9t59 z#k#D}ma(fMYC{u6Y(nZFOj%A%(l~&{H^ICPbH#Ezg!_i)5zGnq038n1YUJ>+^(0du z-WBE(q_6=B)C^opHMOsio!ii`=Ym@<*qUOI2#u!9)nId-HE>v9wV`B1$rbx8BWG5C zrvVfZ6hDJ#<-MLEdpY-R7x{2A(l%plFB6r5~N2R zL^(bIR2~bBAcqEXLli{~0H=&sI#wo9V|bPC*$Cf0V_eA!0V8VjonxumepEt0PL)8} z#!^M!rOGOd+S^QOSOyoy^VQJvVIJS?^&UKjE&>vuK~QqR*Ox1Pf4$=C>lL{)%jF6X z43F|C9(?bh%enS^pdfYlKK=+}_VG^2j6mfh%Z3 z5VAfbC{EjoFJC?*9~Zp7d_`*&+lhiWX^BW~G{qS4{QQhhPfrlBWX)E&0L|Nbz1?uV zT*)n56TPh4hEiD%;5ZJvUS9C)uPc82^)vq8|9!^wa)8CCd}03i!SIRS`K15GSL059 
z2SVPP;Z&m7kLp=S_ymemp$hF`D5vw&yVh;!#0}Ku}F6BH?t7Sk`Ft4s>rd-{c3edF)F=DH+GXist3m(aMe}Vh5ZR z?=uNh6{i@L4qQsD$h9F?@|tVC7j>l48KBj8^*TNMLj=S@!u*V)CTnTYn-tg%(kg&; z2{@e>CcJ(AjHjnF!@joz*W2KoF6#mnP?abjIPw8f4}#GhDq>2OBf^LyFnf4xT<>QB zm=sB4v}CH(sCpu>Mcqn6t_S4Erjmm`4oh0GuS-`VW2d#wIi7)N-w$fJo69Ut7%c%7 zy{^f8HOr$A%QI`OA~br#Y2fp44z(giCWJb_d?*!-{fIzzoFgGbP}yB9ak2G7TLef; zKs6x(selR->SGcB$zD66!(-l>f#T7)2vVYqHsgK6G+-bqqaO`Gu;{Y@r~-7U>W4KO zRZ!=UJv$7yQpx!b0!e}>0h<~7 zkVxao_O()I9J*m}+iwK1)?5ab*ax+P+-gM$`S&J3)6ktUF4wF<5#0%6QKf1Iadp6F zV;^HgEleH_P0*%@GcnyC^XPhgCm4A>ed6;e0-0S!v{7FIQY|R~-963Y5_b)oWMz+Vlsp{%_)a zUExJ^sfaeoJi{oOa6usHQ4mIaC~y*E#1az|$)jLd7Xpt+ikzQO(mnrgfnjTv^31J` zdQsiE;r%hd{2lOkjC@$4_#e5r?!uj8qz|D1kw<{^_H{q`q1sT34r-TL^?Cz=E;$d( z&auh>={;Z$Z0q@R6c9rc70^M+om_=ej9WW zL^hna1^?@R{V#+l*!LUu{Xl@=WVGm%0#bjM!c==$wek0_Uy)+O`FzH@t`;_Lc)eVx z8E*y;3s@}(f$B1O-*LUZ;`#G0`1RM%IFA43X4dFQ5XHNZv$wp>Z*=PaK~WH3fDkeD zl#ib8MGolJ~0?P*^K ziCXcaF>b`qUYViFG#mKho~hv8YSgi3?4U&ByLF=CMfwYoPv{2BE)pI1CRZ}{@% z8DGA9?s*=^aiG>}YD}ZlJH*)d%UGIiP>{@f>?r;KEiW?v@IZ$Sg*a zQXd*962!n;%0s48031?q5ixmPt9<~pO3|2|Gw&v{Y>TAkrm2g9KEm5HU}5kiM4Uv`1)#6&W;YU`$qNdhocL zDsnNSQ3VJK5L18tR>cew3LqRuLCzI>&M2jszab#V@Km$k2|DueidqGiSJM1?qOCgZ zj7Dj-=pvJZ89mK;GT=mjpuHxEuX2JM7=9R>{$L=6HeiKR@Vya;V(IJMQDMf4&GA2a z%^>9McB+ha+xf0$EK>q>&|3TBez+f7ep_=9kdfQOdKlWJX7ArcO4GI`Je@Z@J)M!F z;HVi#DVFEq6mU8mFd3a6UILWHVwIFAr}0{qXY<}Zdd!!3_1Yk&U5BgMJD{!zjBBhV_bnx+b|DV0?9IMkA*$WVd5#4;ee%AS{ zYPG)la}Da=R}tykGHe&Obnfig=bx$aIOh($Nv*<#-6KSueiJS8(I?_bnxVt7J%s96?)b1O%CV# z*T5d0JgekgM)n=grtzE#MmVGz*WCM9`&Qkx`8}abRV+7m+6ZKD>peeb-tgw?;pCUg z-9XGn-bdHtU)?)A^T}z6!T>+4sFn2xW`ofIh|ekaP8q;dj$p9Shc=`jSeJmO^NOca z#Ay}k{s;GV`DcVqsbPP>C?kq|0Qmr*5kY9MrQ<#1J@UJ_Q8;N_Qx9i@eVXm{6Ib&NBphh(8q_tJgtdAf(y+NMKdxfEX8aDNSKtltv9CW;G}Z z6p+@R1rpGxqX|LfxrRYWQ=x?Nz8BoC8OK>oa$9j6irb;Am6H`mR^$wb@PJjY{z(vE zfkHZcs`g@K`<{^x#a=4VOnFD9{jnQoUMEF+4U7<%0Y?4sm^^%;Pa+#LwH&6v`-uo# zvnlrF$GxOBrHYe9m72}F5My_E3~(+!x)$-eKO4jp@VrfGw6D9@*mJW+MnH&*om-4T 
z=900dm9=J6P4&+b<_+w}$a+0$eN;+DS~lkENPucVv3;MEBXg)&eTiUu=(a|aFi*%e zL%=qP$xR29(f+$Vj^EXz2*7Z3z(IYpVv*_#md|WTUtG(qwG&gqVtGMcex?Y;W#2fsgkuxi@TW-gJ7EHY-=qOthi(=xc9J z5ClxMz0{?*r|SU-d+v?)CXDmmbDda+X+--3?>~o^2!JWy!S;??)ztA=x1}ocq6h`*f(s*L?3{l!j5XAn3Gfs=bxzH8Y5JJCu86HU}JCFb0sjS?PO8>voAi zx-Cm8c76OYa`3pS|4JXrk-{M7+nzbdN0Toz1cT9vErd(g?}nih**jVxm!y5)b8dU2xi{!nZDoU+aSN6TOztPfs|V>Al?d9o1wNV`5To zDML!vWd&<%-4m9k=tpl8V27vC1JIx*)2If@N+BQkFweIHtNmj0N$noZ=d5*&Z}0-1 zA6yjz1P7P?@e^1ZzJ`ucT(`j2ODPo(Of~=%4B);mHulw|XWhsrSfqE5z$`{myOw34 zI?tZ_9M;x&2CNHGV%2sUgJQ|C(US4a*a9;PD7)OCjq{7n4ndND8hCgfmfS8qZ-cZp z5m?=eITAxppF-$VYwlehdx(l1x#aFqB`XFqc%|3Hfq|6f_tAzUXMBCVVO=h0l}B05 z71zBY9|g5gX{*%#pSri*jvPmp1kC}Ej0h>SK6d`>oO%Bz+nGCicJA$}N=XqU0r>tf zcK{59@}sJ6-)`1XDJ4ZPNDy$io12^SzEvVZN=Ts~iup?+1GPZ0L-#8PAw-B&q{N`y z53fg1EAH-9;0h0jk75l@TN&0|LwCi32&;`4wg7kXybnrtnJ)xhL=4uIoAaEqtG)X( zDm$9@t9Y=-$gyvY_owI+Ss6qIQiml(11RX1#AcGy`~6(}T}(uc{qJP_1&Ac8vrrpPn`;W7XbV27Dw!m94OK}|t>$oRbtjRZEjHKZ z7)^x2l?)?NO09X}eNUtabODOC4?b|F?Rkv;o!v0T;Dd9n2fQEo3CE1;``0v6-e1&y z=X6X(Y5R3!ZAj0q-upz4uH?O+cxM~!eu@F=rI4MAUg#YvZ{lBw93zEa&;m;KL*YwPf~GsBE;fZ9CRIW8HSFD*=XFs037s zby!9`FukmV6>jvQP6%-tl4tTFi_0TuW0GK+=@gt!3zpWo-MYA^hZ+fhxGtTPKSeoiC$+kP!N3#he(C=0{ObJr80%#9{LKH-EHbq!xk`R#; zNIO-)Tx?ejolwgLc)bXpi$EmWSPFV`%f7x3wQ|bmkP_=wLNpqgbmzp!6Bb~8d3lAX zAnz412ujIVw+-92TfdTus_c^OzNxQVa>x0#n@~fkAZQnWm;k5s&|GQT--7sD6HrOY z<<}nP-%o*D0q8S5S$(C@LFPLXwlo;Ix;d}$dy z&STO79RT3BZD%9Cm}bj*AA>E=e2va?oRVE`ZAG=+`*?Ka+#5>;wp>xcF6Aykd^h4? z(C2DRR-03*Wl4>a>VcZJc&RDdMUzFHV!#}N(Jj5s&3=y2pc;FU9)se(Nce7{Yp-`` z>(J)>sB$?9+1!uc;p`5i4UX%h!+nMKzwfnR!8WcS0@vaf0|!0BR1H*QargV9`2Yte z*q@36DIR`~`pXUkd7F)V;U02NWjGuq{gnyd? 
za#pNcC0Z-pf&Hs)pBOUmFB62|7%+^B*^Tdy4~nHT%lmWk7k2IdDxN4EoFD!aRV(Q1 zLC9Y^?hHCN+xtTOejjY~R1qqvG3+H_rC+1*sTFEUCZ2N{SXRLd%;KS0=k=fq(}gafoFyz$^Z+E0Sg$#15(n-awUy#-*?Jb7n-W7aySP9 zAQ}){%2@p`oF}3#`>E~mtKL0pA235UdjQg4-eDryCIam?kyb6ga=zIautC;R%mmT&Ev>5PaAi z%ei3N3nXO3Tyedv%$RPuuXF1P&)OVo7H9zw#W=c?DvFkQ_wSR5MYw206hX3#oCH&& zk{+-C-RTqeoaDG39t?TQ1hvddADLtDflgue;Gyln?E>!S)<++#xW~&5z@Uvuy|IGa;dR|HO>?{P zd7dc|m^1RW;dZ^^^G~1g%g@iK`HSf!dl7^i+CLwJ^a!sTudluKYsa6XKz;o^U;E77 z_^cP*^@!}puBAt-{5c~dA@xAS4VMKWaBky(biAvX!Q(mWL5P%MsQb~u{1!1wF**R+ zKMQ?>Xei~d8N1O{byXp&1~IyXgwW(e75&+Wqt#W(pLbNxjf@D2*m< z1&UTUNW15XT&vknmwg2seHY<1wVJFu(4jq5^-P{vn!g-g|E(Fu%!k>;kL;@zb~>sI z1p*4@DKg+v?_Z!MNzC)4l;F;rXF}4%Aa0rxrkO!2rGrU@si`8?U|9w>=F6d={p>Jg-|N^yb8G=C{KqH_ z>VwBkI%f(f-)6iUJAz{8|-x&(_b#?#n4Lr689~04Ca3=viD38Juc0I$02Z zML@2=^|m4xW=Pj9W7|!3SFPs6om+LqKhhPO9IN*1wfBxnMyZM^TBh2>A_4=D$f?QE z9%YzdcG8Pbn=J36#XUUPhHCfVX!4HigB=>5quV;Mhw{kwj6Q>iG)h^UPpYP(#0TCS zSb)GF6w^F4`zscp45`6r{VB^oee;gCfR!`nH%AaGKC`aH$;MtFgG5aMp2l+bp9mdIM4@?wI zea#vkfBJO690RT|f8hH11yL$4=QB<-d%C5_Yuro4%a>QA6a)Fn5L2k=4{YByD1d#X z1l_Mcf5vh;H3a0gt~i~R7KtwNLat+>q29I?uP?vjzy10XZf_g5tzfh0YUC?AuJ>mG zk#Q3L6&llj#_=sDw#!WR)Z-DY`OeR2{}_tfU`3+?$I(%8mr_*gQSvz5`tkGdw0lsH zDB_6rcPt{Y9ZlUkgSPW^z0A-f^X7cFDAODfqB#+!79^H-L^q=2wK52)7KGI1)bq>2 za6umrGicvy7=|@FjMLp~!d-Z#{Was~xZ?r@qHWPOvH-lV+%(>D=t2ae9=$st&Jhkm zq z0_G`UiUHG{kfLCk6Jnxt>y+r|rpDRt0Z2-dQO?|vH6WxEDq6*4z)5ado<2va1vG3r z7(!^yt^@&!7@fj{EkiwRgHuJLpOzMc^f}<;OhqZPvyX}&hY92Jwp7a|#5f?h?Wv(X zMRi(^w7K2U&ErIL`ip8s-ZT7yV+?IldIK>)TT@1=MR`pNReB%-P1>k%V$T;a?tAfZ z4Nc*QbEBf{#p0Td(^8uD6{9s76clg4AO58r!z_ERR12a|C_8iG6HPMb46-z+=fMf#o7Z%*k0q_({Vb%>n#xjwHcE>WA4J-+|o zXj%XQ^qPVm)WYlppYt&JTT(sJT0W%3t1YnCH|6wiL^H!jA~hcUnGdhC>uB8m!@SWug(RDQ@XtM-yFK>?6dHA>`nmbp zqywiGg!?`jCkHcdxK1@6rZg#Tw{Pv`?V88w;~s&+YjCQaLoImc+VF&7qs9&=9l;?5 z*HOvVucg!vgAZQH`!)5CI7T4L$J*_zG_^mydrznRx3%*j3*@8waC=$-tw>YE^Ya+382<8QwqIgJG{&a$DHsEXV5%4TCwq!{t( z>4MAUj9M$!+Z%4TD{jk-%jMEIXY)LfrnT?L`-b0s|3CQe|M&mE)6)sBuPalSgYyGJ z>UNY!lKtGebcOP`UU=U 
z5b~pk7}DjN4$y;EcOH%d78)|$5IoTXMJ5Q1kP93f_8&isZ7yc5jrZ-Z2@PPVc}J!4#V=}`!flf#w znm$ifoW77x8qMh>1*l@%XoG4k&6zHxnlcUnlIHdWqWisuL7P#gobVtq>|bTrAS>Fx zM`YWBJa^bRm6>@A7C@8ARljQc)C|4T6K$fjzg1JjafSsIdKBskYj;kFs4Kwmxou^K z*2)@{MEa4}CAf2NyWK!ZW>TvVm`-A?!3G_h27*?l|&02Ry>bhMcD%7da0tl=JmYmS#eVd6beSo|x4focw# z1!ykeyk*uT?4{6YGTU3L_tA1N#fDyeZFGC)^QnwzM0->gfS4oenF&9fUNRoV`iRh> zKtnhC-|xkP6n#ons20-320B;^tebjJ&lov6`hNC~?k$h1CVwl^u5Xx@c225GMF=iS z#PfN=^W}uge8OaW4ffgDm}N0X*X>$J=^zjLO_qYa4%bY!(z=i z0a)gcw%A5{SVoO!F|J8fQ>ZJfZOjHNd}r|Sk31TK#sIty+`bPT=x2ns7x_5*{`S%R zeSBEItvD?+o-SOsQwlgO6P9Jhw(t1;_doDPWC@2`uuwx$HKwb9An_4z~CFBH5eZ|<-;JvNDHmN@JAnj zHfX{e^`=zw@i9K5`+IA@_o7E}Ny;AOdmdkvY2FM+u?8927=f`M(XP+(AI1$oW^Q1u6YKzM4h6IiN{yVOX0OrkE(bUqdESh)99804ahLG3~4rL*lIGt&R?AZXppX|@It;$xrCbp19B(9I$VNQrUiGUYAdM^}IwR#p(WE)D3yS-Z! zQVUc=vlBJbN#ExLVZ$8+t?c;iPJttUN{3WsZz=x}qR~$}s;0Go(zwzoRJB@gLi&uD z@>VoH4KoM`HKKa<}dUn+G7)w?b91Zr)#@*Cf!{HRM|yNvD)?Q=U0I^wQQ8or|NS znO3ST^*JKwCd0s?QQp(3gLN!O9kL_Ud$subwDsim4t7qrH!$$eKkw)mOZ(1i<8?43 z8eKdis1H{MDpHZKe7DmyZ33A9( zsP?)F`NmfLD4XF5XAXwIopS9IEq`r$AonA6_q%GFg^O`e8kn8fr+GHYCP))h&*I#l ztKW&L+vmcXL<=6qYa6d&e44%H!@#nt6s)LR6U=-lT*bVfV`M_GPfIO@1?RaSXYHW; z2QC2Z^=QBUG5)8jHgJNH`e=KJ|97~OcAq$AJ{*zI2T{wr!xuaq?;t=(0q4_zjizBvyw~eu;|fwRYPNgC~EbM zWSEE_T<84rUxOzc>JJYY4+go%i$bfLv`$yTjM0VR>VjSJ3Y7|6H-md$EXMX zWx;?1wfETonTk#>$_8uMQ9f==w~Za1OD+xd%Z=kKmif>Ye6--NU0ZCLzDss6le!v{beI)O?x%F}UbIHKGG2(!Tu zqa$!b9IHI3sc(oP6u~(5eR>UOGpC9*U$nK$lmhnM)xtt6ygmR1(AK1Cw+4mNGy#dS z%@ypf-jHXl%_#}i1kUTO5CdW~dttKLNh1-|05l0)30Baa5Mwdmfa9XdI-F>A5CIXo z0WnpSn%IzWXx`MF^1)f?8%A>qb14>pjL(zLG*@Mo0sRZ@@6I(hOKQ*FKwn-jxMqS3 z0g?T!dQ9HN&#%*ZPlR$X$sQPXSFQ(Kz6I9Net|#t=6wG7)n2R3G3|sFn}eRggV%6{ zVSS*X_JRNvMb&KQm-f8+|D9_ed;}q)h!JeC7X#)g;HT#^o{hp0jjjoe2w_4+K}H33 zQfJ8=u&i$>zEK$1MEiMwKt$l|3EEmJFBLUv97wa|61KJH0BxxaXn+M%(E_Iw0#Xo6 zf!W+;@#d2r2zqwDHd`p;l@0sc5fk)cVgLL4Kug*)rBDED-Fkhm&l2Caof6nK-)3*x zE8Cf6{Mmw4t))4yP63K3P>^DpnytW8#I#g=JV5LmIINH#zd|@ z+bae-T%X%*TveMQT*qeIiMH$XeQqzT=`(xZXU%aA`#d`S_>qtBwUF`89l_|CQB2a> 
zL4d*W=)Gz@ckfc#KBBhUv5ry?_86ExOy>&jXy}q&$k5o&=4%q^F>y~OZqf|Y@{pO*4 z;DrUSFej?i;-`5crF7%3s+CQ|0a-Jp!N$AxV4?$w-kI3J(tHP_b@hN^JO*tY^tGC&Y+ zVbA<*goq@Gwh0x`U=2;jwR4;w&Q^B{qHliel~LR4T|t-+yA3Ya)M1)Y!Kif?uB0)l zy(feS0iQY4*BKhn(dj}JuBax;>_x5_S`^D-f${Ku)e0@7%&^g0Z7M)f1a(T#B(2~y zr2s|5>AcWU-*`Xi3*FlHz@ys%>Wlp_`nwV`Ixz@!xV>jnn1GJ!1dP==}hH9O& zA6W{21&i^=J{Vk<;xB-!gmGP)6IjRnlIy~4Xq#u2q7bWDaFcihO?#%K+P%5r8Zjho zS%PGCA_nkFl+w*9U`YYXlyF)oJmDY#)w214aY~3aL2AZ9+F!L%Y@$&XYXw43(t;`j z^!99^t=5gyl?ttbnyKm&tQc8`EKu9OgdlAU0VyUHna7AJ3Z@`PK`8Cql)8H6jm zmH_PWVfB1Dmo`?ekLHfG-2uX|wF*UxQ9Mz(d6}MSOaG#xKA8jNkwE zAGp1}Ax#mdWkwRNt7V=r%?bOy;mgYxyuQ59?^rXEcd&&OSb0QSJyN#RezC&K>-7rN z>e9^!Az2>=}5kz)$~ zyRO?DuZ9qUfdn_A!Qpj|D4a?Vtz)*M8(qcdoKNti% z%|so&sji-52$xjUD42qAUzL3}#==ikYrs?$RZA-r%v;87-K>EiGa#j+AxNdxo(4BG zzf?i3)_W)PIio&qF+!ww%kK98z?=^;r#VIz3(qNGnggaOB2EEu3RsrNCWCXrJV#73 zZ4}jFV#~BBNME6IR=NcEZN<87D1`urR<0WepjE7!Ew?9TKG>kJqs~q_>vXHG6rxS; zb?iam0iz>!K5qpT)w*s&Mo0Ir0U|-vxbEnglic=m~is%KzEsH0`R)ah6hU!meMn=aBX@kwm++W;;RMTkJGYJGO-P~IP2 zljYAQ*nDz_>1bP6=uWxZ+HKt3b9?+O55FKFV`^J=B@Me(2&pOlC=NjskEnNVpABgh^a1S(&^f2|zm{(HzWjQ4Z zV9+(i))T1rtZHba&Are-(Ne3~5@FD>XYBjdsE7>Ar1v};9#O3ouv&*&s`Xtd57x%_ z*;f&<%y_Yy!Jg^<);a#GsQ%ulGMZu+%qBgm#2OERbkcmAKj$TTP)a*}Ii2w7(=!5q zmp^`opyE8AF{Om{w&K&L3(n^=1cH~>SG>KwL6!S%F3JGpd=`jA+^%a!*UVTI&74#= zS)h!1k~stwz`n0|eSO8{a>D7npw>#{a!R;u8&nn7w=3r9gr}!7UawcY-js{q5ZST+ z!}mI>_XnLv7CZvq=RE(j9V18CkNY6xdycWF%eWr^dLZJcj0Yk2n^4;@@txzSU}k23 z#yW!)?K6Hn2=n$^gQegAoMXj(D%T@dnTrqVrH z(qvq}_x?1fo@QrOjb+lV?b<&B;p z0IWBG@_gA5JkMfu31<)aS!j8Bw+Zs75Q!8D2p!Fyo3fQRhcqSQ zF?%fl>30eu(N4MAtq0M*aZUsY&Ec$Kl&QiF)*_0FT}uOrhk3-J)PlU%_IXsl8EZm- zs*W&%>p*+00F`$5y6~VY`S%{AhFehfC90(_{ zz|SqsIT(XPD$Nrc6WAzHBg_Ghjx9zB%iEx;Linnf9;Ro^J)({0z}D9*M=~ZVSb_ zPq!g&fp;G>1=_yiy*Jd(twDR{?FWtn(57f~+r~D~-U!kOb`~49?;eyM0GC7Xq{#b9 z$@FP9*>c)%SHD9GKq9lHt!GPb#Cg50$hA;i(Iu;4fu*?WTn8|KKG$ohkPukwp*^ek zXATA&?8}p_Ur0aI#{tGaKKS6c2C?8H1`BR`Ht$=(>Ijbg*#Jb9l+P3*mL=iWpMSzH zKYhmA+Y5X~%}YX>1Z596olZEP&$!*LczJojdRuWipK&@r;d;Fxc_+@iS8Jx!k2If9 
za&8l$)(VXgs$OgbNDL@BU|l!dZX4!hX$E;Y&3L`Op&($pZCIBZ&QH%+mV`M4?AaaM zzfZ96SF#{~CGh!sAKwZ>zIAJIFgj|(;cxOn-OeX&1Dhdo6zqHo9dpI1H0A}+tF!&v_49USlbN9Whb zzXv=kNh(#=3RFR~tON^l3%i?JGof08b$=!5zwhkOoAZu+??$&2W)gCBItG|2sI2?o zdQO)9hM}n1ib*~Dky31#ld9$LEWG$QMfQuBrh$Hy0@7rS>sqJOQh&60(P*b)L5gPvMrDv<%@-x4J|8Uu#eYlV!#eWL{a;YD zB2-0@N`}Q98hd|yPsYt7oW8yKHs|6(tBl>Gey|IziV`IP+DIBw&i9<4)KS4B|^(5)^gmjYM-INs+w@S#0J1|g5L z878~|1?szsS&VMOqxX-&M>PWbBT<8XX=5PbLKkX#{5?AESpnV;!9K8ITrG{h2w@=T zR5wxf2$-y=M+}0~6mVH4oaTgOo-i-Wcq$6CQOzvNY0}xX)|Tz^EbaaE@gJ@<6^6AD zZS7PGLOcL@-002&0}q~Kj7E8i40KJjVv4Lyi-&;0$IY%!qwS*gc>Yp{ag(8bis$&@ z9dp>LyfE2)H&;fxuRF7rF+(C`2tG>BxR0Mvaz;8$CJaKw!Wbe{O?ZLbMB)8jh)^-; z&RVgRj4v;5jc!PwC^k?fgant;{m2~F!ea3XqsLj6@;GSU-fv4O$or0xca*zkq8}ai z6s&)U+bEVn^!_%|ZCdvezJ7`ppbp=E$S+g0eRP@vE|(KNJwGFe;PuPzhyu*h)G}b7 zKRufQn&5g}alPIkqWJ0aGd_L(8DGAddS>!jJ81aTQVLF{K z$AG$RwsGya-fo!Y8Ph~#d^#muPG`K`RwxA9ZADsU{QT21)^)>ge_YAmy?cmFzo(&+ z$0z)?K|2N^A3J@I>Lwoj?rXFI30c zHk1Br9w<}dk*avr8iVDWv934F^NbJ!`*qAS3o;W6kYf_c>rRyKK3`5aKV2ArEDJ&u zq-jE}6?`xJa=(!X>Tv(ZBYw{=G;71JD=I+OP+DdouXZh`3V zJPymLIei(qWaAGPwkTGp~chpq*O`7+2T9>3d{_KnGr)%M;X z1d5eS)w)OlDTt+(#_wei3%CqyjKR=%W?k|fXcS|#K#)P;JXg%~(y}x@pU3on+1Ou1 zcvhqsjkaU=pvr(FFu)E$vCk8>jp{T~cYe4PpL*6Z#)uSHccifAudM%oDzj-8#HJ#c zr$X)9BJS0+07dJ`6Qk8abvxHoh(H@o_ktX7RnOFLY(*8Z zsx^=dO*mqiB9?i=vLu}68J9U?F;%-J*&1nk>cnSry{%kZg>y2+38hx7+ivT=nDgvG zr-2@DMZ8uZb$7oN9xaT%!eOipxep;=S(Y}>IEFx)7-Y{W5{Ri*rx@7fJ^NaTp-Ck- z$z`*T<6gufVmi~cI^FMXuN~%#y6DB>n#Jnvq|tvxy(WjC1)25&6xPZF1XR&BpS1Pb z(Pjt6LURD7dYY_95BG=wh1P(gh2xU~LQ1F*?4@EW8(ywA{PFUN>)Q<>Ob9A~5CDSN zLsET@fq;vHkRsKkV?&BYhwWc|sEl?y98ETQcugr8TboNd)IrOmRZWIU@$9PAPFoZeoaB zcua>eO5ZCf9P4^Rkc#+p!8}i`6Y09{xL&WA=V#1m#&sE-2+UK$<$S@iBm^la`G$ECTuuv~o}TbD zFRYgk09I(d?cDIToK0G{VlR}M^uluIypQ!8jl&*DzjV1qPw~40Ek;i&u99V4l3n`I z^~t^XoQ6~kg=DkAr>8S6PfwUnOGmMb;+n54-Zh%tZQGD{W^S5ebU+^aGw{9x5c1xc z4YI%@2x3SUa2s6+L;Vc;IYk6}&ek;^T{B(1NU^IVwVnWsynlE$c!uty6nR6*hX4rh zb)jvI^=ypU4iEBer!Lh*DMB!Zq<==OMEj%3B74vSv#qLprlZ4MEBou@l2NpCvZrZk 
zPV)WHf~Kl)ibgP`&!4H&Mq&t-y$}{G3)e<&0s9fKsiB&~{~lDz`#y{~Wmcl~DRF0! zbJN5C?yp^7R5wz)*ueML`TOqyPTik_cjM>wbz0hIHqQPSIKOjb;(NN7HM&gRNHVg% zLv0S#yK}SvTlMe-@R>wWs@FvmlZXMA^NhIIc_ch z&}uv!{A>G=#CME~5#fJ$;z#2p(&qh-x{TkGdHAbG1D~U~VBmMzmjm6ZwvqZv*Wz~K zG*6g9!0q(~|MtKBAIvG>>AbKDI|QfGjFNX~1^(Cn{NK1<-|+Nw!IWmi81eG*g1`Ok zcci@Uz{CyZ>G=tQ8*c07w}6-u071^V$v5s=AzCqsBA!lIri6$yZtD%}^$LByAWZ=! zFj`*M&D0GF_I<@tCtNNkyuNN&*Bdaj$Z@249IHP=UHP6Y$iKqzkdE~O@4p|1CNdzM zo6eu9yyfY7r+kfB3XUVuqj0Cd@%Y+dXyV;QdS_&tBNL<>*N>nFLuf-l3qo3eVJNL@ z>Iav!j|#EP<0Avl-Xrgg*@!L!R@5F~=-)uks@lftYZ;M;gEk_aGD@GKeZ>&4%qM($ zKI7B#2~z;7?hsjUIVGHz8K-%nS@!@d{fovMCQ3I?6IW6t1$PBaS~ef<>+ zz(`Tb(8h!mql1`g%77TZd-}E%T{Fc4j`nvCI)u(&4ezrcoMr?GhF>&igF#deJ`RD$ zc%D+AA)2Dz@hG)muNkop4K4$-3B3xyGeMl7%(heu#_HfGD5iOOhdu&mbdXZPdJjih zVUD&Tdochp1?)i(ufddt0*bP(CR^sFQIJY$!Ly2(Vji0s$Q&1(6-WuBGfj-jj zk5RGgtDu*S#Tb03(bg(lw1ioUs#c(Ejy-Bu1{>#Dh=8~X;Y6l{m`W$010B?#2c^Al z>x!4x8*bU8+=F7m2xsCQ;Y^2fwBAZXt%r}-A@ER_?0r9iAB_|DkVu(&MYpTpar`6v z${u{MWrF($9|5WN*VlrNE>ZdyE{3-KbUxv~{rWTJ6!81+|Be)Z&(CK>09tmOKb;U0 zdq4g5+kfD<-@h<}Ic0qL@&)U@V%s-duWv{Iowqbim}YXuS)jTi1R&?!(B*2h41kW! 
z%gYYkSN!zpGtQR<3QF(3y}coX6Xt2gZQC)=6SkbOj{kc&3z z0B_X4f17IbaGazUqyH-hAAkiQ@078A>>MqlU~982Rt!Oi62N>W$R>Kk-`2SrTC^T&_dbDU>1Y`9tsh?IFR5;_ok z77TFMXz+Yl@ac&F27?a(QI3(C{oO^c3MSuGE1pYkm@1g^=`iA zt*E~t>eA%h4IyJzNEPxx@MJ_`JMMT4Ccoi2JpAo_MUMz2q`Lz~30VNFel2=o=yvE222)K1-*VNZw(kx zbt!h>d_J39rPO#Zn-~pHb!K9HP^aF|(BfIrXsR{)vvh^3*cMI|*Ax3rFlq*%}Xy z05Dr?!MJB{9tR)C$A9kl)M+77^Ze!}Qjsf1JF$Cv@o3!G9lXIYwxqu0AUBC6a-YejE9sMaN1IhH^#2(7hZ-+OoLDZ~~G=52=-b|ep= zbg)bT$pQ&g7B<#ev2CT*Whjor*VUh*H71L#R>zmG+u0BTin7UKt%|K=Y?l%JFwb2_Mt5Lgg41nA9Ps5Ux_)@qsYopU8;Y#VEIAYAKdN{uF#647E` zCn8jULkL#S<%QdpL-92R&l14q^>H4@C|1(}usgay5unn04T(@Rfx%d$Y_aBMrF>C~ zC~CGWlweTgv+V3Z!5L?G2#N>>Sv7D{soM#K)oLlF(%lh3cfhoc=W-clE)3rd+%a{L zfrgR~mTBF#9!%E`JOs;lL=lslbteZ*Xm|kXd7%t&}N^UCJQaR?kHG1i)HhW^`d4YAPACBH6ZU9 z*K5Js+m3y&K$xumOGMdA54`N2n=3CMSeBjI#l4Cz@6W6{K;Quv#`V-UXZkzz9Q%;j z{fQ~`m1ITl?l%BU7VIudd=ECH;aHWyX50)<>uH1)g}Nt;3hmR37;<~eE$55c|Jiz ztl(tD^%`(Gov8`0YyvcC!k%}CSV4N6@q9jEyJc*Jj)6PLkA}FRIp6Vs{UtHb5THJO zX#Z@-{oHb=o{P|tHU90Ri9T-lKp#_2kE-^G>`FJj_@!0`7a5?^i8jI?YIABw%1E#x*%;zrq@~d- z=s(&VsmxRCZyVN){Nqvzst`RZs;#@b2c%>mhy`5am}mPwesQCB&3Gl6Xmi2Wlh?jD zighUOiy>q-SAA_Q9-ikAJTzA>X(Dkd;MQJ=rCb;LCg3_Y&qM0x-c%~=aoQDqR+ zXxF7k5}Q4tK!`hu1Og(o-{&0&&-!aaGN4}k}|7zpv)^G?=;6P|V2?@3CV8`&sl|lcRhkG<4u=+%r8wve7fyaK* z{o$bN(dB*QD~|q1a}1By1AwuBB8lR1K4G2$w)G8pU$M;WeC+S5-c~vuMcY`obJoUb zII3Dxvu`{Ie`qlPMgK+VSc*e^fr1Wol&|0YBg5s0sYqLX7&93DTh9n&%?)9W(HqPO zsZZvRLj)+tej)xjEx2xo3h%`u{Ci{C-`DF8-do4JvGI9g2KeE)DACQ*oHpN-07N=Y zc%UrSGc$ej8|6Ol`KR}!oA>kW{rvNWadZ$}l2CfE6o5HJ@~2Zm60rWpoDnAXlmSL2 z6=>fJ)|J5r5yoO@ky!T3>oHisPo8{0wrt0?u~^-yN2=N!1*XLL`9Ulo@N7h+jP5kC zI57pzl@x#wt4Z%NXqrtwULvol6;0cKI|@2a=`dc?l*~c#pjZwW5CCm1=G|->Ex9>w zEIO_hJiD5fhDiI=ipjM(ek|JbIuBQ$k=Bc#)?P2d%QuJ3f~7vk`#Mv#8UzatI3Kw- znCUT!hK-?r?<#T3K=XBu*mJ#n+sqvx2aJo%*P`E3D`d1xojX!u8E};D$k?R+@Kf8` zv2ksk!E3d-)u|Ta8XK4NQBrms)5pirzVYQDghTVPQkcO(8(Tf@%_>mCs-YHS)!Ll% zpp*`6&X58rRa2s3HZw&pG`8!0hgMYU+-<;+^&_a2LAf+Ezh`#G=jB1B?-x{~Bk(ZV zp{l)@*LxpTJ5M!-Jb94j0Kh?$GYyg 
zSzRcE%3LWX#57sGm}QwNon;BqgHu~`?qF4gHJxNo6;fg#W%eLN0wKY(w{3l!!jqCn zInEC?uea8UZKX;Q{}%iu%~=Z)S_z2*M_!X9U>%wWTEG!gK%5czdtiJFj{7qX zBCO45j{b3uL)~#7>8kJRFV+2ts!$I!$7u*0YMz{GE0*m_rb@?tJN~8~d;nrfWtscW zwsTL5(Ij)bExEsbpxe{YdH!gH_u%8N`E~SfDrzM-qLrqg=QRH@2ZN8$e6F$A8TX9u z4L;0{{)oT(t`?Bv+NO_IJUu_*`RM|!73=K=fnuH))`O=Bm&+5<6!G@9;^pNHfBf+U zF(ypYjBU?&d40vp>kZep9koON0Vza*W}_tR84?6fPfrLT;Cj8HmhLRt2m!X)b@hefT{*vbN$@mS``w9qX831a|4M`g>oZM3XKa3>lKtykZ$b0K9Yah5RP zuxv){bIR}E|K9n@+8ya(E{4F)-MqFsm@);dhN{*Afi^C^OP~gK0DWE!j$W^$@S?(D zGgqIl-?ociJKRZV{NdWAiaj_CF`-)bZ`DfY4qsP)@^y1iv(X;ZF;MQAv~lG$y3*nN z-%*GO?X_9>n}iHNmlp@cw!Fy4Tw6)VMDlUTEe2D?QsC0{if^zF{yBQ~LUBoy6NSF&SM}t6ShW1X6Ymmwk320g zUk^SQHF_4WRA$ExL5TjfPx!7woZfBwz#;hf{(|2z`1oO(r2% zk+%&9g7fJNT`I0`*TXf5MZJ07@$yn|6{dyfDdBuN;Uo)UnsB~ca9da0)(vlOHwXeI zQ`2${Yyq&|u1G22a$fK{Cu|$}v_~!<4inV}+9fvcomZzG+|OC^c8@8$!)EE$_bZR* zTk+2Es{h>aHCh#~gQL?VMn8JbV0=V-cyUbo_aJ0=WBb_~G5JE@vjP$kqn$@xErc7Z z>$M*ZL*LNF&@B6fIdR7(VhF`MisH(WFD^i+JOU1rdT(=F| zx?%I?T~=G=Xr@SnN(7Jq%RFowyN5d$pEV>q`QD~t6>QV=LPYlky&+c&5h0mqfp1z? 
zJZDTId%e>�pt?vJ?c80Hag(X!e=@kRAfs4jDd#$D?uIt16_^^D@0)%Q-V13b06e?wn{HFop_4{R zngGEwCgQ%2tp^z#*Y`4=GN__>;q7s{-M`P!dvh+F4(ZN(ch-&4sJ>(v#7>cD{}dwX7X#r@Ah|N*$sAlJN-|bndD8*`&imR@v zWk8J6?=+R+Jx!wBuK}@L`$&svhrKVTK+QWIsboPa<~hK7f&{A%bJeD3#rL7Q#5Lzi zh^$wOqL`)#MQK!}Jr}&bt+;LMJG7S!s34{ZA`>JeKq3@@>T`R+uEo|PYesL|irb#C z<%*ISNR-{=$<2ws&rXX3qxN)ZdAHAfiyEOfvkh9Usjkmet0(Fc$6hB^Td4*lpuML7 zePF#*itLu2TCEf>I<{-p;kkMbBF}u<8V$#w(=WNIQ)@fJf$PTCre{RDsvj9I+U~AB zN^t$LPG0S2%XW*i5Zz|@#;k|yYJw!@KZMXT^;&yy&#~j2Y&A*Fkobxp)O&)3=I=G2 z()aKO*M(6%(0?z0SW{zC09lVwMVq?Ws0gO$;BTJ}eO~!Mj<4w6z>vKff8 zpSPc39Fu$B1j_yD!{DOAy~Bb+I6Fcx1qw5OH*?d1B|y*w&NZ-~_6<9h;&zZ!ge>m-L%daVa3y2B}?FYNcB z05INYqdK+M9G!Xp%vu#Cb5a7q3c*Jtg9guokbBh>e(HN?T=aq3)T8?z-bM`~cSMdi z_bBbm&%O&nRDn!{KS@9sP#K~Q#|1t*8QoF33*2mh`z?B~&}(FDoY0XCF@s6PF*HSy z6a}X#;c`0Rd|EJ1RF7&KlsTN&bw|vkY^>Xcx9iQ&Oj!>hipcMOF$Ok$+oIGitS7AJ$}%=)OCFoZoa9j0nDtKLNtR{UdnuB6}0|Ms&9cJE>3Jd}3n;9LDj+~1G3eSiP z2$|lkKc|(T&98rELNsMO^fd{7d(GJQif!9*T{pbEuGlkc z%K%W`Uuctx0;rV@kF`K;4p&v=JyRX8RBXAh0Nt4qwa+mJ!Ny>ZG%hO(SNoj;QUgM; zo-@KgOcl@oo96(O*$p*6L z7_|4`$LF#)?e&0yh2S0=bqAI4AKXxvX7$Kp~kiHz_?`xAMD{iKK4CY)5msh54w%2ItCk$_H2GKvmOGgCls-@ zPT_#!eNof{(|4DJ?y$GvI2J10y54RGA>gzuSVeF%r{;K(9HgSax@YM1hN_CoNe$&p$or1l z^@dN+pG|^}>Bo7VP*!T{uiJ|ADdPNe!gbw{^Ub)OI?Qj6-i|odqrvXT`0y}>h>}gC z(25-Y6%XGmKXSH3gjo-VK7?`QRKt!+WUsRatBgwdQ8y_4hIQS`gwtkH_c?1Pf-|t#b~$ z`Q!kk7Z?kvO%kX&G0zF7WoErZloaqN@cUf>vhHJ+YMj-xZ<|%*6|#u8f;^2@^wnZ=uGXxP!L?DwTJ~e0JPcV1pM`YVV13!ef4#8o5x`Sd%hbS`Di2_WH`QXSo%6pvOF17o}XoP07yWMv=_h=1I_dU~& zIVXQ?Zr5hiaBcwr(u-Vnm-k31aoOeL#gB0v-;IYz=gmX!&cVO!CSc$q3HM`qZ`VpL z7&A8^^em7+4b{eGHEtn|72l@YLj)sS^_z=PYM5Rn2f z&yjMuk$oOwjK2MHy@B>X>>31WnZ1Az1w~npwr&~M+lII6ign8Q&4Adp6kpk-Gqd%`1O&pBDoUIEO35>XQ0g9QPRYY%e;O zK;TMY5zDe*ndYAP=pO^1XWwL)cOFnS8wZ=SRXf$p0;MLitG2uOgED{f(2UO3iQ7Pj z*`fnOQ^J%YqSXkx?c+spPnuF!C-Y;Z5qt2F8+9AQm(j$=K<*yNzeD+n0lat4KLRVi zb=;rJMRS5HGMZ?T-=kEazgT^|hmhWVRB6BY@bFBK+eHr!{p%ZVhl8*lbhO{yzvlf1 zzZaay{=-0su zrE|)@0*b)YDgyRgkTaz~&rc`gwg#kWf@~XVsaV$)^EBhMEI6GOT(28ywizqFOnNjZ 
z?OOPEH-i34;e2HJc3c9eUr1(Dpp@QTXf~ zV-iwvk_A=EGGm@6#QQGA!rFo$0cmnM&iI}`4|li(d9PSkIyzroUh(qfinkkOy-T(L zOtm?A)x?bY)%LIPYHBNT74{)0>}kfpq-RK++HvfO(4Aaq_dtgv#-1T?dKrRmN=_9; zbA(4k(U9H{Q%t0Q>!c%d zHnAIZ#8rrT&4&jZ*0{~;O-A}=vjfJDZa$SNa^6u&@8RL+%!<~$?rsFeAKn2Y9yHeG z%()HoarFXUZ5;P800Qm<5Jkyeli123! zjPO7RZ2HK8VsiSY=$W|Qrz5tYkwAp+qqWkWX>@T2f1U&z0ASBMa;4Pn&DG~BIshZg z%oG+=Ay*(|yHcMU)ev+a8=$?ft1da1Lje$1yka-@*t2u$|9j+k9e*c=42jtY0kB)K zw!1ze!#l`dZ~F?L7^$n%G~qNaNGWxsfB0CZ@HJ77A%Ge|46m_*Q>W}0X;uE5n-xTNu88^0B^nKa*-Xo*rJ>Xv*ab;vB}aSpwA%aJcGz zd@XhrTrW=7)~t%+i?8-5_wr%DFIV` z5{Iy$C&+P6aHwEd7d=?;XZxJQoNyKefMLz!XaZ0zDBwJy|I2YLad__H4Fy6YMQ?aM z0$r!9@Ok*P1sR*MP3XgOm%zT7oS1l|6fvcQIZub;nt=O82-cefgd`^D;P*`d3!ej} zQdU=y&{R+gP@7cP5Np;KaT-yy zvFPW6y@RKywI{qxw1Q>*{`?vgDMh#;9;=Vm)Mo377)|MrC61U<#&y16nkI`s&Y!E6 z`1d3-98~7x(j4?_{rQXoZS?qft_qw3Wm6bb0#O0p%fa`5xn#Utu9)WuA_fLjj$KwX zY(&}TdGCcE<6Lz$!)j|!?TI_(%a5k+Bb)QYsc+dTJz42nEXH^RCqN9t_xWVUp0P0e z&<6(jR&xkODzcrSAKbXUvxg_uC!YN?!Q&`hwS& z7tGU)`~8l$_Q1hqhb@2+(==0#O9F1U8&nl3N6a}R#E5lUalha3^77JO%#;Zp|hIIb#^MemT&J8;vr(t`%u;Cm(EN}l+hx~Yq z3{9saV({UQ{@}T|)UKyr?~nHv(HBSs)cAZs=(Qm6$naHI(Fg)mQI!2C#~Q@ML{<(? 
zW!LnVaRT?)?0jVnJ#avv|7~clF5@xQ@LPT1^k7p!L3e?I22!;tHm#_-BPTXeIP49JmH0zdb*H8bEUMq4DkXk96l z+f)&X@#$DGmFsSU1Y+ZfmjWLmmBO$wtZ%!W;&rR;0>c&mF9{Z8i63b{!Kn z2Q&nZL6NKj}c4Mif6lI!(?mzOK1nH84P?gI4csC#?xQ7R;u_kV$jQv280#V-O`mids(jM1eSBl8+Y=r^f+6 zRZ57qQqgKb6qiIbgE{|0Sg#yM$NjA23>z|RnHuMUQc>lz6QfANA)&_|*t%MH!YvBm&dUG@6 z!$8r0?l>$kv4q!Og$D|zvt#ekd^Cdh)xiR5I}e7vYe*jbxoaQ59@h0x5CBMHjDgge zL?`2UNI-HTHB4a5=-S|-&R%f|iG5?$Xo(^j+JqiM+HCwM3*Q7)5``>kVDhet7?Q1L zq2Jk|ux34mwG35&h#2x|dkY~B&)yP+p{EOKV#zA(Pl8nxgBqCNyPemfhdoB5_q{+F zU>9Y{^O`eFNVg4I6djX-K$NXL$to`?GakB56Q-O85b9|@wqtxjtQs1lDa6?xkCk2wGm3-3HjVZ z8Y0&&yN~3h@mhDJoN&3!2!SrI)0FY~%NNvAal7448{_d1 zDf_psI|L2e`ant%DP0g#!jvQ4-rle*3$E8oZ@Rf$F2*t3ux&eTw;Miv{s}+-^aZ#3 z4a;Mv{GC*UkWh6Dp2w{2c-VjHkRyZqPn_kSe`r7#9{-(Vj~X z^iowp6)6}%K_4X6xWoj!8R-44^>$K%C6(*m!?Mz<7WFI&2*NJ>UC5>^U= zRWMprEDyR8?}h3yr4-asu$O9k7{ve-G#QfyVBJuPTGG<6ZWU#xN>*zJk{n}1nlcre zqV;`5LtYFm^Lv~x7b@kI9ktN~wV6wD(14&sqeBQ#iB!X}z$!>U&ZLyhmkBu~DjexK z$$t*w07H(%s$&7BPNOF0y<=HdJnjqDm3=TS0M(_PN=nMRR>pthr+oo@oj#5!;p`zKwKc!U* zI08~qB#nr@8puD*km~c;eA@h)0vKep8!IX?1x%BV&)6INo5CuUXLYDflfc0>hh1NY z0rNZ~PZK3Bt#Wfvw~l*s(=_!8kUVYB7CaT32cDZ0hTz*&xQ!I;Gp6+M1u zxCK^j%iy35dgOGsbgnHq2J_&du*D@O8U|YOLCIWeL|4DYW*`sOg{q2bUK+Ikd-2{C z;>HpqT4R#3mxisd+FC1v!p7_l4k#Wu+_9dzx#)W;(s0km1Mhj)p+cpWGqJZsrQvc( z{dZbrl_Ut=$a@nyv{P^VIX4wV1&t*`K+|AU+Vjj30aA#V?0Qp5m~%GOPp6r^4|*N_ zK0Se9oQ{cygCb1oPp9hp?UAEe@?pJ2M-|jmL#+obPYH8oB|4y??mKq>`R4s{=shRn zK@jhtfqD#e^fMj4KApPs-S>EaJ}0KeKLzYX2S!vmK9BX}DVP^l;`G#P&ao5a30RWn z-+#aEyzk39ALBFWV1{`0TzZT%{`s8{?hhTFbmbKA>E#vIX~ws&{|j0RUM>?ZbHY4D z6Oo~^?6+^<@b>nGJY{@(`GPo2`0eXgeEoLAZ@+!zeq@HOes{GNXaf=(0_jeQ;JW+V zXkw(Z`M&SA=>`UjmIkUh{*=9%oMM620@a3`6KZRCdAVR&*WL><&zZ7aIic(Y+g{iZ zyKZQ&4KFWOeERem`~Dls#wNWO64(KBZnzVCe^VKM?s%o=?**Z=_aEle{a<)ICm!S1 z|7a3&pbzgCQX;fInF5kT(y1&7X%NI{$f6<3PIqziNA-gRsWqzZcp~!QwGrirzQKGa z331a(FJpGJFi2l4js-&vp|zrDvh$hbqQRQOF;I7&l0A32MU0S?kR)J=*63#n5`Z-t z8{&#eg{8gjfR(yyEh}S4^u`GBz9$2>pO{>L9f|M+2i2)K=725ZLtp{oq!4Q23$9q3Qo2TZ@ 
z&ZDmMHtChL;T}`yN@W~dKr&;s5DzL@AIIvSYmu0m>~F2ucY-q8zTt6y^dKDqQ`s_K zfXGThujt_j9?qDGBdYeF4FD?87`Ox|VxW}jG$mH3VluFZ^^I2Ba6Ul7A?Mi}X@{W7 zdw00M0A)XX0lbO47_GOO)at3-4}d$tqc&25i<*1(ewbGJJL}-Z8DROSVf^_RsVc3( zdQ&2TRl7(bB+W5Ibl{)|wqxJ~R@RdX9Q4n4|GC}a-W$p7>s=uWmiWN~vJw1H2pYX~ zhI?mkm4g}q)0B~8K$;?^B$$(6ibUL9>beU-*ei(c7H&a#gBq1aPy?(#V>d%U-gnu7 zy;Uq*rPs$sm-E^bDzSTx_(922PZZH9(?PtSDh_D%k?5%&LHg-g0+zg(qTUGzJAO6@ zlkQMC=A9~}a)^YusiZd@Hw47S3RMd>x$KT^($~%Q1cVF;qg9+ZwmhEseui=f_6?mhWBmj zMXRNI1N}9JVp~_dTxWd#^a+=ou|4iUt(a3JcrmeJmZt=0!{f2wx36EZtvkN_^f!F| z@)^rg@Y~m4@$0YO@K`o@FDh@8e+7%(644nk)3OEE*Oy+Ucsw3hmInYf7!et>xu;@{ z=MBZBA8i4l_Vq4-^D{h7Y zGx1|U5tCJCBE3J3CRQ0jI)#_KF$AujIHP&v=BTuDjDr+~lsaz|4q;I6!U@s!t-B{} zHrThVU|CmFGh48&E4FKj9CudB7P%4LnI9ssbBTo6m>!GQxCBRZdR zg6a{N9Ml>)#u<*kKe!VFONzPz*?=GdO!AZW5Ls2>a?Qx5QWImuWtx!lU;>JU7}(d4 z+z?OQP!>QpV;pEU!rl@I6)^ez+Suoz3T(CE_E@m&73;QRD;4*3VFl`m7ING-di7y^ zH%DcF701(lpkpFpm25eTI->+c0j3maSQqTkI~jxfNzdFPfROQ|H{@wWP$Z!6x zL-klu{>Uj$Nk~75?F&6l9KkYQOXA;^IKBIw?<61t8rNg7F>d=K59zeV{2*WqN(3Uu zh?mO@Xv4ZZn1EcTPSqpGk#Ku^!+-q8|H8MgZ73;cV+bXnjk*vMc=TeGH zC`1HbZxiM@LsFt?+7=<7U!htsUnbN>^RwC%`}#n=TyTB4VwxkC%_e{XqM_?v@3p)O zm>ogjyCl^Gpx!t)$ok<%M4s?LB^U?B`>gbL-}OJ_cwfCZ0>u$^^rJ7%Ry`OS(f#NP z;nVl}1~CL@KE~f&2ae#NwgxRk4TAX~NMO~VFJi+S_TGzURjhZ{0sag<2Q2d+&;!PR zdYYUL;GL2XyZ(TKW&^;+FPj)c@T7>y24z<((P0a$qx0h>B0z0JU|`W~Q}x|EguuS0 z>jgOnVBeuTO*qL@*P${b-Lc!v$4af(%6|5Lzo3-LA)ma5p8Jr-_t>)=-)yZ1zlI1Y#289NEVa zNm0m=0RYDahD}SsHhK#R8vmRB5$u^7=h>c9i~*M`dE0&LjKfp|v=#&iaufs&V9XvN z(UYNb!0J^du1&@6-6|T1reaiE;yXOXmr=6}x$h0?#;uQr$TBgBSXNfG)_uq0v0-^^ zG=n(04IKq!jRS$#{90xxw^? 
zWB^sEf z)oN4TpK|~**fZ4BDup{fc@*)0)i@-=z6aVhGlgQ3!&?h!h2{uP>N0pG_1hU8&Oe zHk$HbwEbrXtBz%_P%RK-q-pL+ijVoA()+1SU{nam!S<5AC_9++J|-Ct#un%2K)ToH2g>pFLPneOi;oyl41) z|M_uGYptRd?oa-GiN0<=opu-}5{9ufd|!mr%i6g1{rvM!`1Eo`-4>t~!|ndSZ@+%Uy6*Uw|MoXbQ^xJFpzJH=mkEz;rH@yf*jwuBS|!En_4SGr0=dWi zB~~@GP?+UMs^rX#tEMiZ5)PH6*(F#l38YkteXjWnlwv_RC#XxC9wv8UQiObF zMcM1&`sD&|=skWH2nV-3(0SYgr-PE`jjw{31Sw4Xdf?}JWy-x9zJDAvE3rj21+v&v zQgZ;#NHJSdsy*m8v(@+LkAVU5!F(^Z7)5C#k5oU{tj1pxrNN$n@6{_kf`LzKn~Vp5 zwgL2pNUfw62BP61qC3}iTum?}XZ`{7KMB$OR1~U4OKHkpp1ojsthnD+Jnr;y*$N%8 z85jftM1e_G5GIXOrE+A|&W)j`ZAiM~FxOR;GU(R$7_?!|3Gd&O#M^B#n6SDKSj;cB*{&YOiFoNRf zfiN{bfg(j$gcI|@QAl7Se3>%?go%`@d72O-Iocrx^P{le3qIi#&dKfp77j(NC5w}fQMD2EO?U;At3@bhgz-JTScoC+w$o9S_q0{RP|}H zezuhQzNBLJ7Q_Pda01!JYX9jWd;kKTN7L~$4pRNzK_~i3QIni;nKHh7x^}fL3Q#P) z&uw%{KPXhB0Haeo#k~uU>$N!%aP&;_UiG{0`ScwAK?l!m0CQ;Xg75ELiwE@Q3dBF> z_$c}4bIB-Qy(<4_&WVpBF7dbsf=iz8dY$pFfBP9IJN8zPyl-rtxYp!|S~uKo58U2v z03!bF-~K=R`~UbKs7es>VzTx`Hgz)v#=WYxP1YBMx($GUd;^x6cC&fbkYps}P zqA5hJ65NrNCHW4Z47X)3FGXA~SA6>Pf_a*d6Ppn!o?o$U3vRbNe){QCPd=t8cfILl zSukHW{LA0|hOghg;oI$Q50U7Mm?k~5QN{NV^wzBS>$Ha;i1;H(Nc%6Jgb=;_&m|!v zJ?-y3M|=LY#z;D}ilCMOfTiEi12lyq{3-O{l*P-(*V_{a3<2|ygfLR|Bm`YaLOc4Z zhN@PpoQzYJ3u!G1sA>0WGM+Jm%dryUfGeBN)tcKV^G9zDE+XvD*wGrRC$$0lUVA0P z9b}sauOMPuTd{!JJQn;JnOq>z#@LZ|_C!3R;zMIMtg6uOxt%tS9~ngQTy`yB6MYVW zhGt1|xkQ2&mkB8)q!193Q#D=dy-}&g16nzPP=MpifpM=GtaV4xN@>xdwC3UXX>2?V zH9EP6L*;24Pxi~5zii1u$b7F;&fP=dNP3LI7U-cM-b`auyAVBaB|2ZLqAHc7?)Me9 z+XHWJD;`V1R;Z=ljM7Aoc0kkws6bLMqQ0OzgbxXUIudVVrsIx1C=i>til!AQ38p#X z`Z8g@L{j9;i^eO?l>RL~6Zm;vnd(VgLr9i*1issC+wtw&4UfkHr(e31@?I*oQm~f7 z9zt!HO^vC)3KscLTEn_-`1baOb=y#FeAZggP3iFQn*Gsu0BmgeyH--j5;&J@PyQR4 z2(+-m8wM(y+M3|iyLXY6qxEW37S4XiNDQ${cj4d=2v($+Xbxw-F;OUpk&__j(3P!H zO1Mmk)T*3J;36N@616@N@ya1T)TwwF=wP+EY_N84#wj$ee&xVY*_%~pfoYb z{t&jpwL_f^GG#M=4V?F`4yVCjrv!C){t@ztUMUn=6dsrgioJ7;dXm;v}$Q9%I+pm zULrj>$T0>(1~O`^xZm%_!&KxbJ;2!36}7ye+4oY)NQo7YZC~+r`-*@2^oq;%f={1b zalaK(4ytyuqYoc6&Vy)7LhzJ?_|F`_D;>rBFP(%)>&ig?=~Mm1i+}>85;aGx0H+v; 
zH6{m9G~s66%d$?Nkq1&Zfp?3psG`*!`_9JXlq0m!_lBMv)ueKdhQCssesBb~^xm=Y2%np#8y}@_ zFs(II8?#=KIE>GZ=4ifdB6*~asRhL>!RHhaLh5{8M~8neNu>&n1xhg@WkHG!dCL5g z6Q9!{uj}{7{~r)QzyiyFCILZ>u7QAkFId+NWvAK^D_rvQyXPxAAFRNHcF^kHZ8VBr z#j>c3V2M3PR+n6%O*;@X($fqMgt*s7r;d2F3b-#TZjTjjH~ziu6;&;55IeuCD*>cM z_5{-tpyM5$`O7l?)eo+R{9;qZe3@{)&bVCi(6b_a{p(eN%DyCcg~c4}y-G%UVuV6b z_6_^iaKEp(-|k)gN+wQ52JRLFRtAL4=!9zbuR%!5twh%E%ZmH`2y&|uDC7P4;O^*--Wv+!zp;Qp7w3(u*?H)dCuODW`-OnS=-jIJy00NDtzh3N1>T^B{68fh4gz{09r*u9pd~FEesVsAV^a_-a9MgZCQ31Gq}-fo3LoK8`y2U(Yl?2{i1>L(&ng zf-F|;JoOvcv~2hoK^Kt^s%B5a(&wIP7aI9poY01<|5y;goHHff1AF$?bv;zm5KcXO z^f~b*&nxxX6Yi>t`gB9b&u~tNfpRcrfY=|h12{Ki*r)s)|A<4ip{a>H7^&fXH_H3t zpHZE8KE9iL485;MQvBC_u0dlEAyxYG>kEGUyE3_8+kG;NRv_@%733PBR zE4jxnAtJ_%QZ{`3`n6wfGa5}w!IZIV(5Xo!?ga$Q8d4O@^@4etAwmeQpfO^3ey*L6=Vx@8RKJaZSr8x;>O$ZGUPR|`PK@#7BXM?`5syi*xWQE6E=EX$7jvSZs6x5tLZvSBR^MO{^h zoN?0rCMs%WSNA%a=zDc&Mm|2$C6QmQt}U8{TdUZnp*7x&y*7gW3?4_qSUjPELD3p4iu- zKo>S?pacD5-Eq6ESl5cZ&^fy)kfsY%1>1fo{yfW`GO=xatxz>{zTk5yU|$|+qDaC% z8#l-dzOvaK;Kq`OkwKhte3FJ7?cALFA~4#X2V4<~Y=tpaSc5$mSF9Q-Y(WAp*9)tx zww_!jQm#Vuv1sd|xRIS&m4$1k)2dkUV=^2qSXPLcl(H(jSMPn|v#-p!7;v^`>lAwf z+_{?q13O&(NWh9lH2mF@Sb{;)*9lMXwZjEW4%C35p0Fr`fB#ySvJTSK=q@>6N<>!c zzN4ABYD|e@7^MKZBNU$#Y!r7>$Ep7E6|7{m3J%kVdAi^_O}Ne()09{dQ^j8B z^+w?m>&3GKr0QW@TTKdDq1aoc3SFD58mNG>S--X;KFK-Lh_di}Vio@G7yofyb~+rx z8Py1(-o)%9=N!|-NFkEm{4Rsog!K~lvDJlj&d+cJqYj|#`D)K`!=Fu5zW4Jo1a`iU z`TBZzr2O&E8GDKxu(DBV)lr6Cb58jD`33*>uRmj77ie7}p_08cB}_SU&rcbT`yIc1 zU2uO?G-N8=NyI!~@a?wXu`Cd9o|aPacra1%(AbmFm|5LT^Axa80c9uRk#pw2ER`Mv zK$lF+dA1UyEvlw`;ObPhHfS9)G({^7YhI872M7z!6}Q`+bcV!DEvJY&Wi;JUYQfv> zE57{vH(ak5r2IhHP4ir0Z*Kl*Ap39`vsi7->(4z{Y{Sn3!;rE?-GAN@8p9xd=av87 z=uqJI&hhTsL-JurqsVjc{B(-(do~gOT>1fzWj?+%v6R63gO6<&0MX4R4H^%`o0G|? 
zD&XX1xGuw)#dyj?QmD2_i}cO>^vw?ruY&OYE$nVe$Qp8)WEolAzkp)W0;I^l+56%0 zvz`!{WGmcH!^1pQIrKEhc)`u+Jj?RHg*`A2R253WljTH?e@e#hH}~xz=FuY% zZT7@DXH9KEvpEpgpzf(vv)4(GUN+R}so68P_ z^aj^PV^RrhUiZ&cR(VRL8rfcGSnA0I%{J-p;ixsiC>g>rMiU3Mi{pYZ)DhHqknHP^ zQwBZ3bNt}k)4=%`qxaCob7CS#Z_Jz{vYDDzhwtIBfgV!80KG~!RzK9iACo8dx#a6d zh+JQ%l<8&TR4Xy6sU!UYThn;`T)SOL7e{sHsrh-FKj+FNpKD)Vx=p}yg%h0;c$_N? zOAnPDFL^==0jd?P2(I(o*QD0K{;WcPiA4;GFH{3y)$Xa9LSr#N<#3I+y<7rBNQ+A` zAw{6JoyL`RJ!nT2$DhlGNtq?dUNP(A&BwX@QN;<*brym;Dt$aEc!H}2NCp?Ycm^W> zBvAXKIOL<__mY(kzNx~c-T_alo*mf##8rw(_V?}Ss6I}Y886ovuh$vt?FJ}?ck&cz zR(Y8arvwDSec5rlZK##(gm2##)b>B|>C3e!yamAhu^g(D;%g+=K`4-N zq-5PzD)zdOz7i7@igj6umZ~5{mLF-X70q0#^bccKe(RJC4{J~>mSsVX5wD+KOp+HU zmVv*f7vz*7LeGd&6=fr@^vlnG!^_JR*Xtc`2HADvYRY6ypRI zPj2k+r|#gu81(X=A200cD4ZuCmP`;S9g`48*leR!bZ?k32?=s+H~=>tM+}=rw3Z++ ziB3y2AcCr9N+z(9WM4ixKAZLp(VGihN>QN*ky`Dp*Uz4U!*DP7rZ1i`juC-9v;N2g zB1He;i|>)Lk1vwX$Et)9f>k|2e~+D_wj7JLC85(qV??SuIpk;y1clEgtv73_CRtw&4clLbTS1jvJPIdL*%X(2H^guP|h zCFSK*4Yhl7wYHSZw;}+pq{ToWgw=IE@F70HQZ1<5&&Y6Ua?%SxD>ho{*3t=ZbQC zqIm^t+?*NASy`~J3yNyhZGk8ebS57eQ%(pFe$5O+okkRsfdKFXT92x@QX|D_Y{@Bm zV&<#?r+@b$B|~Ke)<@*kkPz*;6Xb9TYYfNl#9{CA%2@aHkfHi)=W&y`QmqHAss*6U zJTa^X#9}zg67xCl)up~$pGTe~n?)wP689V!N)M!4zfV_496e6N6r4f`n9M69nxf@1 z=q3^@9!WM4D^!SrlhUX3M5;y~QaW=tqrR4UtO*hX$r|%x@VS;a4^y&vK0IfOyuFvG zuQ~qwhH>a?-RatW*yyudtn}}3{|fLbGtYBWLWVRC$DU#db##||UaPi-q6M%7q?C$% z+p#QbSNeklgpe8cs88>q@1R)^O{1b8!CLkvh>eFBBEjZxe6H#-r`5oN|5HZ~;N@U0 z`0@Al!v2}h{?Re^d}y~PILuVfz{;OEM236QQxXzG!0XEuuh*H>+5HY@E?qAZjf1C* z5Ca~|hWld!glco8H7x6j-@g9;`1EBlfvE-CzJmfP3H#nqTY>ktk+ujL+15%K@p;au z4cOL#Wmz%h+{@Z21ykWTIOSBeVnDK6Pl%%^uIbLTl?#6}HJ=z0rhI`2u(XD4-*K_3 zi;5sakauomtu-u!nW)@TX^O58U2(Z!iS#J_-53t9(vE{2c%6 z<9+h+=Q8ep<}vn^c#~f@z_iVl@2(-V&=I2JPy`%`LHr6gufEv!pSMK!dx4?)#_h|p zZ+^k#aywXN2#t>m`}yYbxR!(rlURgEEjl_}p@}G03;}AYS20l55UnDVFnEa^*VxUQ z%m>6(NBp_O;IgfTx>@f=2z@&|P%jTW0)v6RsjJUL!0S#w8bf`l@@H)ccr;p;sG^nq z=(7<|K!Qodjsb9lRcH`h9GtM8X4?5e;&xqF=k(hRJs% 
zeOytlG?RlLoT@c8o*HM|x#NQ&TL0sTiH|S!M$99qu7wTr%eG-HJHEZ$v1~h*ZO6LT z13kBDQZN}%Pc&0x)A5udrWBDfRm((J6@mqHVR&gFT7@iyPTO+N2mU@Q9WU1lt1B_k z-zk9OxF^D-U{LgcUuWXz{udUwSw&&rSKRLpbEP*Gucke5e%7yV)u|~C6b&QN9^hUo z9*-5vvSHf`)~%s|)jer~UD<>lL+bqt9{lxz@9z?xD0`p;&LdGw(?s{`oRA}1!gHc~ zIKehjv)oitgffz@a0V#KzT5lCwr55paW{Dmabef*DYMNV@( z)Ux`ygvpH;Kt_PaWnR6>*!w%2YTRY$?f05JEQQc}sIpSx@0Qof3eGeqNQ!=ifC76h zPb-S{{lMP?1c)~NPyK4~pgH<|eRLc~qE0vFXEk+4ZiPxjMa5o~?(F6v)n&PQN-^aPz(*yq*u<2$l3>VS&)of$BJW=dVYXN}h();w;F z=fV0uxOK5&9axdCrFIinO15hcd^?VX1Hf^!J0_|2T!S8S8sIxLaUAYXUNLgY9n(}zarlHGsy>^!BPF_;289{?muyi)rSL&QKZ0q z;Pa#vAXsVwM0q9iQR*+&4gq$IWM4R63xXu?g4%*HT%}}hALdAp_fXK$I7zElXv=xZ5 z{8km6JIDt>H)4P)5;0E5iHckMUM#5L;AP`oKrK6N-@f5*|MK7P`f|aqIasBo0Lh(F zr7N|3G)Bh3T)p%LKpasCp-W19R0SE65Px?+8c4r8c`OJ&ed(#3HUL`3fW&!1d~~gU z_V9E3)f0XU(qW4XjiC5>+%6nYw?84DK7p5oJ!-1 zOew5Ek;l*|kqTalp=yJ&ZY+9ufiU-YpsK;!2u1r`G-&#Yo z@_S0L2hJkqRUqaHoM~2+vZ3w(YTdE#!AvTT3X`ATX=v4hA^UrCN98(}pV1T1kIK;@ z1d~1Wdu34Sw3f^Dg3Dz_&I#OTy@~<`ih&6d11uoGROf;Ma?o!=pp-bhC)&|BW($64 zign#^zb|-fMDj~jqa`>sQ6NbS%_ZteWiI-2Tyi7;WKyu6w6p2o=0-H6)UG{5K}x`E z29TF|Vk4^ckW4uqiey$W^D3blU{nfC|8|oDrsS{n6a{XOs)||^szg#Bj}^pi>xeRFrxJ|@zYOrzMma}dYZm!?O3lXLDs&4V!UPAQ*m<2lG4r@4SVbbD=( zo&8A)bh%?sKqN$zRhX@@YY<}qg20Tg#6&H)`W}ExEscTztszQ8P`<~8W-6!j zbJ_QuC^bMyZX7gU)qvdvQrZXx@!77+ihaHy3wxXp(CKdDLpp#{VUN^+n|_p_`S0|< zhx5pY6dsQe0fD_rPjQ*R_UGqk|G~lJ=~%a)rFwndo=*Np4jupbbh97CDCElwem=ljJYa{qS*_0Ik-n2CUnL5F)PE zD{3j-q)S_+nT??_MV%JH0H{@%`c=j>Uoc-Th-o(Tna%CnTCpsfHN}!sNXfWNuJ^K_ zm4azX`1E?gvhHYmW5P28iysYUKSBuBBxr%!bHHK&;Pc_Z+^G`Mj-co7I)+L3yFdC- z<>Rl+6@D)x|4*LYCs0)-W+}rYgKsW|fJH-3f_8+rFF^K3JutTGZY}i2$B&db{BMQ% zd|0r?q=VFi1_t%MQJ>tZL!c8ejFtmODh~)R1VKPTGXmXP-G^FW8bd=iR5(~5%*{K}bvY+(ZxtX+r|?-aEnT zmcB0rLyMl@KUK!Gd!#@WF+`&^vHB4rJzJ`g@=5myaM{fe$>kSA-BTttCx)ZSqi)t! 
z=oeBeU6Wf=EL&j%^?kvzZP@k=9$usTok~Uj6uzvOj7F-S zT*D+;1EwkB<>lHd9nr>sdy9~YhpsITLpWPwpjmc$loUg=VtgO0`g5i-83VgkiJW$d z;P9sh3QCT>?ljp{HYLG_Vy%?jT^3TgcB9cKd;)O4u|>g}*=z4vL7xjYu188r4w!Pp zWlCmJ+xWdmx>^eC!HBR#W%4RWzehhP^cG zwHoD0@mRJYu@HphMvMYjA{YymGNtUsgg{5mq1LaUOmbaLJ%-3;)RZXO8={Rtr$PE! z>tNJ5*=nsB)vp*$OHiy|sr!aN@mLF%t#lJh_ewG+W~6M=$1x1e{d{?zr23vD5TndC zqbTkut>J)M(f2q9h@RIb{_RQfQL!V%4qOuikVqA)Y>mK7lrjYLz&TZ28qgB3$7aF+ z6x3jWNleg!bko|X`jv7*7E?_VKtMmA3V~FwZu1>OxnHbCyPsn~3k0{uSi(VJ30@JQ z`+W1EZLhO^j2I6IyD?lGTtlB0=%+HU?IRHC+#~n<$8#bwyf8ecGGnv&_fIyS;_3IF zk|}=eeNy7@*4NAqUi{7n`*}{4R@lB_$wvwSFV`zRyb)$J@t#?!^w%w$Bzu|KIgr9!;gs*S+j%bQHEha#RCd220NOeG~J|W3V zbhqL@kUtnG&q+uOp}SB1-ATyPanP#zryiS(92?JR(Ec?a{SQC7f`yne4+IEGj$=?r zRdy?PIsy70m-7oowS$*}eS>NO)j(dawvIr3bR1OzQUN^(j?i-@#Dd+i*^!JT(w4kh z;^d}dp`_z+?T*I<#qhoqg-T1{5RFz@sqJX$OlPVxLxKG1Q~? zFC3L40Hz`n`h9C_4nMy`k~|snIno-bEhIH<$je@^Z#&j?!@BO+c0=xM?Phbd ztp%m@#N7oHoZC%Mi_QG=lt?+t5%bKZ{*;1U1CzZNsaTgIWn^=Vm}drlAqC7AzJL0_ z9CW836rj++c@{&U`DHthOQ%uwIR$`9X9NhmqA`x;L&3^i#DVVJ195uQupTG_+g6Rr z*r=v9_98e~aYnc)LgAFV`(wdgt5pfCw+Y_dZeuUnXR8Yb6uT0$Ppk#dqc2Lm|0{U4 zj}GoJMPno!s&0<*sy?B|C&i*e0H;sO@Pw0tmGiqXgwtY?d!C}fd9#r~x2$4xXU8hMwOEF-cCd@NEltcoeP-?if z()~HSVMFu?`fDS2kmF>uPE!&r8>o^M$b6AF1o+%zVkNDy8N;Boqq?i0AV{4iAmfOk zRJo6Xm6RBOm=wtqR|9`V*Z@9a`u~*$>5azM-}P(MH$Z%tfb`if!9*xm>KYOi9L+6SVBO-*3qC1=s5ZFE1DD z+a0ag=A}awM2?sCd<3Dg!$INT>l~Ah?dK?gpBXJ%e6S-guV?kkd=fWR;pLtTTpXYRxrS^=5ROOk{}jH z9@lBxE}XU!0~L5+2xU(~PQ&i~qU6n}r?K^{m@<4%g~8OAglRZ0h^o)VE`iP~imz4xQFfF|s5**5PX@g_Sxnz6mPJ_O|H?D^s}6@R^~n|gCU=>=uq z5hOx%?>`t*pq{xIB65s0lvXIIZ1kmQ9Q2fvaoy3)Qgfb2U$PC|KW}*$?|r8z!M5+% zH_}z3M9hht@(`N!$Qa@)loU@!Rbn95j`?nLCVEifiaoJQf5? zJW@)SjGmTbz&uZ^^jTj`@FrkWbE-{HAxI&h?Nqy~wpK_RgK0Hjfx#x}q5L}OoIYp_xSr$%>mds<8A8sRW-^KXtav{S&l0J?0i<{(h5jpCH=K^- z3V2Ng`=;1!T&r3jWF9N5Fe^l?G2Win*k7kW7uNA(H%h1{OZ}>St9}<2=cA5Mt00>RlWbc7(2zCae**vj|NrE@*_Px;k}M`_3jp_s$n1qP z_l@`eKa7Wbxy|mbif{*DX37UqH8b~!%F6EPA!iQJlarOPzyVv-bP*9vzM%sI5_w3! 
zyk2m-7vzN)_wy|H_;|ojSE+>8unQ1 z&?)eC^8t!&UC@-J_}lG{yj<}1dd2OwqqV})kEf&i*#5>8d8NeV^282@;Mpj#w9?mE zbaz^VkWV09D)z-OWte_{F(~Fza(HkOM^MzWWcV!p`!f;t!%yglv-+2Q`W~L*d4%{p z8r7#?vcsfv43t4cG{9&G3Rsh`7}<==5I+EvYBIqaa7nKks1kB=oJP#D%$$!`lZdl5 zt1h^cY=^A4V2$=nATwB|B%@wVrZEm-NCpyBC_W7RZ?)h;3??BCQ)x=QYau_Xnbv#g zb2^MpA2tzlKCOH`RfXoPX^)qm=h$bX>6o8~U~8J9=c=1XhbDGTl{p*!Q*7!f<`j!% zFbQW_%(+XMUkhESDVSsEJx+Mox8{eLEB;uSJ}7dSxe$apO>al;iTENDK!d62pu;W* z>7@KCgc|DWy5f4hVq4c<{a{ymDt1Wl-X5Xbq3W5!B*-QuNbEUfo-NkwJRk;Abn>Z> z&--LVNMdjnVViRh%zm;9-RN>&ty?mkTJ78duf~wrYh01avHVfe zMmrCRPR?1X-ItCP+=lA}i*%>m0+~3G?sJM*Lc}%&AWqI-63OKXK$~+AD0RGM1|aM# z&igO`P*z-^OeRdg8;uEC=?s^qX4gk@z#($tRZJTrh>!=XAo%1u9)rw}-TH~cPles<_c$}pH63tcx+ODVbjqsytCT2KI-L^?XOq?^Z zs#T1Oy^DRVzl*(>1(g1`;8~jYi!)4zf*@SE%@d3kV4aoYEe5SxOv%$R>5~PvIk6;> zC#;|kgnG4o;#I)s)jZq*n|Ba)=D~{CIh3s079$~i*@BNXr`|j;1VAfAG|GA6_Hjp$ z1ea+qrA!SyLA9^Ov$D8Ih$yal!MY?1VormYirISOvuJS&`u?Y{{GJ>1 z#YaDX={ZdP>LWbmJ$$;!=Tk9=syi3fly$0pd>wxL$ z>*Zqpd_dWE+&*r&y^|OI?b|DrYPCZDT5wj~<0!m{1Xu&C2ye09h< zj+=&?h9OMOs{}Qo^JhF^zlZvvA&5r89LhPiOdfg?$fq^pn{fQhly!Nqu(lg@EZhD& zzY{z>CDO*{{v-F$`FGqIC$W1-TYYh3b7K2@saGjrR!q7MlV^eWYaBzu85GilYFU0h!CvrW~+sE4C$JSu>W!x;7`PUWJ5udesKz4Di`Q zh=@7c+@^W)AXxCREQ>h`Iz6U7{5?IsW$ue{R5DUjwA_%ip@|~rY%o%GYWTkQY9imSX%Ku7^^XeM*TOoOMaKY#AxXZhM=W6tLmtq0Fv@WEDIMDwv%j zPbFBu6@q2I`PH5*QYFvNv*`|}^RZKRPOV}Q#6M$5{EofEKXc!aT0P6o9tLV2jP{Id zouXsQut%|gfl6OF1#BVUnjvMHuODZe^BGlj*5lixrS6oWvMKw|_>bcHULJhnK&erFnm*|w@z zBIHvA&TXMz4YAAVE6g8w64n?Ai~yJt6d|eO6dD0Nqd>KNLxOqfx8pc9dtd&1uRNjW z-8{u*cjhyxHmeL-bVT*>&uGUewfUom_o?NMEoej zHgL!e_h=pWErK7aV*S?r+!64D;K4z8%=XXkxBu@i`5iE%|Cn3nFZN5V z-g%nejske28t?Q=Rx5u#gox|qg2PmOj(x{r4KM>R0vdRhjT|6o>s1MZPy-T)TU8h& z4E-S)5Do5wl~l$%Va}lO>#a63CRBQ0(?vqba?Wt&HYet}5fQ&*&yx1AXBaIRP_}V^ zel^%n|NO#8%}^^h$|E4rFMqdIYo^#ML)ulZoG2X)HWplW*Qx|PcPj!mG7U&+p(0U; z7U1z=x|V!SJz#st{^(;aNIe@KVd_;d2c4Xb*4Pkn^hV(G#~2V4P;F_A zpPvDa{ZjYZ1HAtSD<@_<)FuqUsz*Z(xwQ&)`Pjx|oEU!+{`;DOi8e$Af394$=739z zxaK^9G60YU?_t4;2s>xz#2!~`UW|LEW7~Ha#BeTE1Hb0)8hj5=fs43o?xX~Uv9BRA 
z?Qt3uilumPH$)lyvm9zlm9Mc+nKU8K&Byxj0JfeUp4Dy)5&^H}FbgXfJcka~nk)ky z*ArfT@_-ZWSeng4G|rttdB}+wYrOyK$+r3xrUZ!p*yAPB-@ziIfDsP3Qr**UiK_(q zJ;P+{Og*fze?bqbjnLnE;LLey?ksu`mK=NVk%WDsVj_pe ziQ4B$g}K(MRYhV?mPLtZ0SJwP8B!GrgNK7b=y6uWeFynuUgb9@Us>f96VEt~g7Z|= zN?0L#rx<))g1N&$X~imLtfuCe9+IRXC?bHQf*Kjf$J)?>^EPQG+l)9-u6?CS(HC?5 zU*~#Wm%n%QKwy3;4?d=f=X}l3tAEKQTs7;GaamVD8_sA6QK(j{OeAb; z#(j0oY&HaI23d$kl0x8z)KZZaW{~78eb9zY8IGy|zdi< zu&^=V_OjwnfBG}tKMw5sI|o&w1wWpB8^Ob8Nj2#uV`f2_1>sp^hrtwte7?R~4E!49 zMn4B!&)4Sw;q(awwCyxf@y;>tq4UVa>CJCEY4)#c=4zeu=s^)KnqH(w!SIGiU6dQ-cu92Sh& z?O8!o^dQN2E3GtW(jJ76=M#D;HS?_yz|0=-Lqlu@7I<%jn1Drj&?ypORMw5!<%`|f z6G_tn;5`w(-uvaGx=$^2a36|SMA&p`+mW*bUhSD!OK4pXYCDC&WOJV0_K zP###TqMi*^A9V(s!LK~nhu4C70G4o+(|8va_$vB78+-Tbv(LZ&?CAaToXh#%uF5uo z#plmprEU&BV2Ovv;A22z7zC?YgG!)hxp`JJlO|8NZW|%1ncc#pB^DNw|MetkD;4E9 zu-|TAFOq~UuiS@?72ok{w*L&eV?YCD&uhbR9#AyG3az1RY9^6e-LI?0w*rbr6bKV0 z0K6S9?@2YRqns3oSeqpcT>;6txsy$f;L@g8La>S zF5vQezc>BkhrtKX^Bwh34i*IGafi*dPWLu!S=i!=6eqFFD$Iq8y2goQHKKxqdOEPLSxjUR%`nz3#b z_v1i}Y$QV1)yWLmvXac{@BtO9c|%)RYS-E<>o+p303arFx!>>j$l3JxGqeTNs<_>5 zczJ!{0~?l?(s>8|{`bG(fBx_P6R)pVynlQ^&pyOIH9pfLaua};5GiI|K)+#sPr znxEc}5y;4v%%3zl&HjboTJt~p-gM1ttU=NB-Wxgx<>9sgy;2YbSYqhO1Fw4cGY?ou zMKG}dz!jDna#bJ>fd%brUb<6En9sAo^5>)iIWN+m-yQf6#e$@Ov?lAdP|<4W{V+Vs zKQg_Ku$Hk7)PBZgeH+WppktjB13ntrd``v5s6Isfa&cyM)J{o_n*!z@PdiBEz zh!1N;#Bk(ZH4DfoV_l_p-Tq=l^8cD;@;&RXmV8jl?y3S>foAZz=)GrxB_;%GrhM+2 z1nVN$mVj;H&*YSFxvqr2Z0uD7DJGu4Jxb`9H|nd{cg1Nqeg)`oh(1hatEM#OJR)&L zGYjmk15$rm3p@ywB56EW@}nA$D8$$W1fEsMJmU`UnCyG3HB|L69D*8Sa)LqU2+<;V z!K((eNB(aVTs;w<@f|$Q! z;B8ef1v#%6usMoB0Q*_7?-eO1mTZ;dUiG2f3XxL#u~FePD8m`4Vu|O^u9jEw+>TjX z@~^4|{6p7Q{@AB=`8!Yc9C-WJ=JVy*FZ>Y|fWQAc4~~)019Uy}qF~#ry38_uT@|vy31LfN43Qs0R@7WMV}_fVN#rsFd20ZLYUF=!}oz4-Y?5cO%)cNk-kZ! 
zmrrd8JUM!9?59QYMYZLtXnDR;jBvGz4yTzR9RJd)ibDi|uQt(F&)#7OK92Uz?u@x| z0zehsC6Ro$qj--J~SPK+Xd8Y7yJbV*!nFz8lAP^#&4D4CI}&sKUynogD&+3G+z3vvoT49GPz`x64zm~aRW z1*z_Gk84>Ld$-YmQb7fm+SIB<@-P;`q-w|4Spt<1dM;_6tFb06aGnlhMeW#o-kaI2 z93xWB9d4CFz>=8pZPx!JF3F7PaD3+EP!Pd>7HHVfq+tsSqDs$-dr(ko#mC17K0ba$ z2mx3l14@7f zOH86djQA`v&dQF&nf6P_sl(5_Z^jHsQ$GGfQ08OaWtq*kJ#<$c+U?F}LD?2e)+$Broc{<<3O-Xw(2dg6A!p;k6| zTNWlZ$m{UNr;1}gakk2ak&SS-!CGX{I{+P(E{+^1wIIcWbzO1a?{>D*z?`k&RLaw< ziF?$dI8So3v=yvtL@5=&e(X5*3jF!cXex+-YsU9Dz{JcFBE<|OLgI-~BXF{c3T0IV z#;J8Kt@MNv&xB@}!LN8hPUjKThwuNZ{2+NcZ=Vw`&lHf)&p1fup5T7ICxCwLXisWC zKf~}#MdYE9974n=NW3NWYAv{2ulUm+zGF=h`^N_a4VP^}jEd{F;pO!eF#yLd_&@&7 z{|oo-&nNuy{(-;#?eF;Y*AJ8eKoUalSH&|R#)xlkFGMWe?+7CJ{{0)y^Th3T5;SOj z7tpZZKTwW4(ugvR^kVXN637SHF3cE}qDV1tVK!pEswzSo(T%zjApubo)nFVdAW<*| zEZd6fWyO*gi{R0nRfrfC@xXRjOdSAN*NnPTqx>&_`3v5@f5*$~1^1om5Y00X7CZ(2 z%pWeQFd408o7X4v>+DD`4L|+6$G48H?7#XL`Euz0O(*5qLX-2#<<1P~hB8-0ydsCwmOJZhq`lnrOjA97zbZLk|?s z=(u|QJie-ml-D6=Yj%qc%&3qQ?==KMiRj=(c0+Wks>)P!T4JxRg_GHFa#q?N5Q(Ty)j@)z_tLFHRAfR z;_c;vKVGl+cD=Ai&62HIrUxFRc={PIHppN+h-K|v%AG1nO3cA@;8Vt$@6Z6wy(KEz zlillkFnVD6xKGp^9G!j^07j5IoM4B?aZbe4fyZ;zT6>bBLy$ZF$O!J;gB!_6CXWaw z&B!cpM1$7!wHgfCXKT)$xdCX6oE$BJ6kr{Z9Rm4tN7fvCcxB+>YZ$inIf(Gf`Ao^k z-E`d4dYj;(Lh0{5stA7X-+xx}wwSDm0a%SwL_X~b)xssC6>5LS=!A36n9>%V$JzI3 zmp1*0hj^%t`M!S0Wzwc6Q~WIT=&(^Wk$_YivIVhK1x*DFf}Bp26KG0Nh|)m2SrqTL zJ4Apb3442)f=%aPDhT2-VU6)V87C^CSXLn$5c&SlKOT4C^Zd8Cz6d`2a-VLlICB1e zJn!8Oi+tf*at)-Eu`UY&1g#Vj5v4(EA;c;P)oE4m_rL!Q_xs)Yt10eh!G2U6rQvMA zev;QgJ7_Sw#OA)|Nl}B+8gh)d@4F?W3hU@D_ zykHDSNg)Sc;{E*{Z{Oc=xvW^%JMMRG0-NFP^M18q=PHO#oEtDe(NuV_@P|LSd9nM_ z^6>Yx!ailRzgRNA^Znny=HTN$3$7*RY)kl$WfFXI0C+e&0|?!4r5OfrmD#U=aYD@u z_AdF@dcfPvprsVBEeZQBC=$>xRE|Ui0S!^S1GP!-dbLjyGsnW7AZM*CIZsYn&<36Q zYw#ei7F@fdWCw<+-FWq81dJY7IN|hpT(GXVZR_bmvY!xZ=gLCz}#V!@o^OdR)pXO~^? 
z8W1#3xERxa%sFr!>T~mh^7HC7Br_z-8oj0O!~ShLgKz&Qoze~!Q1PG z@82$Xd)e@IS#e!FPzFM1@C2qiE^3*=p8bZC%Y83_9Ovi`zKdtf&XQMoPj~{;161d& zJnq9lS@L;r^z&tQz|gmKYbI;2v+^Q(KmmhMwQ27+1+*IE)aJ07jBaVJ&h%(2ny^X| zQiNzku)aCYLF2l!Got^lcyXBPKr!HDEyC)|&{lg8g6?>a-_;i2tgtS(+=GkJ{kuDC zQx#*3?XYjb_)O7+96UMY_ZkHMVHY|CrJfhTV$#~=l#tt0jsxd@K*bVQo-F;Hz>T)7O#$g+0D@M5AlNP&wrxSqM6kfd zu++u`TxBp&Rlqk3I7&n*;JZNTkz08O#3R*6i5C&723pA{n)O?1T&t&kRQ zYBBLLdq+F-9hD;jt~(tN-TCq=h(LPZng5Cx-3@uN8@xvK!_S(Zdj~(aL;TELE@s!3 zm5pgb1nODLzvtT8mIceQv0`5;e!YF5G$7{%A_)QtPUU4qu8PtkV}<7XTge4i;C3A3 z2bO}ABKG58VZa(OlaqL&?so(zuGftf5GN4z%&}^3p1kFZZ`T+6^2?1uMdZ*BJKb=W z!t7|yP&Fe3!f1j!Zy{jK3oe%pB7(gX9QPY)smLkf8a6CTMhFSD-EkZT32>Delav!Y zqg-pl{o?~EFL=3LaO@55@4IE9XF|GVDlJR(%p3X~gfyTj9a?iFp5apS0`f2P|5gC) zCjyXv!RDBQk7uIKzvA)&t{*lux{CS_J-P=D7CuY|M=;!SGRHQGku`twJ|l|^3=(zI zfMA(gD%iNwsgR0(cm;_DB_cN<*!u*-@T&+)&eq(wqSOejBx$n$1<(*sg;~N7JeakE ziWZ!(F%ddDA}HmAXvG$p>V2a*w68PGAq0L9H3bZfO{{}<|eVud}Vzuhm>?8+tn2C?S?`PV9 z;gA@3w{w+dNYZ#IOY#Wt+`z9k1xjTJ-V!&XvVfkkhB>U#8mn4elG-?V)BbRJ^20d! z!wY*kH3YO6QDa3=nILC&9ynUyIoO}ZRNclZ19&I2H?=32v;BZ|3?JecA!T&+wOTML z8akduyt?Xvr}w{T_8h-wXgMB^abI)ln0i%ql2-4*QYy3+g=juhbBArNjy=$laDTn7izgT2~X&tr{s z?wlR%ww#m4gx*{Qd0tNcWiF9V{^Wc$c=fJ5s)PN;xK{mc^&(X{V2%mip%v4`+nl%ey8Yx*{~23a)@?j7jil5Y}@yp z&jtm8;4G}Z<`AvdTI)?b->w_}`0WMB@!}(6Mo3SP#D|7LbHLkn!C!ytf6NX_f;+0Fi9QO-Z{)iP`ohvKVf`cYeg|B#2D5qz%iJpDX)XnUdxLS-u2 zL6?$^-O%w*?T2*?(qVNT;HfrQFIhXlPRDwJ6DV(Mjo0G!bk|MrlT=3L?^GSslJ`qmkJ1_50A4l5N{P#fFg<()FdmXpfb<_vxe7w{mrl^?>*yGQJ-61 zdkr(JQ2Tq2>87*9Og#1K*$9wMZU|zPBlM<4B>!?&eEo&wm8{U$5#W1is^bJ!OPgw+ zBX9Dl5_>mt!WvS)kqGyBm&2`%RoCpnP$aKHf&D0KNErlc&U63_V_oI^B8$cVhwV^4 z!!BB7bG&Nytv1iTx~w?gui2Wd9U|fmpN)e#VpSEjl3rfJb6u>D3En64{qQdoTr6q!_VXE;tT$N%nDZg0umUkTC)j?|DHe7`rJa>*fu! 
z1*a(*=5^^^yVrHa^>U>Wiv(*{Ms88Az;Tje7-NK-gtJLxCv$8WT28bBP*miUux$}9 zFABOpHpe(X`iDhq;af@2W~(=H@)KIqpThT0D!~} zg=`uukT;G?9gdko%oYO@lPSen#S?z+SfzA=a6RvB@#NQp>z|=q{PUyIgLbsQk^){X z>~XLxB=jC`136NZAzQH2T&~mtt=g2`=o^q$Hf>Y$anLwlM{OutQPlXe zrYu@c1}_f%g_0+ig5d4#ihuvpcYJ?+MF@i1u|tJ@+k#Z2Xz+W$1T(ncB$$+aP)G|v zayaP8giuqQ+R)C16BQ={*2Q@?7JwMUT7lkYB?O{hQcArKQE?kn-Cyvtz^+;`BLh6K zKp_Mn*I35B`JHqyMs>>*6duX}4%4MVDl7?M_niCDJiBmnfUESj0s?}cvC8$U&58Zy zn=_M=q+L9x-jff-xaK9h; z%U^%NUKH2s6-XKT#~tq5to7H+1s`{oSX8JFEh5aQsT!f^MuhwC+Lea3S3qno^IU_C$QcI|<;n;83_Z{1^@L(2pFs%&3aPB+md7+}onsG}9 z%4x>WGeA0=TUx0vOo#Ih`WNh{g(SfEWZC=S3^S(FuRrD?{sY%^;K1)T2Q|}k{O~qU z_k291e!Mlk?AbSx8CX~r;`xJg`U8ySa~zd%V0vk>(>)B2JVOsuYtLfzvGwdvpwX$y z3P@-LwK8QEL+%wChst;+B~*n5!b;FM@2n`8V<;7pq6JujmVjI}9v}@iKf-L3XJi#X7VrE`<670Egmu6T{jGnr zD!ke^BFd*pv|zC}LlbhjLiDcZ$^IB@E{+Vc;VydM`pF{#fM`;cpo3J~;1Tdwq&p%* zCNuPAW*kL29U(@n)?;I-8BSpLL$|3l)3QKL%W(W%YJAQn+Cy>87|#4T3!rM9hh5=9 zY;~KCdQ;Agcbax_5AXZo7;Au)#OKVd1Wy8u@g)hJGQ7?Ki3Wf10OYsoh8`&Ju+t7} z@r~{KOFRo|3Q#@6s*~`8T4p~b0Q|bBtD;4#pa)j1Ei>oICK2*9LL>xtU6)P()1<|u@J$4fkay!9=+T|N7&AWS zbS-`0-Y7b06Zfj0&0jI zJ<(Lq8u_E2)~pFq6tlU_0uT258o@`0k^0}30iJ_zXzNhKIq>+3uQn27Hd7P9Up>hD z!9~szux%@@FIVKqBvYvcr!{95RiHN1(y*k6oEO9pq1J=Pd#U;OHcueb00@1vvv`G) zndIYaIIU7&%5dJDx93WdDFH8A#vi}E;PtX$&4dV_f#Mx%nQXCliH1fXztr9lbK;6g zs1^cR3_y@6C@O?*HJI%6ynYqXfF^`)jrEJ(11_2Jnyc>l-V}ojjX5*afUgNSJR04H z`u*VJtLt&H#TL6@D02#)-s$)5;dc<;$AfvVm$%6lRsDRD)22+&Eep^)WEdEVclPlD z#gZak))iaI&{`pFD1t6Iv3YPJSL4^)9lzf1*ev91Xz=gb!A&_02_A!%@VK)qbAc)cH}^~Mi2 zr3?>1Zuh$#kP(;df)JxA$B@8#8aB2X+#=LK9QQzMr_S2QC{+ zL@6hvlbF)8k*I!MGOpJPK5ne2v}Wo_=1BfCp+G+XfCqzTE2Q7EJxzKi_0G=dKk)=Q z-M4@3CjyRt)%8G7e!9GCvab~{1NKUfJZ}5hnbAsVNj4fXXR@jFDFCxCqBx&Pgx#?m zN)mid^q|I7GI}<8Iy6)*+wM>qRYdjjx}B^7pb`>A2b{X?#PzeP<h` zHx6x|muao`EZ}hd{dY*{R|6oBPkZxx8Ar;9)6Z_!BB5{ zB**iLaY)|6gZ_v8G4jW?vT{)@TjV9~7#`tEBcnVrpAVlqIbpNo=OC0`^=N8pH1-6Y zgzSgRsQjopoX_9aoCb%s1y<^kS3@Fxw*oQ^0Ux`sRe2iA=L3!kPJ|J1J&6BzpBp(k8f@IgxWiP&nq_Ar%h`(EjGb!)mt=fdOy+s1)RP 
z>3dr&Q!2`PG$7>oU~`+V_iiH|&$XX%?I0{W?TFHv{C6YXF}{f z;ovW(P~7%?sKx~<8$!@k)fCD^upx3h@ONZDtF;FL9yofF-kz5ESGY}fu8qLe9Siv) zk@uUo`0^~9ukK5IqLM#k89(3K?pbw`ezyI6y-!DA zzQ1><;;-*FY?nZu-t`5Ffb*d00?pInY`~5b}?{ ze|yDcU9jYU^!oaSrp;QWCp!R|9Wp0^u!HlqM7+MdU_UF`&dBt%MGQN@)B+Mpq&LJE zyS}_TwKVwVAhd@4IB@Jc&hx}&Tk(3mATJA!`yKcFunx5mF)&Ilr67WMv%D@?)(zL| zhTHALaZuHSxIp}K3(yVGZ&X3%Aml-3UBvUd^XmA=qwojg=U@8lF#Km;y&V4Zxy}{x z96&Wk$|WByaW>tEf3Xy98BwKNphJn9Ql8dTr_Qrt#y5+>P!x)24oXVNvag^KO9??D zT058}QuH#fA3$_K`KLZ;KPq74-5?5t)FpL~R3TRO9#XM^rN&-JrWoM+*D zW@fQ*Qtw}aAGmZ_T`rOPX5f!oD}Vy2MZ~BbUSLX@eEi5}hkeATr)QX0BE`{vPMeq0mI-3TE}@L{V) zM*te2)9<-5omVOU$%7OKf?#`=2))<|$!L@n=XpRM+sElF#Tq|`K2QB!rew*SKzLTN z>QC|X=z{~o>#N`UnRgz$ao9XnMLCO+v`@k;E%Bv|XPan)NF>tZJdJ;6+@h!PQ#&6N z3Rp#jl@23}pBBJMgfJ~T9$@-6Fr63A`*-rkKHXEsT5XLf zF-r=HDqdf%^cBi(&4W$FGBc^zwBfuTxP9Dkp4H&a0k`{yN&e?v&3AQC5$D{%5=Ow1 zGPazpO1DE>V>LP_1Oe_lRSuUN@b+@S+v|nBnQ}ndwhhN|$9bH#p@o^G3ZOQG#7tO@ zg6q2A*PL+Qxp9I3$4R{VlGzWWX~liNL1MzTZMa-6*!R0dc-5k84)(quxbFvw=|RU3 zQF6t4-H?}teLoT7f$jS@s0vb2GonDL1^a!+vTS&Hx!~h=pp+Y`HptY6z!%GxRfO)e znM>`9fh(g|K|VSBwi$c@`8s@tV2SFdrP-eKfuDcoi|72Afa6D1l4qXCZ{2Usl*;qh z!pLHeAnEZCbdsiw9E9zS*3~(p-Fs}rp+57hXyRr4rpiFTE^mkzf@O-D;%J4PI*V)C zo0IJKfH^*;peg%Uh-F>enXXzPL7~YEYKTb5AOXcUI1oWD$n5ZO>dm1A2IrwQXcd^W zmsNrIYpnq=h?S35h+^GH`9sAZUBOg$Qi)csW;McaO8~sFpTcphvk7*)?1$!jhzB1} z8I=d`s+}{to92|e^efk!z4(s|aChYA=djg5FQ*4Fb0a~w(C%3H^N!|-45An`+M8Oi zq@EFz*11H2QGpQWV?ZaeqdAvNy0hRmz}IbCr0110 zK*$LYpb63O5O$M~(;q?PeVapf)H00>W`a8GL>|V?$9WKj=U`B}{UsMC4BOz16FnO@ z*$fZxVodC1kW%M(@ejJhWNjpjv#Fntu_qxKYAM){5rFLbiTiQ#zmJOjJP=aEHD0h? 
zHY_>9MG!nl?gVOEX3e0%VgeMU7}rcEsaTt%Fa>lohpFS$AC|L02i5sL{L>Y~)MEiA zJ{VLB?mHVip4JbG!JPCd>VXSj2l^BgX+;x?s?62sIoUJr;&-yYgOPQ`A&jHS)@`{x z%Of1iA$?|Z#MWB1v+f}n?Yj^T>!jQ{$Q2MKSsGv}iUBb;ln?+hvam*kMuK*&6-TMo zEOhXtoWJ4;MIR4e>-;%`+qUWXrgMzr>4C>w-57KJe01`4zYh5QyzjePOxVP1_HiDB zRh42SZi7>uTnF)?cEbNTFGn%+AJ;fq*^| ztkMxh2`g;{<=l}3c)4Ei?e&T!1*}WN>&uFys#um4Dc#1P2@ORM$NV+8Wa^qTa@^5c z8Olc0U=09jDQMad%?W*Zc`*(M@vU9g8qAnc){W&_s%kZ;2E?>t-w*7^Zqao{2#rzk zNzz~`>;tkc7rb0Ie0`63AE&<0FF$WvyyJ45r= z+UNWHN3Y+z$8!+%9EAfFqm!DaoV6)el$0w7QKq1HR4v88ALy*YAJMFk&O@! zQ~If>G9*bk&8)z5uV##-d#|>B(3{LPX+Raf!4%|LnOmqb!A$uePCBk00E3}gu`HRt zm=h$$$4rR`oTuUs+>hL8o*g1Fq%}vTG^c=NwcsN!CJAd!Mqf`hSHBS-`>1^NZ60dq z!AXzhp7vR_UK*;NWp?LL>{$k6iq4fC3YMY^@I#&R?!f`q?{H!zn8j)Wgw~g!!>e#G zur0+h+m(k$HNvEEZ~L6l(Txx5*CnlkaXL_FGpeh?V2TpWvu!%tF#gCRrrn_%kB9IW z(+7{Y|A$PFkG}8h3#Ha4n86w6fEc(4aWYF+Nz2ws{tNaMaN!V0OEqdlsPrcQy6Mvcr?COpLfV`m;uk`(#an-$mfzt6zThV zNH&`YhRjZ_bo>0c79RIjJ*x#qeqY6dKi&hFVRPKp`mLik%WXi6iqwE36jlvYjW5LY zQp{m@8|TmBypDb*J;9WGEzZG*Umh3F!H2_c=YaEbGH$jlpKkm3Q+9rM0HQ!APmf9* zP|?h86f>FHl@Os;Efpp;M40(cF#~$yJaz!=!!-JrR5snbtnBX~!5XV7&Xemf2Enof zT-J=swqi*!4$>D(gCpj&4h>WeW5F6)BtuP=ac zd-xoL?yfm?Zq8W@Dp)8(Ut7iBe|;Z)h+07@j{!QzoKUW1Nw{1$Y+J^@-vLyl7;xEE zq!{@7$Bqcawyq3>FDr6VY-_~zl96urJGM1r+ctcByfe@U%v=Q!voB~+p^3Y^*t*~ExL&V_F=E?RoR!d) zS_+PxRglYN!LlaQb{YvdbbI*E`1tf8e_*J3&@pB7=hymge)UXkH~jOagGKMdn3LH1 zc#mc$yUVkBCtcT|jv-E~z#wl<1n-1T9jCkzzj?5dSW9EZjdYfRQVqfcz%_hYV;EAU z0*!}QQDIkUlb!WUj6xxmYf&vg^6a5DgX&lo@pHn$LM)+C>avjl)C3cU6rY)1<-3a+ zxX{bh?nFL2Ck)zglu@2;88L~gYz`;AJ36y5{p|1zAI2e9p_2kV<4yd$ehRX`zCMQ@ zZgl;7%-a0yW*G`LZ3^K1b@~u>2L^`IW)L#C5G)y!M6K47qrp*EVux4t0N3}uBhAnV z(Q81dM~51m#pQTCQ_f$Aae^KQvyw7pO#S(DRb=IUsjVSai&KL@n#sPl*%9M=&U}^M zYM#pR_`HT5zt8;@gsN8RWB2eg{JWHb`<+>93Fc6!8fmmyGe|+sdF&gr6JoNio}y^x zfL8S;kb;%MWe3^+< zfqK{aIRS%_WNO_(ptB)?QuGoDc{7c^krbH5^z)%%E=M!D*USlv5iSo(#Q>o|UcakPWBHBhFW`-c zJxTog*A!TbSCD0#dj`WCJH#vLk2cklCC|Z!U$ir$qWRhGcp%I_p4{sDZcZZTu-Gv$ z01#odo?jchk7Zd9LqItTK5jRw6h{PUxE}?zDq`Z!cWL|)^$9{2WFxSzD|sGJtKu2N 
z>#}Vq*!59~)l?3Ig?$__cfG3})sMtmDM;V}VYko&)N5U*lNPzB1#uE^`U_Cqb+ z?|@Jjodm`cIcL1STyY$KM{^x*j7atw9`$M2btC5s%_n+9tG^e7{OJDw{Vr2Okom)F>@tA4$adu~5-w#tp@xxDX9u*F= zibhIgbsAy4%qxa;5j?c8wefmVMSmGVNXTtSpD|5&k%Ry6u;7LQn2acknv2D};VV_{&wdePF;>>QKUI z&3OXV#(RvHf(?wlVeouC2OD!GV;+n7Z1Q#eBb308f)GGH^>1+Kp;whuD+s|;Hct+$ zWQ$b#6P!Z!?D5}kYxUHdAjc96c+qxnEIF7?0T@^_hUbh8*{_`L3{p}Sw3d(un zCJn6!v_(Ud1Z?Yq>vqMGR|FB1MzyU{D)y7m$Fnr-XQLVxfLI%1J+04ALePME7DNf` zk*11}gyJkV-k1{w%`3dY)Ed>QAPu1rA{hg@dwsQ70$Qgtv$g^hj{gi%o2G?)#1yAs z18@;*u`HChSJLR9{~lUouC|S@JuDWx2gia<`5cyE>%;pa`FZ~p>|UL8-oxU@HRt=* z74KTZm)62fFh;ZQZCU8g)M~Q%f+Zzv>w;yWXvqDxZ?`+%Uf-YrSl12baZ>iJRD68A@aEe-{I+Ze1W-9{`$uM89~X3F2Z{$ zCdDf&`E~hI&%(hRLc6^-!t;UvDMxHe!eS~)3?fDbg+?aw;I*vfAgPV#iPmUN1Za&~ z>E*2K5L^vj(-cijr3nJGh5+ix2h*)i$<>AUM=G6zO0NQHoj7&kw74tpSyV2M3D*z) zJHXe99o^>0IyQP9?y4jBfB>=9hdJs*WV-@_af|GrC$`LRw59VPvMCM)%f!MzV+ySO znP+$lD^nP9yWx?1_~?p^|5Qk~v(MqUCmWg$h)ciQC)+%p?wzO5M^iXhOu_%L861=Rla6A z{g0#C%!yaK2f$2Vh^9P~Bb!6U*!Ld&)e&e6lDS!$L#LB`%?)(h>$?g zP)|0{OgSPIR}>=ztgr7BAI8fT7hERXg%R-hZLSy{D{DBpumlBzg+YiNSany+f{ih3 zDeJUJ)HAE@I5q9Zm*cMIw-*akVhl)OZW1^L_>uhpXpuP6;N>tk@wAQ?^PEd(}- zOa|R_?wLD}v@!K0$DkN*;W$p?Pt{RLYBnY!{a$=!1*CmifgGmiAjF?aZGP%0mX4=hQ2ldL~))K=W!w}iMF*7QV3LA z6N(%YF6)LIBT^I`JFqDC?zguqzQ1nx_Oc8J4Sx_Lxb2B* zbOliN1A^J(9)bH0PWSU_-eWRzoO{mS@-cJB`+T|)A!0VLkqrLr?Unxc?LZ7v2TUol z`jNSB%X#AccEiX0ju5ihNZRVxmlx!9>y;xD7VP_DS(9-{(s<_pQ#Y)TAlUaCj{VMn zCPlnlHmo@ypxQH;?*~PCzuoY^{_+>(9C5u~S(a%PQCUj!q^>sYMj~4W#rKap&blK7 zL5xD>69C&~Wk#S>oQ;v8Tyu|E=~)Pgbz?3W?YGqqr5xHbfshX1nQ zdR?$AVh%bXNo=|&!=WrP2`muDwF2^&*u<h219vgvMYbNF=#7 z(J_mW4J`Q{3e{?|%Y<*8=ZRx?b*O^-ezJc-ZAL;i^)gkf5=GSRln`V8Ft@@3pM6rS zL2dxDHndm~5|zb51A=8=rRs2~fZtvV zHu3RdOcsD^?`7agnQF?I`+juE{MH&(&Kl>Jfjt9h(Pqw%DP9H8j;f&O2;iS9r#)Dk z9mPJjDJw1(OO6w&sV*-&lx z__*OcC**K3$IW1ZwLl_zrEU{Ji-IKq=Ms=I)j+qEgz}U)@2P~YG8e3~_>vQ@*A?p$ zpjxmWJMQ-#_X6AxMX5Z2Ie!&RtU_`&qzGt=ghxfLWA@N8xZ^|TJ+Cx#UB=Py)k#$~dahr=r+LZUgyzJSHU2*Uu0uG5e@^u#@avHi8E64e@+4zlWU@T%)1;5deC%%~3;L)I>cH 
z2BaCpvR6A90DSxYikHg;@4xLf4!TLo^@b#INrk*pG^wBXVFrzsu!{ zmzNimT5;A&M1-jgDzwA0B`6e)Nsq+mOi^&24W$*FJBjZhC|+Jx{NWF;h^Sbj;N_CA zEfL8A)wCZc{`Si+`1baS|MBnthL`IVzx?u!78;VozJUifl8#S|0o!H4?L1Hq>ziPQ zN-3k4!0q;qx3_O>Ms)_9Co>|aRu1a)2LQA-+;2CaHN5=!PmmC>-#!2^2wBz@Q0?-o zP7M6Gy(29N-@m`%e%m`w#P90|$m7EJ{fkaRPeXlqxzqHo2`>I~uSas#^z}&5f6 zvp3LVJ~W^S1CR(|=2yruI%-2O31G2r6csZzVN$yxhs0rBUj=);mq6oMX|PV_PnCnF zuoAsaV=VrErcOhd*yErw2oDyot}vMISvqk^PB&mieyoBb1^1meQ0j5V9#}T9#Hu+q zy@C}+5cpf~X#c=tDDjV93J<=XoeH}efbbj6@bmQ$)IBB;RN3iw&OmuDo1Xmez!HEr zJbRpH&ZVkYWkU(8tLQyVdeG6PjMc-%!;u^)eqxFlF=fi1$N1&iny&@`rI=1X53n|6 zr~RGh{W7YU)7QSj%{LFA9v!{ky2QrKvFd}PV_0qruD$O|v&wcoo5}Sm&cc2mbmCkU zhlE(~!mPp8nm$z!#*E{tkE%!vsahKJGb=-J*W)BQAz;u%$? zVw6}f12HG%=P~EgJ*T#udnJoj`=51!|mjaHwb=Vu(k4!?EHf!)uZ!(xdP zCf}wE{{*=bFd*PN@M(RGdjL`k*E4Nwgi1g$iQDru#VQ2A5_QLMFrivj3bFVA(20P8 zd=I*OG?=rqUN$}|J{O03q81SN_*i+RXwUpq>X(0A5~S za9LLD_Zx}dLQZImz_LVSgI*oSY1|J*3<;%xTpELKmHnVR^Jx%xD700q+hSD@e?D}Z z7(`G?K}redc}J-yf;N1Ad%?f|Z+}KB2bL6Yy)0Olh@6Ok5dqR!3x56e1AqP7FZk0R z|AcSf{($>^XGA_F(3*_w0t84Y;d0sValhj@OOM)*a^gG-mj8#kH*J#ZNUlVWd&FHb zt3a|Bsihfd^vv}Cf73G?lf7QqBmq=q=8eVQ2X~K%doxi0$nKs|w4p&%W!@zgUymQ3 zwWo+u@vd52L}a}Lv;(x#71^W%jhkPKW+Dh^-92*_T&)(wqYxg&77<)8SFGy^Pfur@ z&MU6lN2LXWrsoR0Fifn^!Q)NjY3(oBWTO%OHjeba-xx8pen^gg0v(HA)zFZd5s0*T z*s|dH>4f#PAO!QJE1?!fsn#Q=5+c+YQF>cx`@$m?!-Ju?LqH4)Q8Oxmz@izC&}zpF zV#I{V+KYpvcN*8gNCj#D6OfJ7s6ZLBoCCY=B5j`@%1e&sgai%;xp5SYBeg7)S!7V4 zUKHCK=jj;?%~&0c3WW{980$P*oRPt17%@*h1LICiP{r8bXrCx%?&rZGq#jv%I1grI zeC9lOW@fxc6=~F=56MM7H3lcd4TeT#oIP{rj^Z9C-ky7K90JGP?@DpjJ)^>`6$N7O z1!@lKkxglw%brnEq0b>Gr7atT<^c70_?mawi(_H#y@9@4C=sP>#IPNuJ2O23{`T6D zh(7P&IJHKgRa%oqkM62-(Hk}P{w)3+)b=HdG}fi+*zNWKnL0=ENAmimk0?0QUC`3?*&|=E%MAjGwFF zz=QMb^ZC|yN9UvUV>#4(IOTGcV(?&tkvXk&(TrIYW!X#Ch_hIib7esU0x7B_C#5+} zDJu~QTR?#XOr8FLShHs$0UVSD9gw!z$+Ce(6p~aV15_|glXjVojGQxW+ZC7F4VNt; zNz@)Z6-X>o1_H;)8BP!I{NMw4n)7$_eCCDiwe2I{;h+jKAP{QJB0ROho80YRsO z)_x?$riNk;>Vs=l%i`2tb4TQLqW5xagg1ZIW^3~F%N^TKUds1T-3JNUbFstGsPEgm 
zd#YEb!4Au?hJdH1GeRKTE*fm}daw}Kngm9aD!6VNUM@G}BH(G!`bUA)9@_n?z@lr~ zhiz8cUX6cRmezwM?R?qZv@^6u#`E(T%N&ufS1j`r)@8=BM9kKlQbBqYk#fNgKYqZU zzyAUM&;R;&eDlq>c>n%=GpZ|*d86dPPY@~N!FgTOVaXNqJRy~WloFVMd0CLRf{!0x zAe9heRG3a6P4Pho>#2v zqL@(2dgZK6K+V;nZmh+mR(n9MXqyv+=56;BEYyhRpu%yW4Aws$ud-s$Xo{tjW(bLA zS!C*N?B>EVgkh`KGSmeht5X}4a5CRxMqqgESyjnodj7~2& zL~R0j^QPLO6O4TnPi0C)E(w61Q*uju*Jmcl%T{vn*tEyX2AK4&zcA zMBzJbFl#mh%~31HbxlOO_3$vK7U^>fP64CS&}XMPbwq%j2PIJ;a10Hevftai=1CrS zy5o9z2E>Q5l@&9541yK91ARY_0%x*%YxGQo*8J>fo7-!T*^RwS-(^b1d%x*?DD$3o zU}u*lKD$D_Ted--x7!W3tzt_RIVGf$ka7VtYZidnNFDa|Yu$@f2a{NN45bPRJ&rbb z#MuA)(eHoBaL06XKK8OxyV(1hHfP#phC>1s_}`q9oO2^KA=@XBjaji))Le19YX7r& zj_}xneo`rU#WsEj464DLL90y!Ay@I%8-QrmKLqRZFh_m%wBdG3xLuQGIjUtpib6d# z3t}0NQX!g3u*tCuJ9QYdgld7k4mbAZfg(7tCc*9HPH5oA`{on&>v%SuMcF#yiJ3Hr5APe?PMHFgx zIxS$Uy(ncvoU~IJ2f{QfkxVHC*V~5I%MDv9C<3G3RWo3kCd|`}Z0lcx2Jt8coYoZr zC6RE5ygP^h3@#gRyIn8^LRcb}IbfY6fngJ+wG=BDp9+i*IE%HaCv>j)AO?`j#^Pl#FjUdl5o9U z@stzZee;CN<%a9^M}V{dZeCZ7CWp|Xe4iSh%7S=5hht=S{M^0qCU5@x4hr>V^!e{M z_U5ECXj`X=)56eQQ!5i9B^MMFut}H5jHlB{d7oEJyAcse1#VkG+A=Pe8%oI*p)_nZ zRTZk{RgZ8OWtLDwsgw~LY^G(5h!&-o7wzMb?H#V7)eT;7lFr^6X5lj^JM{-TGpA`|G^z0BE?c`ijU24@Wz(< z*}xNG96G}{1!nsWuBZ)$fZ7}=m{IX~-uH$tNX=9&R+Ro3YoN#$8O=it2os|~nd)4o zYZTef6nZD*!`ga)Z5N#%vIgpVC`F`an!qaW;}q|&XQR~mm|O%76OlUyL=Dl>jL#55 z>(=b@N+G|n>uBJ`o7HtkzQu@5ub?$p^=yq6{p-2*K+yfOR%`rMFo*fDexwF}wI+X6 zoiq+?PDcQR9<hCCkHhWvj~VS3wxMZy@xXA zNU0jJm%i>av3B~-k06Zu(ciH}9h%|RqTtNdn3&pm7$G+f`%(JoV9ZaNU8tqB{v?_i zcjsfU9Vmb!t5GS6(x}wgmZWIo-DzHwxS?jCSm8GWmvn=$npWCWR-1G(6Xs>c5~KF4 zVU;lgfOtX?;N`k$@U;}=q>N%=o)H&9m}h{aQsLU4Tvh+3QdR%n5-zt5*IbdTK%Pi- z0h?YaqEyyS#8J(85(_#wJ7ONrq$2cHI8^ZfjmkI%Kp{W?lzQ8pWe+?EEJHy0-hK{1 zdd|G&N`!ZNq{&>szJ6CaI0B8K1h&q-nc)$*g9-a*@Ph3ab%OP*# z>e<vkVcXm5x#wG0w;Wy#M%$|M!3YfB4V;{12SZCv548o(2*RL$!He=NDd- z6L{McQy&7Ol&n0!6#PUjPrWkK4ssO|M~ z!FoC&T5(|v+SR%+qe{l*dI1Ne3Ok)ve0)tH+^1p53zNUlwy#pvE);DH_s9|X?>yYl z)^sOYWZ0FZHphGLJ?XUos)z`2ny@T0<`@wPpjsf%WED=t@t5|1RSQ6Oemek#+U 
zbMpBp$_}-Yrmv^AUOP0%_5u#_VN}Dx`&-td_1`!yQBoUBU69jULgW z!3r(WYx1P$$)m-`gVF0%%;4$2fKNda&h{(MJTS90fZRuczr?}#oFT$^dku&+c+wP9 zRinzgm-oz;y&nL!LhnkR8G~}9XWzFK941q+?zCd}ZY?rRy}*%3BgFHhLVOC|YsDIW zeRkZZ)IylKrR$P7rH4YZ15cOdK<-<8t>E z8GrKjs8&bdhKoQ@at~NAYg4zc8AqsKty(Z&9P*LXI6Kx4Ff)P(Bm}E<*+cWZxEaFT z0C6}F(@-l0gQeWpt7yGOzh`Md4|iJC@#ip^Bdj$$4TjbrM`BSpkf~OGLS&A03s#J>*Pol(zAsKz@CA|PpA(EzI9-R_jPtAz$j&I| z0h3}#j+~Mj@B;zM@tpMRFhHml+#yW!Y|S*aJ_$9V*3=4%FWZJ(1ZM#RlMy!QGo-97 z4k{Beqd7h_8q`dHXslldpcoN2g2JS|J(7|}fhRbcLM%*&gZj9^yxuLbBP-SUsOWhuMtQUMww{`#0 z`wZi5f7#I=*Z9%rn$!E>9&SUAkSg6qKPR)I@=b#Y72kaGjCGyx;ing+I1Y?B5#k&X zSJmlS!jQCeM$)y{x;zbZZrdw-Wjwp#w8FJ2lO2FY&97*eg6+0p+mdEja?*2?GEVQF z@$TJ%>&FjxS{IzpPgv&}Qy>MW*Njp&%?g%U!88FB@p}1)Km6fO_+S72KXH0GX!6!^eDWPMorA51PQSW(KBVs9_O+6_WWGPkTyde?; zb41u8mh%b@!3@}hZQCqbq4%HW2?ZHjNx0o!v92qgp3Zo=Bz$~LN*h-y;-qOwKQ!)f zKN^p+AkGo*1&Garq`v_i<^S+FvdaLPUHB@2Pp}MDG$U7qmPkRau+FOq*KNb~rqLx1 z46cG$s&c0f(PO=T*(r>cDH8^w!E1c-BQQ}!wI({=u~=arhPxCMuSE<-Q(fqHDn@+( zdk!0(Z3eixS?)CG&y^Naq_osGLBf|M=mCz|cZ zS|<8=oM1xa@k6!doCIrL8NsZ^rONbb5pvCNG8jbG zgqNh9ynueTTYCjq_}n3xu&(f5CF1m*JlN~ofdI!}azF?(UT;_Y@BjKQ{QJNC4?LaU;qrR1=zl=N5)*V&*(AoO9m=ma zP1iDORG}KY<#N3$I|6IAg*s9UUCxkiw0IU%J2&dST5D-cRQVAVno8c4M+sw_=Oi#ZT6 z038CP5q@W}VNvCpYF2g+Q}!NM=~28#j#hu)24mk(mR^HCr9AJ48iRT;$qNs~;fxIC z0*68z4ZQa6W;xRCx(X%Jh?c^*dku;P3KY%}qCqY&8mkIx%kFzYtZCe*#^Hs{syI%e z8~Xd`(LO@;_dhSBk@xD2@>JNi$1!ElqPJ!YhAAoya}}U4;-pG6l^XyC1-8!Sotk%4 zUZrZ0t{afL1{&3#qILlfp7m?%VDiSu2EJrpl~Q)%%OmNd!t2Jp8U%Dyke_o51(td1 zgW(48kX;(C;SkP}E%Q;Uow*)KzR!Lto&9MWt_U*P6 zY`22jrtq*LgxfV^v%s9zr9>r5;Q*$f2B$bIvsz&{bZyApMc2a>h4^j?M!nj4di;X; z%9ufUYdB#=|KbOZemb8JNE?1D zlT`%=Mx5FDzo-qKa>C_y!^`CrFRwS;wxW=|5U?yO0s_Juw9fIC%mGpQ-cqwYvozi4 zkE60r13wk(-Mh2VMQc%et=R%Agk_!)o!uhD3G=eza`}M& z`u!j9?YHmn?KjUjKb`UV;Uge|7#M|snkzcRs76+ndBVEP_>e9jh}DAJP)MVOsT9nt zPE1LgGA~%ci(?LJ(TKwwwY_@?N>O>+5;ltl&4yP`+YND9FvSUiijpIeFw&NByIip@ zXPnm+-#o9lUatrojjnZU66Eej<7~Ox3{21vUi10KebqP7BPIOk-=C}b7@PC>_xFWL 
zj|!LW9((+)MZ-heG@!&j<(uWF45vml0uEP@aOf?iAEy0|HAur+PduII%RZZ+ovWpqBe z6XWnDu|n;}|KHM-zSes1JTeicXz5E*?Cu~q-PF+Lo*mlNGjYRUg;WvuC_DgTy-+P6 z>r6qMJVtpcy`g9J2os4Pt7~ z$5s=jwJLe{zGE$q?{_}v1`ewC{8_3+OGX%o2w4Z|39*LuZvO1MnkZw5Y6a({GC-ru zN-QBLT3S26UmA?dW@(DiGA2XEck*LJO1qk>LJ5-w0JHT?DhACUg!Vr0Gs#t*Jz%Qhzp7tuZtHh1Qbq6Lt= zYrHveq+~bXsW}~v*Z_17rjk3At~E@AJI6qSIH_E)LTC>>InjcsL9OfCzGMnRP zSqp{+6fAI5D>ltmFD{UW$O5Em%18h+CzK1FZwHB1D24>0CGCFF6 z)+)+W76_<>#dMK0f}Pr&W`$4@YE@^(8L0Ys7F$2f4(|dHcFM&6H*Rx3KM<>J@n=6e zb;F1KM|ZZ>{`W-tzEZG#@zyxnIN}+ccFiLzn%BTsk0xi;`$YBwu*@2)pIDm@rSH}S zrQK&AiDI`a2!t4b(`msnM_ev1kh+0cTVqU9(BMOi0240P4evj^$B#d~$Lr;aZBvR} z5p`&5&0Rl`)*^{$S9GGPas*JjE=;HuNO{9{yF#QO#sHQAgo@KS0_>DD4?qSMW$)OX;~486beGM>VxMQudf&U=`Y{o{B**%-~I|U7hGRoG)+fjwj;IT zM`jg#%RJ*cWz?D=(8FE{n)c5Hi4s6c;mfr`at1-Pz}J*U$r%Jswe>==oRvC24@Sjy z+qC{;nlOcc8Uk{d)Zo|(Zr2-5r-)yD`wl<;_#RsxHmn<%cBtR-aJ%E~Cd=Knz+hg! zaiuRGZ)Xh&9c6!fjt8Hnu1NRFvDn{xjOJ&rXw#sXuHGs_+I5-N|yn@2T-75XHI5Yg3#s!!}0ca1TLo zu({Mmzu1hRCMfm>Z`&ctHLcMpt6ka6Nt=Ij90pORY$@hNSurw+fJ1Fj&Q|knM!Zg= zH!lNvlgVCxfYHcl-x;9@9sc3Z>xQXENgYn7JqLmbUF)&)DeqSR43T$F+VuU1rciyq zixs4fC(a95ef?}9nm`8a)P-gvy!Swr)}LUEwzmR8e@v4E)t^OFtvWMIE)|liV%2@$ znB&N9C5O?#>}%Z5o!0-%Q&c4}2P^tF<<;JshIKI6Y}X6+j6^+vqD(^jY2Rg=28Jtl z!gd*l0iyP)A>$ekXP`O5KA)rWV7Pl>Mh$u%95v{3BsAg#34nn9tB4MS4*{y&RRq~jveHzIU$FAE^^rW)XvBlr4~`UUFE|5Ebbh~(K#WN z&#s}|oZ~4D0YmvB(LT^FajUy1^YZ zTU~o+Wd&t+r-b~dw|6T>Ydb<8MFhf(Kx(8C2Hk7&S#wVoD5wOf0b_$nm*diwz#Nn& znw`9*ulZY+(;3bwY&c0eIRD{dDlbxCuy*8-@fSf z$-Mz+=LE_=xh@mVrv=1l!cx!aPq}bHt?ewwng~D_{sDW20qf&)mx;VogX$ zbF?Bl$&Jzb*V-f#OVQ?->xwu5dAr~=1+4RA?GOqGwL9w2;7^<)f;K-ks?Q9>dBQx; z__)2|Pk;Uke)H>JTB4J%-gVJ8wU|luhk+y2lUP3T|2T>sqEEOH6*(RzRl3UskH!J3xCP>vNMy`TXDlV@VOw)|> z`3XvlW+h2Z#ncn_3-bXH+F{RPC0a=qdOm8f1jhGX$D?( zfevfD2NY54sVaLhgG{?@?m9sag+?Ri0hyua*${|2wk<*_Y%r}}sOjRTW{qxDkzFKH z1B3w-72n7lRiP7OD_QC4ibnhZZjlRpSM2>yD6O62t@U~CX#hX1poe}VR>as~FJca! 
zZ{4<*Fe8MTaUbIV_3#2y`qH)S?)LbcwJK{}t;W>D0^tWn*?1*!Y;ZT~ooZ73B5KMW z)Ti>OFa)222OW(Pk5G<(_RhyH9S*dtoEK7h#wO*gl+no|ip5}}wPUhWk;(?qJLGSM|!klVXjE8!dzwJ;1 zm8+l{WTRz&YBh>oF(d8XiP0|LG&t3xJ>VA4{*GSjm2)-+sdui11tT<@VvJ&R|NcCz z9r3<1cDv5u46~g}biz$ zXU{Wicg|U1$c0g=HCi3Z@+`ElG$J+SE zj63~&r@!w%_bKNCz)<%w{26}l^V4Lv+@A~mTSIYE2soclIISzT>kDd0n4&fjbm(P_ zTBmWjX^%eQfN5E<2q8uRS=PKeJcG^wS^c*4WFcvFX$}qU_P^S4%RJ%v=>#C7QpLKg zc=volWM$aWK4fG&LQxknsACfe(6n7Cz%Agk%!qNu>*a<&fB#qf`q#h0^Yc4gUOwXW z;|BHGGX)lyu*?yqu6TXD;C4-z<{2qxWeBmTYRLsDC!9_vh)t?vQI&NfYhjqt zC&oA<#L1!%r4`YnT(G5tQVZr~21GfGry|I8LrMwP+YQsS;Jfd>!|V0WsL3lM8D8ZN zlVfil2)YZ!{N2V8kNu@M<>MHgkNay3o9}Nq#3JWj>}p=J(gC{hrj5`7>xQI*1LA`1 zL9D9@WU+fyP+L2O7C)-;bc}n}Q_*N29MjFrx%(T1G(+@s^UZz8rVRgDoMo zXw*%*^DsIgyGW`T-+OGRX9~z_U_2T#UTZG|Idm9?!p%kpgg#T3B)h-H!R8$9o=bF1 zYu$6@w<*~fX;9gq7eN2Zy4Bk2?F{Vh|J*<0o1@=%?|2Lj$WRZ__7_oGLx+mBMtMaW z7-}(YZk?UId7ekjJHyreEW3jkqGmfHG2+CS>`w@u#<$2R(9VR4fZuAKRl;uSM)HM*@>b`fK<3jx8RsZs@C)dakMou-%%SAO$( zJWA^ZFf)NBp9_@>0%^vq*8_~B8M4f~tnph<&{%HckjjV0D+3il1iP-A}}k> zDXY9|Oj4GG)}BNA;X3C?0^w8OG{s7IQ(FH5kD))T$Q+$T+6R0DwCl^^HK$`%+@A}8 zWk5$qSl4AFn&|J+@6kjT!8D#_9qqZfWg!(fnM=_uMJ`Cq0kMmcIUlxHhmZ!wY!57O z>d`qNhaq>kBM~)tt!J&m?mD|Sf^q>38V`Jz3K6EgCM-RNZG9C`bK6+~M zknZ2>80po>mPYIR3XNd_qK0cD8c_ac7P2&8S6)PfLevZ-k2XPk18adIHK$>CSkF>G zVSH`w$qh2;#(~|_8Hjw1y}(_)bd6X#HdxwRy*+%6$2oF`Kt!}?9ec-6YTskxj)yg4 ztPG%ZVa|ctdURNd;l~5apzUy#?p*EHjma~AXa<{SZ@OdD@1uLA?~~@xbxXKM25s{& zj(ZYlN6tR$+0PNZ+YN)xD%7szn%ei+hI7h0*?5QI)2}c49l#ELdF~m?`xm4 zF#-(^O=)!khRMeOm5@Hy9@Uas5LYE>a2Qq$1~)NdLd{Cw+e)7r0yiU>0Bcft2v8iO z^X*Tc_kRAJi+&HiI_}vokwNW@%DsW+dq$&c<0Bpww?$3G+%K{Tgeu+Rnu;8rt>Ks7 zFam6!dHsk-_q_K^Jl3^);AU_@1br?2L7F2dVn+KB0%cRs>ZkTH*5}-7Al=|~KgHz) z(4i7_xGtdN9=Vi8HJnq{XEkG?)N1(pz46-Mpyb)0+TfZV7-*TqW*oay?e#cT^I`AZ z93a$PuO@vLb9h^D2zS94ha%=&%&umPvWJ+JO{}39s5K58Wvm-YwZe+Li^+kp)`@z2 zXE?aN&LVD1!!nGcLERK%tLt#XRYdREp@LeoRf{rSm8>1mQ%Wc$8~adg!GbB|1o2llnPjDg5<7H+o{n7(1yb1ntA<4o2$Smg^;UeQfhWa zy8=v6Yl5tZzhq;WbSk{IVO<|%Coh`39}M&cEo&c=-M 
z$K#&3A3giw?>pEm-FMXYdGK^DpOvIWFi+ZAA`p;nH!y%DP6~grwO-Z}Ac7AcUXfD4 zG%c8yMYCE}AlLwMONlx{+hhg9Yg(+-0?zJcc2KQQtVYEg1J)&CVnWRskr>~7^Mr4| zdBQR)Oi)TPz2>q;3*VP{fdGhVBAU`g#`U^s+JqurUM~3kAOD2keD^KBeOj?B3v${Z zMSE+IXnMt!5DQ}py*v8#mM~2-QcCuY#<88VGFdH)r56R+=v%896gg*9Y`AS3h)0~O z01^;bH#~p5Vwq<=Kc6s76H=*|Cgs1*IU%KlRRq8K?ptiPY-|uC1)v#pCRBD10E@_| z4Nkj5R$(O`1}F5`^FljQ0k97kD)Iqn8)h(q+viodJZV7S3c#?BT7 zh2=O%GR|^P4^)gr`45-me$I}yfXu8ZD&1dq5>=#r9*Hk8v&FMq`Uk`a1Z% z9g@{EIqr0*VnXk=gTCuw&W1-)2}*C>*R>my$LQiX166=A$_}}wD0T;s&A@I&N}4Ql_HSs8wzR#Zr0H8gl(mhUad zU8&9fzeFS9(-bjVm*O~umi=gkI{EXNaYv+t3cgoCIM#pNMdv-hP%EU=wkLeQs`2ek zvO3+PBjVSjH}26yXIq1Nr@O6#|K77OwNvjp{cH8e{LWZGUzg#h8krj--L(6T8-DFG zF0(w_X=DLJ9m*6A&dca{w7{9qshw$`XOD7H)pJ*+B1qWHk3Z#6AB>q-HOzVt%o~=m z$s(x$2ZP|1{tU^Q(3PA4IP_`Qfn=rz5n!F^r9}dUKnSsAle?3uy#mz8?AUbQlhodM zJad|D^PV1qE~W9n9F}hQZ#VqAjtJJf?7u8Tja}?!G;~pMBG1%oH@RKgDQH>0n5T$HY}t)L3tnF?i1UhNJtIyN zf(W*hv8AnfK7l=xV~skq)}3_+gy4aCm)#{NkW@^Qb~<0@2&oCT%MHK&^>_H@`5C8W z21ECE$r&L|W<&PAJ9=gqF~$hy3_!7o3V~7%>yiq7{OJRJ|A#-~{2%|0^V2i(cEh&a zASD9`pib-!k20^zg!8%>q)J=E*P;!fh}f7JGS<^ci=He3uq+D*z?KqHQkD!6!AL32 zVg!9r*<7wE;c~s=bY3*w7&4}5MhG{f!n`3~F8HV4{1zWSyx^ywUfq0yYYE?o#|>9Z z*$@B<0VVq;Zae0%mHdExy%X{G94c>|!NY_*%dW8QUSYa&q#+99Ohw~`hjDOx+~Lte z;NIPqq0Yh-Cd)<~G6EJ26`(^-nsRf-ld&u9!0`?>^p4I4jfS~7lcg4j6a?1isD2NT zrpT({9KXadJa+na2g;*?M5INz#%vVG`gI7k?~({S`{4?z?NK*Y?{17HFvfy0Hb_8MJL541P#AugwSp3_rl|Gwk%S+CZ+D!tZ<9RN2mOqJvQm6XuPb zBu4EGU}^H*dhXWN9n2nS(|OwGlpPl`lE5u(~YP{^;Mr5M>E6wW4OU~$8a^kGJPK54i??( z3E)7TX>;!I3Z>*#vW#v(dwq+=7HXG0z6RQN0YDKDiaO40H9buR+Bp<|M~7(nyAO`H zjD#P3)WMbV1K8FmzG8cGF9+@72N$nk$>Dg$_lmNm#dnr)e5@SG}*dVOMiv2a~<1$*TcIF=i#oF)Hdr0M@@u{G( z2NGNded<~bir%jC_C4?Qr7M<-$c*!8#kwwlRFskc5yUBAnj)Bh*>p?ZGG1S<$hjiU zgxhVy^`_)c*K5Mx)iiWn!8*sP!JmICRIVI*74V(O!x9*^unmU*EpI|a3{p0g{~vWFyl02nHndn-lN){a{AUz z3Ak5^9_i-he0Yrkw;mwOy}))ix#)YL7H3D(h=t4&iUtt^Y#PW z9Cv?SVx^1qc|1BC?g;GyE%x7!97JC$ciKE~;Ahq$F>b|v%n-nAN>qXkVp0a_@QuspJ`8Q|rIZ1R zm?k&yeGbv)u+$-AQ@x*vy)OaH86)rI;d)LLw|}Q)4#m&KvEter9Y 
zWjn_4raYzlpvcka8-r9j_g`?|(dN+y1`MgPEHlpM6Q&rDQwD0ahR+J)o#$v7bH?qu z;q~K>;_2~>!*)~nWx zZ=S!yX^u*WBL!88I^v>wm7s9@DMrvVqY&fedPQ8;70a#Qngj)<)jc2(A|i5Oe0=|c zKm6fOc>0fj!|D8tZM!00Ud^lSg|Y4_1=2}bPcv>w*+>d$$LC}ZC8cO~B1Ww1s$GML zQA+McDHAFfB;3}JfHpxP1hC;vUoIEKX+oS9OjE(SEM`b3CGYzWAMy0`ge?iAe#G^b z{Y(yw1eSZGczkU_Zv#LMNPgiI(HmIyS@0c$9VUqWJZY0@f|Gs0aF}m@C>yU z2j{~i+y0Io0+xOVosGqxg9P0;cKmJt?uvN@@Js;U0#>6qwF+X4%Jz}f0V$~Jb;E^z zK_yK;M+SzxRSCiXL; zgl6y&!QfBb=#SKyD6|fMEbDj_s9uvHJBMX-MAZ0lMEQ7yKIcy5ScS9?3D8-6j&Ixy zp}`Sw|4QX2yuX1%haCE&eI)b(97ZU}{S0$6?)6^dbMN3iZbY^EjGZ-a->2b>nF4iV zS{#DW8N8|)J5`1UIvnjDgxI@j1SMQxB1|#tn6Lb(94FP+&ZF(kC?JDIjF4v1==BbY zFr3S_4l>?A`q_jV!+Iab;CmmPzOT)YHp5U^(cJE`pI2w-Y16E8a>(Y5k-^63g>fIL z10pcviw-Z`=G>#pIQ;JTcVCa4>^j)xP3hZb$7OK?Zj3O?;gX}A@FMDFn3D6q*gTL< zSIK3_WT^aY&H(Gbi#sK)s9uXoT~6xvhIz1!)eH!Wz&C$ja9BqjRa-U+!`=fJ4!;^M zf0);!6FLU(T9(ia1fMt)oRj-9-x z-LvZ>(*?D*b3Set0yTN@_HZ+PsM9HGwD~=Jlao%WLRglF^P>G6rb!tXg9jS?43OjW zr3J`*zm%PBrNMz3?q|-HXFF7unFerYbvQ$7zAoR2Y3q+6M6E$!b22V*4p zXhEq@K9}pBK7V>mqhw~r)9H+Lo`I_HCvAHXv{Zr#)2s-Nlrr9b_=xNE2Ifi6O47sv z2uP`5+cH|CTS!X=%rO`f=$>WK;k#}>PI1DztlIagWRM8X>x_5LE0&2+^A&{>00Fmp zBhB815WtZ^G2*%vy#Mfm`26k}TO}kR%vpi z&d*QCHDTLoFPPI=Q>%lNe(Ss}$ft_ymYV@3R-jahG9krTsl%oiTXZbOfRL1*T`KM) zQljt~u017vz22}aH+(azGjlG6m6U{B3jX^25BS~heuwQ`@%nlNwq%Q-LPm%{2+ia7 z7@uz!UGW#VKM60SvJsbTk8W-&k!FoB*KIfyz5h4MLW4!j=!_s1WGAAuCMzw*%K!0q7;NG;& z6?=tY*`)$OG!x@SI8SPT+P{PAZ-?S2fUWP7VYBRZwP=~0iUw%(bime+jqT!=~^FGz$BOrGdo8! 
zFdDR_@2GZxVO_m8*jd%aaxL*W++9W5sc{=a_n$I%Rkc`~xu9#I#$PiU33qTpp`I0y zT3aoVh?wlII|D8Tt1ZeE*{&M`q1FHn*1LzaFsW1}jc^RQOHgXA@-RPxvah1`EPy%5 zw&(7d$;LC$nSFEwU0Zju$cY*H{%Y0O2k%%vr$%SV1q4QnPIGF`TEDxkfu1de_+?{V zE2sq^1c2S})pb|aD0jBBX`JdK86Kq@k>c!*2dC7tJ&<~*3pfS?N=VxeYRs11-U1QE zAM~sxwQB)NMPBW!HIfVqE|AY1>zs}SatNkuBB32`0*#C zBv?-oL=jr#Lqjv(uJd&(5XzR2jlwtQWOC5^ewfpnGZ>6zUNOfL2pP3zEOWr`e)}!X zrwG)9FmZEkh*hRHD&MFGm0T)*{OJS!`onufCN&7>DF9Wo0!qEbSmKNt6E0iA#}BXg z{`Srp?>$gu zP8tQOkN{bWSt4r#xs(&G*BhSB+G2j1XG~kxj;}f6`ud7up58i7c)eas7zKQ8XopqD zrjjY0`^~TVuOH9P;ROF)<1JW5bA}H8?h5!ZETOAW?3@p_L#P6@nPV#KQH@2xqN{VD zepo}Iw$4oqnvx|RvR>AuUe(w=^w7;Xk3HDcL|&dpdCb)t^hnRaa4@Du;@zW1uNG7bV5o!MBE%qoq&M)Y(iE4TBR_I(Obx*U+0ThXQyL7wj%{m5*CM9fX;$BG zoY*gyS1nQ&c&FXf2o(qeAqq-W`o&rcV(`zIL&L^I$DS$hXK=^;iy!A_4JC$fB4FCd z&jw#3fXO9BmJ7AjC-iohxZ$Y$`A&i(b+O|&F)N7_E`&b1q$uo;}|2yosmunA=+Hf z(RsJ;0z2ml;7&cfThH-3UvI>Ie)vmw_cNCSh9E|*kv%ODvr%ya)v~R%`N{UGyP?_6 zp7jxXpDxkb4(UH3l`Wn@AnL&!u{5QMly1&9S-?(MupdWVpw19Cz26{h;t*_=-;tt1 zglh(60cr+SvfgdtIdb%@^gq^a`rPon`(@U^FBuigK%4?j=M_wZls1%-F$IOohQN)* zlc3p;k1rpUE*^}_^=3|j>N90?EDhetr2RM)eY7CP2oDg(sC2$Wgl*f9voh<&NzqC% zGD@mgV#IgvR=j&UV+sY4E2aogWkoe;n^(-sf@NMYO$&q=A78Hc>kmKTdc7eM6Xr>~ zD8Ae-(74ltIt7Gj#xzCTC?eN{KmOs*c=!B-?|ya0G%w&y3#zL4MpuJFL?hzU6d+(s zb3o2oyb@zXt=X~(1u0pmr$wJ081@IlPO5-{Ktcq~lNw98;&Q#=dfo8+{0{TX3U{~> zm=&M?{->XCIzQp*>5TU;ugG}=;KiAyNSWZb8--Sn4*GLG-h{ruck!HmpcC>ijd-~K zJ@TQB8nldBB(h7q+LopNyx%Odb=9K%9p)=f2#d(W0%}IJveqA zt5!o^f0na0>r{3TAWU(0WrxP8!&ohIzzVJ6&g|3vJ2C zFth4HnNS2st!UDV&aEay1QZSqq+3b5IZ?&S0Eo5Q>l6c~nGq&6xKy^N;}eFgp_!?7 zj*hQSa}s607RRhxGv34^se>Wu40w>x>^__xAM^Mx_d-u%Dl{o&?m2S`+Rw?|o0y=78*LP&A83nsxfj`i^kM zw%2~Coi1olQ>Q=%JOkP1xk1t_hS2Q_|2KBTH^b!zInd$J#)t9d~{YY zv>H*J>7$c4AOS=)pyvb!QW02@G^J=}uR_<9h!omi1B5H6%*awfr9#+i?Ruv9@8KH$ z+~L7M*OC6SUq0%?e>1E7dOoiJD$=$oEX(=j(B;!;V*yr9zXr~6FAP=2P6{kysn_{1lQXQDOcRCH!N`l5hKhI zr9}Mor}y~&haa(?XT-Q-St?S#pynZkjlOY|_AudB-@L<*?|)L*lsPHPjGXnQIWTGn z>W%wG9SyakqOZ=g?ZSq^98q$?>+36?o}Lf_V_qVrEg+QuDHR_+e8B1XNip#x1}zz{ 
zJ*NB*+v1P~Q6ce(J@pH+AfIx=zIKe)cqEw6gjUD=IW*@>o|UC%KbTo5lxtS!L+Vap zFasf4M`&^-r1zOZ><)-OmN8GY6&`{iCRWX!w`UeBXa*oLqJ)SkWaJ!>az+wC7DldO zU4biZY13%5T`?eZrCTcyKD`${HA1-0rg@s2a9^ZO=+RD3lkYoItCG;@r*!3Mk@J={ zGUu+b`dHC@7nm6S^z|h;)$3RcZ~sPs$XI-RAN3M}WiK8ZinSCdhMGaUJoSJLS8Zq4 zRQ~z?v5K55Q;>&D+Mp;~Ex=$Tj_aV;EG?3)QBtGY&4N5;K&lprcFcXRKiD~rqB*g_ z4KRbl(!2Kg5C}0E452y51yvMQLUGdQq(>TpymavY7!J)G}x zK;_fp(`VMt&nS=E-nNWtbs~&M*Bbgyxbxxr&#zaCM;UdWnj4^aZ0q^pSr8cwPP5lN<>3OKa zOc)R>CbACO><(O9e| zOp`h=Iaj33deWF(kZVOs8)~hX=7@QYK-rMC3#I^^mM5%B07=mvWDGsqAlnf}3={$? z2sveZ|Kof7{!f2FDZqE{&WM2xlg|W}0#r?-%rMe0-1f zbjG*uo zm2tb?ngjM|C=_OVhR+*CO^{*~zWe>KqgfpyEAB{m_js|7 zgc)q~$qL10Fx$Mzu>N9doERm`VZI;C2WS8ou)#L^cUS-lQIoKxB9*%9O|s7tnxU@` zq(5PeFuSSa8M?-rG!*8FcpZplA42SXgNO(*Xw%eM3#3$3(X0pTn%asQSW;1yf%mZ zV2~N+MGNVznBFscZ4VyIhZYdvmbqZL&(@9XNRJdTo8#x1qy2MLqvf=@&KWtC=5$IW zI183L9iP+nTKvu+V?-o_0eKe3gE_fm2pDXf_8R=k`)4PH1kp&f$*F!Le;qgA041-; z7OmHFoocrE46QZ=`cdl|?bO7!;N+C5DzkE~0JHV`sE~A!^Kc#0JS3Z^4!ewp+MPH} zyS4I~lh!#L1mw~j4zXwKY-!$G#GMlTowF8_Ft`E~*@ARhPq7u3r%EO>O})TXhUYJ} zST?Ba;2J+avTjh_%D2J+N>u**|A=GCDFsvG0UH zip_^4h$u7Tx&*9C(1PDU>YrGgxWFmbdtgY+pg#_0MB7q=Kw(|n7#?+oJrLm83x~nh zYCkh#pIjW;E6Yw70h^(C1ic>SH)c}w+IDCD5#nb5o z4vcx7uu8_Z-H=K~PFLg@u&guU^@=GoBvp%I;ggJMUz5f7MMDm5%-RFTr(zCP zOh?p|9W!B$5s}s7br_J=_?TgA5jf`16eSBR#qLqI-0pF^Z6Jtp;(pftaVI4V zY*ZpU;XaP~V83W1Dup6?2lR(y$aEZgG#nlZK*!2Ia)7nx$M|e)3c_HTO<|8$Jx0L? 
zBUpOFJZT2D7C(;9Q32GHdd!cgJ1Xw9Sk$`I-g`ki|B^Aw=<^c6dJq82u|1PtcbGW1 zd+Kag>pbJM&REwOAzBR()n5BFTt_od&;n%c&enK~)e)1%JAt4(J1(X6PKi-mTO?WxYNVO&Vs#HiG5tfG&f|beuS3|G-s5L$ zMI?iDRRG-OMyu01GSomCMee7DJTMSOsVHK5s`@@sitTLO$YUHn>rrL_yn$u0{ijr& zzMl)S2zX#{dvva3`2Q~%d!{gZ4W|9P%8(_i@=ynMxd2N74VhaDa5Tq{36UqQ3$^{a z@7sjk&yLQ=c+X-G?;@JPfdvJqf9R{`;~z5wK-B;)Y5ULsL5zfGb&l)|fPw~LZ!CvK zsr;$8@+|1GEE@Q;hNgN+F&lyghgUkyvk|Rmqd@O>B~~Y(wUfD|-pmoT*IT-6(g-n3 z_N1#vUjV>hx1j~rLW2uMaA30w^_{hbLFXX)Jwygt=Y;1G?U2zmQ`0dn^MB%^@7Xm#h9)XRaBf- z$TR~KwC>}_54fd_<#fWdOn7;HL3CP9VZyR#Bb@EBVY_KJagE4p!T0(3TfG1Adwl=H zPgtMM_}A&5F)a(Gl#p_Qh@l7IoZDUtGGSR}T&^2(1GbKftA)ZhqX=8p1?#%va=n58 z6Y0*V9;YCcAp3<)Cudf$;Jij8&6V}rT5OBS2D3^k5OPFH8dOl-aXWX_c zkkz8;QD1>FOCCMlzyG+)cs$5N{9{KyNFJ%>5D^IoL|CE`0f-Aiu>vQJ))Q+3B^dU* zR6%O*C`h?LiWoFOqobq1so1mYz#`=?)R+TOo?FcW0hoeP)J3P110fPXF(Q&6aYdjC z76QwLTN2a|n_=ORuwOcyyZx68Zz!mj3OgnLrOnvL==$h$4#-ywm9IbJT#O#BITJ^^ zWuP1Q4Ie%@CcWo?H?|u35BM#HY*K47R@~UjtvFIdv#;6uht#q+l=}RBD*dkeQAE?t zld;+?%Zk%k3-(%vV{$_SyFJ|;NpnCvi$#iq*x#+}jkr76&3-H$Qrq7}cISbFwcdsG0i8Y08xq3KOrP)locGMFIf(>_IqVM3 z5jez0zgu$I?fG5CWysDS*TIcvhik#+=KroStchhHyzrZ?;68A{G!sJPoxJm}u%=q| z-vo$fU>^+6eg|{NL<)p7JWwE-J;-|Yb5(lzS_HKKr4mZEepk67CEZg_1X9HsgqpML z&_KPfKfxa6hk9TsB>|J#WgOJ05(uWKkRJ|&X<|UC_Jwof)iP$mdPG&x`jq;B1O@VG z*>RYxUu?fQ1Lc1z_jy?mn2<}tIx)Wa z=7e|8C#*}v92t?-v4&WNAKpihHOMD~*VijPetd-xgJZzQmyftzld@p>nVC4?yq<8q z2|ivfxTPCHh?r-CIuRi*E4FmS4?p~b=jSK<`q$rLSr*(bSCo=9!pTY&>Qm1_8DW-L zdpBHe*>KHjbi#yBDWzl1rU^M`+_nuI0%Fkbilu^6N+_jh7Nk1tBp`4^&IunsykK1x zy!+-E>w3azy=fXMSEPJH%oEmi#p~r2+m=uoMdJuh`gu0iFB!*(GWmo2g9ph2^T3@8 zhQdr(>6VEF0c`AP+Ic$$v!>wGsrp{IMV{3lNjX`e)7GM+-3{!<^X@SqEgHZKq_V*| zofyEpOqk<@iLFSRyk^1*|46&gS{&bQru^g(jb$@jvHp@tJiu7KdNXN(Zllp9H0VRC z^DydKzjEMv!QARa%m9Y_|Ae-;1bg_wTiFeNwzq%(MX%TYwzF3MfAZdSxp5@P8ay5s z03@Z%tm^*8_Irl4eg6j^7{PielDUZs8M~cyFUOq^_^Z2(qp#tb#P4Ay|4KrzC^Za-+7nKtgTU z9V}hWM+i=6HHU~{954=JwGNR1NfJ*;Z-8lxZ~%!SXu$k%8a&8GTx1bBNj!%xwhU&t90wh zZ~N1{9QzyzBmAgOLT;10RUirKnHh;$lxz*JYtLa;3Hdi0eN@-?f8NpA-H0S~jg-OA 
zdnacmNhns&=q;buInig5Dck0{ZSKSBrLuKc#6z?KSeQkQtt!uQ!MY}_tA@HI2{8p2 zTu2Rm0F>(7LTP2o$>)@g1h>iP`XIc09NVLgo$NstY2KN5 zOgsun9SJTXAGf)Y?`(er_&{&4dmjjCMw7Rj>$Yeq1<^}*)i{nS8wx^vQH}5p0HGv^}Pv*P<_z)aojF`p|!yxsjqDCy4w}KIHxm-~=BY1}x2*Vgq zc*WoepFSM%>G1)F(M#CXAhhb{c>_CTKJ;J!i8IdUE1sTSkTYW(k09sq?fHyvU!D;d z01Q!+!*QJOXJg7vy$o@ZPxXPk~lOv8xN;eh2j<635LDp=<$4yOl<(}2tMDi%>#^0R~Oa7k1r zb(xpuD!sD~jrY49480O%hvDtvziH)GY291o?j4XDDW#xtFRYtYxpcyqU;8(DN(DMkbpgJ>qZBYW0#Z-t7xGX zR$b5DQMHm6&Hy{H#dhn&ihYUF*TQ=0wRh_>YraE`k+yIi?WaXlpo2b^bkES_YmnAM z5QRj%0)(73CyV8|MQP%u#8oR@*Z)!!;2PnDw~XE)aaZVy20JGqEg*7guHDI1uMqsy}AVPTD8oh^AqLD>`(+gRtF z2RW|{P3>MLBQh9Sv?t9+DDa_$+1VUO-c+e|fmp|1*odT0g(NZG8(USc*TMru^XAxc za>T;umAhacUe@X}nbOH1B+1?2x&iB2fYPuVkpUOU-6|-66YsL~0dO9}K*%ig!eB{; zy{;=#(ppNn)KDHH$f%)*J|G0D9u8(8T4C2T@&< zw)XcSsUp++ogkk#0ATxI^POtsC>t8@?jrIzB~>UT%Xq|tJt~AC<-7E9DCFzg{GFHg# zibWkSlOavEzNU;*{k9a+Bm&^YH@4=4Qh>BVC}o30*YsiCjy#Y7W=Tx1`FJAPU?TAc zX^J-nKts!B(35v|`=u7fk|-Dgb$ZR-6ro;LJ8?R~h;}>87c}{;G`^$;AJeyja~NX8 zNc0o?vG6PB1vtK(&sfrm!|{OQ;fQ5Pn3yroE6y)hIAqC@A_({xqWr|5 z#Y|B_rajlP&L}0pd51VS97YF+jO*nYpB^WCI!-u6k1;g%+-lf#dKKppheItd@E?Eq zcRYW8fe!V?g5phXKEF8Y2K*FI&xPzS!SeF&wp1$Mb z(*xq4K1%j|T2WF)D}KXuF$OQRV!1HF?2tKE%U|a0f;Q{OFh+oY>-CD97g(5scR}J7 zS*%AXC0s98@H8OAh^Xb^zdt`?8YYZ?`V$6CT2HB9%?UXtl#(%OLVGET)N!cz!QE82 zv^Nbb-AB2NmBy6HUcvfd+kNz1Za?OYyVjJ^dv5LSFbpL(+&%eCT_b<8Am%@L_FHqvntu7hvHTts|H3hdny>|KeKOj zyt)G(Ko_rkJBn3#EI^DhsYP=aDh(|Y%njdeOPkMvt#>Uv3R1YAoOmo9HC=gTt;w>zuo2cFDkt*b@ zVLfbTr6jA9&5zBXG)IK8kpsQb$rW_if$h2D$PayI#I3_iR0?Ph73luI3>S z97#Y|FH>tRfSc`R0a!!>>0|YH! 
zK|WUs*WlX#29-}!$PAghPwhO ze4!p$G9}idFpx58o>ejBx^{)D%wiI1^;8HcOCIc!7NM74R#D_NR4S?Wc4mi_y+#-` zif03OrEaEP!RW_^Hyf(jzQ~}2_t|>9#2w$I`@%`HihT(w6kDst) z#>>+S&ex3VS`acT(*zd~9Gqhu4+mW58JFd%f>$g{%vppF4M=&x%gY6S`}=P=o+f;J zd_Z1T-1*PID?q&u2`-Gd_I$gkhL)m{!d5Dy%5XSk^@nMyCPGn#8KuFFsu$ zgj@ISy;dy*+%I)od~bjE&Fk*NYWB=oJ22FHK7O|8%!y(|t$0-kPOKH)!bun|$iIc= zO*UPPCoV<6-rCr2YSJ&%GC-)nW1IPSd+_cgW&aB$YoAL}g^uBTf%gSrO$bDYPOMOp z6c4zf3ZM6IAz-SZ84kRz;AO1^l-h0UF?v@j-B=&rG5#$Crvo6a)%DolhsZ+DNP(bO z&V^M>?`swwy@n~@c0cVc7!J-!ygGLRp7!G{SA|Ma{mJSz)UmO@_kFz+4z?XHv#{G2 zl#Phn;jpf$uBTEYiCc9pRpzj{FGH-S*m(e$p@aqm6vmnhmPN=j#6u^FRMFyyW^$=CHy*v@6o@?>)d16jmCR&e8WMHPq) zRT*ypl))0ZRh3jVNKk#ZyTyHoPpbR_3baTRR0bh4$(T6n9|Wwi^vOF>YN;sbCK(sf z>k#=W&-Y%S`rPQDsLoO;Xhq+)V^m-2d12syh`{Q#EMmdgzGoo1^d7Djmc)*>`w0T3 zWxh-%Gy_HNk!bxvE4HRqVqsS@n9A@W!iR{W*7h}JTxU^wq;Of6Sk7%cfJRkr#s{Os z?uioWV7X2bv6P7{%o3K%Tyx_jJWD;SU8v6QRZ{nDLpu$pa&IehxXLmOaMJUT;lc38 z*&!n%r>w=8wK_HD8jdH*xsV!|p(H2Cdreu0TGmDKBy(xi(76<;W5irZG&WXv>7?wD z9}GwXs&9R!={3K%SvWQo3D5GSAh?3!rKAEuNo%}#Vg-aqSYxLmJr&f#==!0F*ZSjS0w9GA=XnMLn8pd=<#L5PjHtZ&m$%ETCi9xO{uU!Bm$OYM&YdCBmkF!>%8Fm^9z3Y_z`g!F^(ga zv?7DCEDMguBZeU&1g}Y2`$c9@g5J5{cj%IVckJmmE=M;^M>mHs|FH~R)j+4AIX<|J zHJ8*2Vv3KMNmCZ^n3AyFi15_HTOI3Ajcd0qZ>#~Ehkxbo_xjupW?|aWa0utH0+_*w z-Xnwr9|U~F7!ae>A|X$RLyOPOxqy?#pgZb{G#LoF?bbe2&Ro5(wdQhemcd%b!vVB4 zmFRcueNyrTFAA-Ov>4@aOvSP?&vOM9vZA{GsQa6N6W#$1>LEy5U0DWMEY+DoMUsEBht?`;A9s!rA>pE4!FeN# zu$;B6Cy~_nBuT}#?)yG$^K@jZ2#_MBki9`4-FVvA3H55bR?$$|8e+((p?8~n9s_lS zZ;CngOSS@C1^RtawZ2)QKyv1BbqGF)mqv3{iiXs#t1wK8hkKQMd52}J4<+_a6OYk* zKDe#Vt{?3JGAE9OlDxPA?;MJ=>(Y%3`lFh2PC}~ev3670tjMzW3Xz885WpSD1b>$z6sZC*MAjQMZq7mvoii(SanW&8pkn~Wj6!|?Hcw-q_ciIfT@$ym z^v(d>y!W=`uhAMNjtq!7hA#>_gi2F9BFyk6`yn7iCD%W^ZDsx+*_ye6i1G2`BZeVh zS)}5*2iGuBV|)b&Yg%!+TydQj#5f{`5nzX$R)`>Xs4QgO!!zK5$8?wwqOxCQfU}Sd zH0DrUHziBXb%2Jb1IOb5A0JPc#)v@5YUWyPufmL+BwTJ728`2$>yq*H=?PEYpYizN z6BvZ=&(C-{U$HK$xo{Dlp3fLUz@$9gLy!qSjswm^!1a1njg;YOfQMKy#$iBOMl8#W 
z&%b@c$Hzx}d^{n>h}kWe*MyR@2nj8ALnI*;L{cBbXbZg~Et(~HC~p=87m;V6j3(TQa`h6qM(amg%JWxxO_IuWRAu+xoN=v~)O-2n5*6>AQP{#*}rjKN@|W z1Bz?a3J0^kD!+aa<}q6W7J6ez=VH;dyp!!ZtcCG*@=XFvs9Nfs2VB;C2PZ@T&P(0{ zIfA!+2L@44>$+Uespr#@?+sikEfCCe)IwI8G$S^`1Il*oS?!FtzQu(Bkl2i_3RSxiuQ=Wfdhkm7>HA zdd@qiVX#g@)ATGw&W+(oS4dW}5$RUT1!7C)T(7mh87b zvzI}AfTQg@3z#F8{a8TA1z1uB2+Bewq&uP$Xac{k^~}IpicnQs4pk-U5d)ITN*JI- z9JZEBW^(ucx@GMAvo%QFXYP*8cE8zK(~m<>XmGdf_H5rRkge=?&fCVvbLP%5LncL>qr*Izzi zoi7SBQKL?V7!b9VTFx2Q>wwjg z{D?cf6k*+B2nF5wU>U~=hjGMwU27bhoh{pisaBi9u&U^< zFHqoZOTqiwy_nPOz3vHB>R;SQ*&6^-(YWT>DD|@MEbMj|vMTzhG4*DV5sOZ?q&KAi z6{Sd`GD>#TlAuN?AdT^@YoJ+Enqh!h-uTH-NY~oSD^ftp{XLCDpOY zUg-omrGjJvv}$>8m?*W+y^<_V@?~@_X;@9pQf*$?uo!EGP=Bh@+5<@|RA|+g3l|l) zU0j;=zrQnYUpwkRv=dd)J28Ak1@xqjKh|?6<+71TRwU9fH`!t1Xi25yQ1dj18mOqs zsj~kSu5>pQfS5v$rnInzAz898pnIpNoKUT1dkt* z=uTKNkwe~H=Qh9N-py`XFNaK_tX|o!iwyORYHm#71-S%pVKvN&H%I(J&dN4s24eI>dVOE{^X zTm8x96s+#2Y~vApwOiEIkB3cD#EEE?^Xq!C=G?a5^3^ z1c&Q+Rquk-)`-Dl9Hjy|vm}>auPaIcybAymo}XXf-BpT4YS?>9k{{#>+fE*)6k7U`m6C)Ta2rBT8Xo`sz5${`-b4Xu+&TwG8e2VVO>|m zFvvs=BT_QU&24jz&|Bc%>jn8|x8LVBG|Q`k4rN2xTihGGr)m|;If<~X0+p+WN)mr* z13Vbi+^{=IlEJ&spLg{fJ^w$5YL#hb@w_w!3kHZ$QbuqEi3#3ic$W|qco9jc4oKsQ zO*yZi^3{%X;G+}$-QURKnrRz)W%Jd(-@@Lk!nxD6Z3%nAWbt;M#d}+0AQDTf6o;fb zJ(ln&LcX;6EY-65{`JPHB)k1YvxQ`-;*Az+WuYB)eW3x3c1YU+-G|5s;K+#^6=haB zXjLAVY&{adJE*4jvh`F1Ey(__R#|!90OGCp3p%E(&IQ?9pay_kcdw$tomv;_0I{w` z0~7)V+7#qcIshV9$m(u4WzxZDoL#rK1;7cL91xi5TD;eaCfdp<6H-ljwHhs=*e4Bd zODSQQSFxV22{|S4Iu!*QRnaXqe76!nbfTgq_HM^UtfBx>22rW=;2aAHfR1&fP!WJ< zfRv121Ew5H7)TKiqmEx)b8frl$S~m+EZ1?Wit(15r0RmTHKw0;w+;AclSGIgH;WAp9z_Q=j%YZf*Kprd( zSBhkPeUmPy`__42oCZ98ctBcL5Q&k|k$A>q^cV&qR+_IfF6WCB>;{3zOPPx6th#cl z%xJcMqk82#o0^dCbp6V;+P#FA#uzY-5h#q)VZg)bfWxQ&1VXZ4g`=I4vv_LJ8*57V z{N*ct`}`GgoWMj_Q^w_T#pOCfWybUK8DGCX;o;$=8TkX&B&e#P?q&rEL6Fw6tVmfBn$5!M z*dc@{?%rjsU?avsUpEB~IvFNdoi{p5Q3g^Xtn-3z-=A*hpjS1tgdCQ;DcIi4NXD^+1fx|DrGNPc8AD~6|2T*Q};e~I+$Hi0Wpb?U$Z0* zI>+!-uuy`7fG=5Ol3Y*T0eLH!WJ#MCl;1xBALhxkp7!30{@INBHF!z-_r`XmVI&4V 
zS|P&wBDf0EAFRN@HK_$!Iy-wZ)Pnapc&I$Lo#z16B*i)%U8@*3cWa&v(GX zerS8A1}`;C!TtyUY@kEd>#}AABL-?vkD|_KC9i8&O>r{|aRC__fKgP3pz|V@+HPe7 z(6A%T{~)D|w!^spY<~#KaCC4=_mic-TM@oGPsJ2Os`Dm{f6Xx9N%Mo)=AIh>sWMgK z^k0j#=8b2jDK|#dEC595jaNzdzg{)xW-Z8Rt>H#(ZlM6IlULN{rp~hVdqby67G5Uc z&1GYXW8s=71q+Tei5Xf0Lp>GUOc9gh%s9a7Z`Qdd-M?mtGG(aS@wLBAk>?7K{_$-K zejBx`)9@vglIGz+3u`kEhk^L|0kr2Z&qcQ)@ATpJZ};eM+eT~W%G*sQ(h-4O+k1{3 zd~p)8wEMah@n)z*33YnBmcwD!CQQ?a=cjLK0G69N?-63aFh+RiRL@!^yox-SGp?5zm&*(y@y4ZWVQG32 zcE(QAHOyc_AP`F~mqopVKsby80t%*S#N+9J(==j=0l~9`FlVLeaZn>EAqEd0B2vzH zInVg^^n_(uaC-bG=SY!ABr!&b4`!MseERT+FvJOIS@87z8OL$NVLHGQ5TnCk3|J2X zUWNf_U6GSg>g9qG7~|j+Wkp;sGroLz!sEjuKK=TLaXiR(^neFYSy;TrH^&$-4pCk7 zVtvo0Ag7EF4S+0QW{l&Aafm48fVAXlagxatcM{jrK(6o9QUXVqmj%x+FPNqYAx5c7 z5~En+Qik&dgBB56mQ`F#u*0vjD%8bm3;5@}Ap13a^DVR9|5WQkS(F{AJ7A2qv)xgD zNL_=3P5yv)BwYEboUoprgtmYS3YI0;d=IgRZV$i0PVMA!D3`9MHR-f_)TVTCH!!^U z3}LV2qYK_{*V!ob=)<9eK9WrsYq$v4ng)R5teXo0Zk_P~b_YT|$nAf8)2|!PhbjT~ z4Wj1pusJiTybyv|77V!P90dg@mkutg26ZD$`+jyGXR8dJHqq%J`W9z*cLQWB$rM)5B6Yr{HPk06sBXn>FuSbVkC~+0wn2h2k7n%~+p2_CB6qPVtv$Hn9WLf&Vq_Al zZ~rs|EBxs;Mg{X4Nqvz7Ch?9i%e)k3Nw}|;Y8Katv&AYOW}O%FgmCvvZEkg(J6UMs zuzC0!rOZ4uUZtD^dWJv!Diz&%q#pmO5VP(g^?obsob0)lJlz}N>fG3Rb`UnNUH{5P z>%2YVZXSjAtB>mwn%t>|M{4r4fg@A`-8QmK34Cue4~5|+1Ij%v=C*|E7>hwQZ$4bQ zYt{sOdo+@@r|M%{L-pA^V9+NVzSbC(y>tJ>w*a$y?+%GT-)N8Ous;j$T$t|7{kHGC z|C5ekAplhB*~zwWZ#s6nfavD!&X8FDPED?Kh7bF_y~f+9q|;z--_b%%3?alh9*-D@ zL5nb3O(5V1BBg@!yz2SF zl0~1T=-@B}EOWr+y5g^Y{|#Y?`0()o!*s;5%wSH{mFuV}W`M&u4ocO!2t!E-SXZH= zBIgl&L`qWa@pL%ClgCV}Sl_k6CYX`3Sl5^U?}LIH;Z1hVVZOZJ`Q;g>#|IoWi+_j; zHj2~)@j9Xvl=NCed2HhbO?301?|tMXzOob4?lZvUXN8Y-9XEdRJEkwkdh`2`#sABg{7k zu!4kkU5o9$6VKV@rS;=ilfYm?NCkO!&s;+VR)zbwc#*f(2sE&GpU+- zYoTH`(P?ueBf)Hna$8S*Xhfa5%=Lh>`^m7{*!K65J7wx$Bo-}Y%u zXfBML3f3iy$2BEUinE3a>cy%7L+@uvdd6ZUs+`Pehz{f6F%FXVaXcI_O_SundD&0q z;V`wM=I!WS3BJ+X+H?1fu(KTN8765Qw&zUoHY0%UwXy|mfE`Ba53I^BApjuftM`~7 zAk?9O%gy`%4K!#A-pd}VVR~CNS`IF!7Wo2@7qCcxByM#V2LarHDANM;c_h@ci$?E( zw;6e61Z+0f>eXv(qswqT+fYl;Si*u6Y&6i%d5I 
zM9~D$3djw!)p^>T5!}`Vq09MDfy%U%WmdcskdUs-0tRv^5^`3-RGZd~!b!~|R~T{* z8+fYVM#AJO;Ro#Aq-Tbc5J3_NRwtf-<}`u0U-N+Se>#ezf5A(=V?Nyf)D!?wVWV(ZUB|>OpKSg9typ;k1dJ68{a#< zef>d9kGP}zSEI-7S!)fYH`?C&Qe_0&bs5;{DBEeI-S0cCmmw;%n3RTH31_eaO1F^_ zyM9A2*1f+UhiXi&Q$}xg(>`BGLF|Cs3?kPdF=o4A9Pn^DAvlMpFJD0ph%v%Jn52e5 zjO1&|`1buf&gToD9@w%d$#3VBjVh~^eX#SRZEeNP`q?80ALiApU zsM3NDrwNaT3CCd&=1}Sk+O-}Q18}OpIeG8L3fKqj%#kTGJEx*|EN zw}i!=&rI+kQ~|$giTXi9tJe8~loDQ^Ul2o->ZCDZ7zQbjR~S?zc6b<~)E~$h&78Jf z*u8<#7V<4<=t3z}F_PH#wxS7dijbdbH=Y3)7H2v1?|im-K73V>jE+;)N)e(g6scA{ z+Uk11*f^R_qxbE-9;hGadm-!*b_$}7)9wLo_CFVfQBNA+s^7gcm`uHQLVh-}pgS33 zULF?iQWdzj-@9#g==I>EW3ICQTrd3EbtQRLl^%Vc6qwY?=iZbV>h3{GyQkqm2F?@= zyr&8H5M`YUt76NW_jiMee$*uNsi=Z2bsU`c+wYVH)>+pA)kCZ6hgxiYE>wc6Vpqc- zG_N2haZe|$%3KU&nL^8L)!L2$%i5(P3Ruz1<(vTzv7&RaP{l?L0KyN}NY`TpeEk^q z2N#2P40yfwl`F^qx*7_)1zs)m$vrH!exQ5*cEd4l=YMbGv*cO{0TZgnl8YudW?@<> zS!x`Vf>cS_oHT_-Z{s10XfeMVcj3$_P;_a0h}{SynPoT#LCcrN9*jq5_%G zzS&&cUnOMCD4=OS549`0m(9b}?eBj?W3(KTp3=N|<>eVj>7pG0!3QCN5r)&I;dR&C zQ_`-i8k7`X8zJ0F^00F8&p~qRa+=+l81RSSP z3QeXJ0K8yR4=Msj(!dwuI?<=ip-m$l>K>#@8=T7^f3{ z`S=Sm6TW``4S)UZ?>L`lENKNZFpd)*9}f8R;e^v+K**(F97i1a5l`P%eEa?#A3r@} z96Z7}U;yEPJW?iHt}8BSuF8YRJ{gE{oUpDda#`{6a>mQc3mzX&h~tDMB~VFKK_wE4 zlP8B^a475*fJi4}48o$N$uVh_B7-F-jB&!c<{ImqONLm*9inqJ&KryjvV)ixL>AzWg@q`~w|^%^Gk=V{ zY>KYj6sQ)o-Mw4&<-Tr&pkt^1GwY67#MV@_>S{vLIP+@p*@J$V6^M+uRDlflAGl+I z*}iUYb$9J9?8-7c%td?qVO6j;KtxCEjd^P>aNUBm4Vl?~PS{!Onhkuos)w}jFQy+N#!Bp@x>w9|^ZiqKp6db?6Z}-QTA> ziN#ss*|XM%D5)T?T5n}pu_$e8Dp_Q75x;EF=QiL1bbGt^Y(VF@vY>V2RJQw>v{;ybw`zm1(&l{H7}(a8UaEGz~OKJ7eoD(Ba$#9+!2ag!U7;`!ua6C;IhX@}S#I}#ss3CU}Z+Vo0r0bgT zx8J_vx+EO@gv12r#e*Yx*ef^}kaHMDj}ISD`0(k7Y48Z^vLKMhbU47fU-0z(E53aF zisNBKj8-K&;^9bmp02o@XRK+3a|O-?c$eXv2a$*O5jkhPJYVo_{Eo+89x?aJ&Q@20v;BKI*}NooimLcVZP|ENc=*8!l33%(<=iV`IH-KNSzD)wXXpip-6rmEIoP zj^?u9{vlIA*&9IQ#{EyU>GzMOYkwe91qM@5ZQhTf2lek@b|Yh>v1fBP%Cm%J6!r9U;1x~iTOWG%pdU9?)Y<;c zV5p~q6@cW%r*8@M)$(7hSByrAAhD}vY!Nz9u4|~)LLp-iquN^wHz6Y3>#y{Bzt;QB 
zVq;Ti|0Qq{YcCh`peV~6bK7^DzrQw)4hFa+RY<*=Byq@nB1I1bWzP2d)TnpW>!j4X zRu#McUD?1=mro!eG<}LyGoWnl!j`kM)%p9iY|1DK)uMidEo@CgIVxBMvXdF>N2jnF zwP2xqEw9GBOjc{f!Y*59ZvL@pVsBD!!0R8o+p21_0j*mA!Z}yAE4Dq^DNqv9nAJ#u z3dGtyaKi$*PfF(A5Tn<2An{6rw??Et;FxYmqa?>9hW$8{svYh97e@d9fOJbyUee9pes41%f~+|Pwe}1B^g7rZrmU3?g#5VeAC5L}k)s=Z zK79Cul!52-6-)~**F{57g>Lw8JmSy4{(@hB`GC`L zL=1$m%nP^x$HRmVA3nl+!uk6XzI^?LX&j{E%4dH%PPkqte1Ewj7m{Lwm=WV30}}?g zoUxXK?=NS3`Ryw{e)<3a2q7qiE&x^qO<9TCf% zz%0Ncmx5)LWN05eT<{oTL=1zLu#ckHg@8DEtZTw$K4Ta>Vtm99BG#}Vr&a1bFk_4n z?(u}{vS3+OY(Ym$G-%4?&+V?B-qON|dM}6w{M!(emV@%P)!sq`+V^e+fQN+YSO`hf z1ujlDi&IzTAWh(v;%yQ(l^K;Std7i1Ce+)O#yYWafQX*=X4{6ybkDQ>(A)37qvc)a z_Z6rmZXGDjwY-YL6~I}|hdQqsCf4^!3Ve}L90MS(j8~G}EbKhNc{nF~riE6ivf%ud zM%5WWRF8pUts!@BV4Jn~rlRt09Q#lhW>(7I&eFVtxTcWwR?r(ms(OJ;=`~ZCc>;PM zbX!%v-LXG3ZVyY@j}{ls$~2Q!sn*IxJgPlaty$OPf(q_}R{{WGhRb<@g@=X+97d1B zG+>$zm<}VxF$x=qcbok!P4=@L%J!b?l-gnxqG)VR0v@(&xz>y*O|j;(o5!2^=FPZ~ z11iHATWFuFpqMOSxtJYW0Rnf_tZv`0(n8;~&WRLbW2{Mw*X4{QW$}h&15a(8=FB&8 ztG|nlNxzQUb9Q5Gl<)P~S?}XfibqQ7C13`Sz&o#8DOn6zq*T!bppL26JE6g5W;liz zzelOxf5bd+=2-&oL%wdGu_=&t%_dt7h-|44?zEjq ze~%LWuCs8n$q4=jL-Jas`h%jFlJbypq z<@q8xA#qTn9@l$aE^|Gf%qz4esRu%kGd`6BB48Ls#1H_I8hh)yz&l_%3~&w@qr+*E z+>8(yfJ7NNWH1L56l6|d2TX?v$KwNXCVcz$gui|MikItxkH3tV*Mz6vK5LXg#`o{f zZ~zX|fKQJP_~pYR9!?XEN2xOygadeiBPqRpI393*{*Loi5X|Gl5$n2Qi~&h$!{#+( zUc{ANRzfb8V8e&1?9rxaWe2x z?u3TieMqW@YT@y?^;L{rWtkwf zGfouwdzHz7E%?Y)>F5>l+o#VdZC*PUHis%f9Z{w{-`OYI=lkJts|r`^-|e+5EKbIw zP^ngOr_{wJjkE4ET9^QKW>HwHWN4R*=*0@L3Fo@p3(}TroJP5uP?c>sg z9643`9l^PRQlxk&NW!(G+I*XPll7$n2lLvYf)4?td(9WSoFTKgvgGGwdscbv;3}|% zQX2<0;|FyAC!q<$kJnE>n$@apG6hNS{4Z@bCT~dFA3Lq#o;g^Mq%2@iIB2|bwwNw2VCa`prrEhR-x;? 
zA8#rF-C`}+D!dyI0#w<23q1a~=7>SglK5OzE{q|o0S^sbQR`zaLTg%ZYh~XaEA6k< zI>-DkD}XG2!`kn0&HiYfOyM(cfrTQOH=`ESGRwPj-Dl7~yLrF2wwu*;>nXdZz#I6m zJS+E(IO(lLve9eXy@*H{q`Qz#MYAt)MV zMfO8rr(P@Q8|qgnKbSl3gVbSBrZMN+9QZB_j?5%#nNK9f1hKZ4H`fR&0_nlWejR4r zPt}47G-Z}=eFJ)7wHPZ1AW=M!^pLbEzGF@`f z9I|aQMm=l$v2tXd#@6=F$^eQU_V-s-s~=+T7gff6?*VeIS~59r5_5BT`; z0q65Ga#|1~VH~C2hlKq{0E}f3irrEeQVj4weHDy&xqixRODh5Im4I#-9sgT~?f5&X^7dVYgDtY{>htEa0Tp~$=o!7^fW8M}wy&)#YJt7u zf!TO{0Brewx%+Vd7m(k2N_rPG>^am*)U!h6T%^`VX3Yfw#4?|{a4u^1-yN9SRurfk z@KEsRslIL=g?m6$V`tTBd%vkwd22E1$d zCEGIQ8Y|C0tt(|@A|A3ITr~iyA&RJa5;kMH10e4aeME%dJPKu**IElj&Jz=_%==)@ z?lyPi5p`=qD2GL`vp#=;rg7Zv^=Y`3y<2R`a~^Z#v6h_!yb$a)smx&QM( zNCRB5_1bEdQ&?oeG!8f(4WZ%Zi+Y#L2`u zk;qf;J+c#z$6**-UatdkUO_G(z$x{7M#&jNh?s^!!c5bO!)d}{8c@=TI7r}@-? zfk(-+4t_w&gqQOfpT9hT;qj+Ge?m&Yb-7|3M=*=Q^m2Yehz=h=KH$?Y4>%kLsja4A zpv#;P#=#>k0tljyC|vOL?HfLRcmxsf{PF@K#yAd$W5l5_PRokxykJdNa0VD2*JVKg zOZ|^BU|j-oN_c*n@#V_}zyA7&5U-$Fig(!o>r860SVqPWkjD`zUBI~@gs7bTV!iQ1 zxLnTgo)AKWRA=mprl}&5auOw4tv=p)T;>HMOZW(L!R2znc^WZJ1L6?DB}=YDVZZ~! zFkp-WE?DHStb0n_zH+!5fG{xedIE2+wA-b6g~e2_W)(7J>*h+hq5SMVQ>{InmCHj8 zfm&RPq5Ss+C9l<`Czd%VV@=Uo3>yEfWd&Q@`fe+dT|=J4ye42)s$1p#-ww*%v1%8^ zs|QrOhJ_I5>pM9)eMrf@>y@2FmOwW-90CSpu3HFIA2Y42Nm>BNT&o5oNt4m7nlhMG zdF^sM0zgrEY;jwx??Dr6L-m4?f)s)9vK02PuqhA*2F0rdiG6oc#~`sO>IL8oE)u%f zl$zJ9JJ$bRNpV`QVmqczpMgP=7o^v*HDlKCY|58f;%`BCQ?^V2vD}DkKUoT8=EPW5 zVFgK9t?)L(q|c>mv^NIS0+Grv7Nh0(06qv)-ZV{8fN&J|wpo?UkkH0MLoMvtAxku? 
zM$GmH;r-PC=eo9<;x814a@b{^AC*J>jP!~Ex)-Gew{<0>^jf%QYmLRL_KUWbX7eOljVjjq5Lr0h#Z#qg=AKjgZk+$V?fz8d z#{T)=Xm`U2yMLEz;IjQ?yZZHh{1NT`nt$8SWZ_gb6!Y#B?ysTeLDBW0R+CzmfnL{} zes5z_)M1{}TO-@6xh6y%1;_U9fanX3ND56_j-N55wsPYj5`FjKbU+*&uJakCtWv}_ zI?Y%17~_bNGnVU&by-vvL~tRbb%myxph_b$Z=-lQVho^CFbtB@k#d%(q7X&P%NZp} z`0nxHfXCAWUS~X<4*2x(0jFs|1Q3gQfVlvUF)uS55r!$kxd`t^T;>)3^_MUB``^F8 z`4K~$0LsYu1((YThC`YcfEmZr0l)m|5g$Jta6Cp#lho+TnQ*x-2*+{6m9I!iTsXmd zq;`LuLFM z_t+|y`<+4+D$84@s9GgwtG_@%bssW|H>_jGa{EKCNEa5i4;j3P2QEtY>U!X8V76?2 zw%@-!%-Rd6W|*;OQ?hS+v)zFlN>^o>dW8&a-S_P#Cj=1CqRMSg^5C8C=&r+^ z5Jv5{8pqf*<)6DDH^7mzg83w8pRui_oD_he0!*i_7d9i6N%AZW>&(C<;MeR zc(V1g1DbENo3N|;)}b{@pZo9dbuDi3sPdND=@DaIT5(1KXYT$%3v-+BH|q45QI=d z&ri#3?$7ZU9ERxOsQ?~0jsqUX0mpF!pNtbnt=$v91YUzJ5nIOa~AH7c7!90f{?%xm>^i zro#xVfV*B1Vgz}QVH$8)6E5?DdCgeXtXcAeB`xq;gp7FwJHqF$-*9@E@aJDYVw?`j z3#_qfDwxEg%kV4=G~+mcc}2+uA%u>7MM76dC*P6q5i?87@v_cXQ`#KD3~0=8qcVw6 z!e6fQEWFEZfYo4F*Q{059gYtlFin$GQO*s5yt#O*g=DXwzi)HN!PD5Res*KF`1;nL zc9^IzCqdB?^9_JCw`|Fa8)a?PS_ z^DnP0NgTVJ4uTJ=90dm-M1c)K3JZdu)y>g<6l^qa8_>bkuD^Z18~DfuKJFZdcS8i; zTyt~-K7M!**?qduk{*26zc(R66^VBWc-Jihy19r#p9_TgHA(G{l(Ld1uvnz2JzH;8 zuKKisasla775|))7LYW7>=AsByZ}=u4GB=YR(ptUrS!1xUF#=%o$m{@!B_M-HbG$pSf(F37bL#Yvgz(-Y>b>8Dhf+V5-F5cQvT1ib znftw}=W7K4x8b4cg;GFeAzek+y4;49Cce}p`Fu8D_s>1mTGo=l5R-fDd4KL4y9FP8 zKa5J-yjwD6tou;_#l^lV)v+R!s zAq_<6ZGYmj6;A);u(w?=&ovGMPRA2s@K{%=p*IXsB8mp9A0?r$*LlYEdd0dFtPA71 zNOA5gmMDWf8zD$UwRrY0j!0Qj7Y6S&Z@T43dtyvOL?D23!f70FoJL7dCm@y7Rr6ggaHsk$g(8HDG{6@>^cPFPaGyktc(5r@Nw!@S~UULgLV zQV$&>)S@aVgv;fEZ(pDA;qd`64v2BUoGxmWu*A8hzy;?p4kK~`u4xrz%{gRKnpvpC zOgSefVM7}_${VYeQmqmLNn&0HACOYUvMdPRgCNPK&dI@&az@H4#&N(hu2{~O&GLM6 zA=1w+_}lN_f)II*_W+=Fa6pg3jfLK5PzW0)7E{zVKb=Lo9t0#TB=_j}D)_K|{xRCt z|5&SI2B5e4Hp^gVDTH}Ky7!$aw5Gh8RaQZW$)Yxy8!=DmGyN7|s&p3?f?_f!FF zz(*;?OzUqx_j~d|einS}ug$!z`*9ESG$(=nW1}#Tva&TRuuw)z{zp;}A|W_jzh|-I z+6h$SY+bWZAVPMcAeVwb45ATZ_;}wn zouO!SiXu>zJ*y+JxvZJB@7)$g2P+D!q?zms(P^z3tyfbSsd_`n?g?&Kpdd=;=BhnV 
zGJuJeXkNI0vJgGF0=NXgl`0v<;=jL##8ek{O3)!|v{qnN;a++$-+4;zZpE&T%v_U_ zEfKvXl6PmAK7RW9J~*DtW&LRx6W5AS?&ui7{-Wch!PgbjECb9(-7eu zp_C+%Ng=@E@fnhaFpMMOIAUE3e*63dU%oy8Lp1Fhz``$Mad@lg(n~yoYxq01p(8^Ya;BpPq31e8m6tZ@*%i4p^53Ij^YFno#o+ zJb6U#FouHU1O>%{S=|aa*)iLD)2Nn>WQ{Ymvjdl=S{!x(Pejzuxsi zjDFT<9c~3UR=_N2%yP9Ry;@s9rfhEkt(NRwLAaxs-o#-3i{q09j8Vs}9j=-%-ZWT+ zj1Cr~=>SO43i_$9)ch|!1{@bvQ&W29g_if?S0(_P7xgcqdOJS0*lNzF?&+WSX zVb^0xzBUUtgS$jz$>kYhz!+n7ojdPfE_$=Nq60eME|diXE>zb$Gpf>FzhC+!^a5yt zuIx#*^cK{)ZNsKuqAK36TM;d+r-EXjDx0tEMf32qkhNRcXJcDR2B%W{V6|9eESklR zixp`UkZuWgDB!s9ob;Y)=v?kc&_=P8 z0#B|2m(9Rw7}mWow;LJJ?{_r_Yyg6drBY89HJ&&!I%2llb=&XuT=)@aXSOUH{i)-s zbyqyWvjaO;ieg`ZN>dqsA4u4Kr>UR&OEsDI_Ce?1lt%phGWkbFpWpLbB0jnkhPQLO z@7vE9o?h!)^F(hwB0uW??V8!ITg|P>r5ar~eaGwb#gO$SwxrvBh}S}>#wE2=Dc2#! z1@D0IaKd3402XGeQi|jZ#zD@IK-IW)ez{=Lis#l$CYicveXb#q%oO@JOdyc#uM_uh#mqEMvucGV!U{yi>BY@42l6UO2HMd z$Fj`0E(<j+wQez(Gg#^RgqBxAuDsS`@IJtMQDjoe2tfyS7_hEqO+a$65Tn;N zVY9l}l5W2()M4MYho5Q)rQd9(KD^QP8xf85rG5k&mSUj;|1W!Qwk64pWC>ouqN--@ z5sb{L?&&#a{{O$`VY!{DhzI~RGhc!XGP9~0JWda{MHPxgAj_329a~W0 zW{mU#=_N(;()g&J!ppv1?&W`B1Ed<2L{$hi2pZK;tF<;WZLTCLB8G*~wJI)}jCIEH zImEMJSY@w9OYotCcPWG+gQz==ai^A@;Ge9(%n|EuH&^B>>Wq0Z_Ymzb% zEHU(IopefZ50~hhDJfzksYB|9>}^gL`(8H(x8~(yO$aFlclBZR-%B}zVf#&zlRW?w zIa3Hh2m_qgx*l2>RC9UcK4s@9XrvFWD)?C|jyF8}#h9qEzzopNJ8$bn`$HlZ0Dww{ zgF~i#VV`tX>wbiQond%Gyk6`rWAFN&wwH3vsF7!~T2=k~d%17M1UU!CymwOZZ2%Ns zthY;k8`9)_J!s-`{0oChIL)>^z?+3HaKz^#{icJpn?Laom4c6XU4r#!^CP% z+4&*P1ID(kbv=3?Fij&4(}Yz@HiYN+^-u_dBVCCt+412 z!iaGkLF^EsSWD;gj8C6F<9Ih>oJMThf~BmjxK*8BITXAHhBzQ)#=NeGF@jkVODn#< zSMrGjN<|JuUo&p`CdB2JQIZK0OUAaLT8ZAv@1zH5P8*^R7~_CBEZEAvDBG>}J>k25 zMk}fq3EggsgyhobGhdW0o#*N*^6_7!wL*9HcT*nrP^+c@Q1`k-EbIVAWz`^pk^;y9 zYVTkF(C+(va@$LDleH38^Oj#9wAJ|Nl;Daze_`(T(Yu!o5rb(N`%ii0K(xAjQwR47pFP)G48^)W)B{T-QGsVak9I)U z`BeFlvM>os!oSH9t5wmAEh!FZf0rtcuNAeI4G_5fH!O+UNHv17Y$+A5BQFu{A!3Lg z!8;7WsYAi0FxPxQt8DMk*0S}vhpGY%)<>$4w94?jTMKPY4A*A)VO+8C)e!8O7>R%W znR1yE^GB6f!1lH 
zg)sO(?ayz_O5I*GBKc49OkT}D;}R)~-J6vxpt{5mqQ~)gz;u|vC~8Gt=kp1GhYu0KM+A6)5^`CQQmcZieyFcK=Pe>b*yB;%y>0q+Iaz+XLQz zyTji<+~asSqU3^g*{&v*6e}D8A%=oAB@hVHIN_8};#JHEQcP>_+YjD@2U#^`6NSvU z-jx^ThN=C}~uWVvmw*3~$r5PX+^=Qzm zQW^>8OTxG}KK`o-Zd3KIjSH%WDG_M_L;19`Z#pwT8sn8gO($)z) zs|J#4dws=K<%WFm0R zS|>p~L0O}fX-i03mgwbXa823ktPDXRgk68G3=&u#L}SMDA!y>cI44}?TPLNJd>Qt1 zb9_i)V-Q7}8MT5ns-oT*Db-L>!U(pS9bM0N?mCOQ*j(N2M|B>sdDMYEUTSOK6gSlP zbEh1_)pf(LIzqYoX$D9!islO2DngU2d40QayBND-uJ_va>oM;g19iRn7SXVOTa?3| zo%;)bswbb&4pPetGUh0mi>%bQ@@zS0sX0;-UBD%4 z4ZQ)wC^^v10c(;7!sF8umUYEA93-#MIY~o^Kukhxv2B}6`r9Ws7cjh&z7vc_nZZc# z9ys2Q__u%mfcyIa;}kU*K*$-G*A1V)KH!&MKjGWM3Cs@8M}+e-BbS0W6gVFMl7b`T z2y32^0l*Q+6CNHP5PX57f_FG#7zZ3e03QhHn2?tZ=ktnnUIFs(euSr0yg(48|8zd% z^QX^ve}BL$+8@CF(3ngRjIG5z~e>Wemdr?;X;*3I>lE-it!Pkc4)pi$!P` z#o&lBh{3A-<2kEp!j|Y=u&x_?2pEP@lT1Cb7H~;vLx@3=d$!aV!)y)K#o2_&Eu5$w z-zEy}n6r;OD+8+>e8Z+>R4lmqHkedI&uPshpW=bYto_*UthyzC)z0IALFTq`_i zxg@@;uzlshrtVi~7~Sfwd6;W$2m568I$sza5U;Ueq;JXEsu1q=0KM!jMUY;6-;~we zgFSQB*K28h3^9W=k(W*BRYN+PN4>}%HLR{}F7Yz=EKepg^dNCg>gI@BUr8|3cnq!; zdm{S|RfE0lyPHq(%Q3OYz2h=G(?bLl*Ya+R?bmCG#sKqU_`A7Xn3mdR?bIf-B}HN5q$ zW`@6a4uP7X;T3WqG`(6mJWH)3pvhk5qBVSiP_!$3uSF?jOdN>h5EdGGb?RiLey)Tb zolJEE$GjU`yPx#iAz8hY&Y0URkA7(|;j&&!Z$|Ir#qJaBN5)*7D1}H8w=W|vx;AR7 z@PVBc#S!C z(PIcwC#htCUJ+-QR$2;c&hrFv9_RUluiqXqFDratJUl$$JTGwY*p?0RJcBu72p;$M zBmVZccZiWN#j%#+SX0KQFW>OXFQ4$`%M;cWU<7~yLMjD$U9e_J^d81hlyC~TA4j;7 zG3Nw!8B`dLkB^An<8V0QFh;Syq5~fi^1Pv}1*a`zTYZpeqr&)j0mdF~z-nYaP$y!p*uZZ*~`IXpdSK{v|}sfw!9 zl?e*rXv&X8KA@ghO-%Q~d_`oq%XmWos;XXH(Ds~Rq5aU!miuey>(U3k7CA6Vg#qrze{U?7qEyjY@|LXdpkTZAO1Cf> zsbj+ttVm#p zbguM~5S_1LEJm8Q(blDduo|`e6s{Rys`e3UE`TOTGb*KWRZTT7&?wJ$`6?aZNR5Cf zZrc=0a=BY8gv>&MZf=`mRJRNedBo_EiWrNMjEbf#?iZCDz?L_0V;?x6=JPmXp6enX z!8tjLiW#Z<@n&YYL~yQDrZ=Hp4nn;gJYtlMOum4;2ek5e*5bD;ma@Pm%}G-vg5x>^ z#Jrkf2uZ{)tASPxkZKsAD!Xks_A(+|7>-x_*bGuw^i9UPKxC5`OE+X=e;qqdsL6^; z2(IOOnKK0`fr_jdsShIGCpCr(7>$$J4C9`#Wp8}I<``b0MaVTG==8f;uf;{pXHty3 z(m%6fEsQA+8Ht>TENpwV8SecGYY@AFV#P#t_sEjG9K%;&ZM_GhI*l&cmH4<&kaz2X 
zuU?bCUsLM$tBO zHCDR>Ai*KQI|h7n28oOly}<`5LU38%*so*qvC6Xx@R^J&%`x&Y3=wr&XC;cyu6?ta1$3FA0m94E=e%#6p= zil6`S2|xex36GB}*5p9m!I4Mss$A#13MHL$7<74|<_2dqoavGeD})w_;J;4*shl**)(qQ~Bk#SE`FtfbRQw<8lT2*K5?R zH0o%n?4FS`Bc*Ev{#HNs&$U-Fm<5~boG7bY7~c6^?nm@NL(F^)S@{!Ng-Swmc3?@8 z1RP~3e+L9cV>T&j#%*A45oNj`}F6jj)=MjnJ{ z#f-LQz&mJ_<)V%SdlWtN87)bdh_2Z92x?$D1{I-UEf(3U`6x1WqzTEEXJcO2k}@&? z=Zi#eSkH#UNTDDW528U~#sMKno&k(bo}4^qfg0E(bwr9P!PM`uYo$sIXOY#-b=&W^ z=hoZrJ5936bk;rZy4;jotygYu86VGe%_4?ofRvgLPuF$#@Rle*b2c;xQ+EZHADK(9 zeU@m9$U3FNE0TX8wtP<<^H;W8F1+8@enifWUFSy_AHVPBP*=r2V=lUGbdKe^3wz4= zQpEXCfN>n~?%kc%jayJkg7<`RjEF-cxkxGD@$m`gvlIaLE&%LtUKThKr)6Q$VYh8V zW){XpqIQO72H+3}uZ6n@Uw*^S zzkI^O(}Gk8&PTvSqymI-oKSMXvaS*qXTgt!5D*6`AaWQ+P+_bESh5sLdHD8Cj1TYO z0r=o>IE;ADjFlOA-msicD5ccSDwSMKk-pt- zH3DKaKteT8T1|<|A5&D! zJN?}G)Qk`D$o0h|fzJ3a=#|v~xQKMUIDYC;Cb~ZC-_mx*haGCLND0uCvc6q-%1W-0 zorbs1NLyAWw^E%AcF+D9E?;}`d5N^1kx=d$<46brAqMy$idv~k;{Kl39=g}Z=(Q$L zmM$VvBsrg;xfG<)1Gc=Q?-xzg<6OOa>NQqEk6cPq+Ph~fS_OQbS1fC1fRtSGA_Rs- zC<-;P06#&%z9@~{S1FVAz6=()qD5Fqyo2qgw<92JM~ZP-P>>yvbzdZ{gWOk@gdQJm%mi$cpZUCbLiN;ZbuFNc&qZ02;{vOzf?5p zA^XH&Yz6Y#e>%@v*ZxZwA87i3%Wc2nz%z6d~~dLoc}DK9qO6Q`SlFA^LBqV(xa|@v5|Xg;_vo%|chhLg zZq#qgK`Us+O3`{@eAI$nMWH)-17_8XTIBJWLHxsQXMA)CF z@-Gw9iMg_e)u;%GqUBiXUc9NPEHj6W8HnR`w@OAf`28c7xE#1fftl?AVP+yzTc8in#|{QcP$NUvz>Yp=-o>cCRt z2$$$dn*ZIeIuInA(RrRl9z(+jF%C$$sa*7)kL|9t1-bL}$1^;a7owGV$m6y&?3 z*Xp1**=V_iLmY5-cfc?NoSq(#(hBc@5Cg_>kV1h>*tUf8dBL_NY@0e`a>3Jlf)Aq< z=1x)sd*?TIEcuVBGsGBhJWhyF*KU?%brd1D8HWgPMp{=4VeFipgp!2W6~tPh-16ou z!UFvK%dhzS*F~B(wkjIuboaPPx z`yU_i<>3jdgdyR4Kw-kRCM?^A;Jn9hm{1^|e}Q9V(6(W54k1Pr9KK@HR}gYa!iwcQ z{@4HfcML)C)dbw%O*qX99AylnL&_^s35Z$|*awewVSN1b1;^tN|IhLN#{JzLN){t% zTN8>NJclu0-7*%<5jjp19?oa6!XQG-APG5bpjAASJ|xWN8O)4v8ZnKN4qO6q5)T|i zIV^>-%qviYwhRoc%ZfS7@ZRGv40zhk;F5q6P|}8XcSn@WSaYfv(C09TJ1;DR8rY%} zm4hED#(1v=ni5(%Z=dt+z`f-)w$OfI?p$bGl{fCf)fMXVLfF{~+5KlUJ^*xHwwEJ3 zPT^1jFDp$Uc6Pm2uHUq0M4c1^3}*O z)~&wZeWAhcnWO4yxdFlJy|d3I#b1ucGwZ+qyWNj>#(RUKTwdRpy3EKB4^hemTWJ-~ 
zQ_9$KmI9=(RB09*K9pBpbEv%c`}d;q>M=-We%nk z%lMEZg=g+&7KXQyHQ><`<`O}||H?#W24gm+veM*GR--T_M(}YrKE_Oy3u*Owv}nkS z%X+`ghzL}~u#Z8E#fsCn&14XunjL6E$siwE{6yhbTxB$aO9nPZAwr>q0FM}=7{iGH zbn|Xg1daM=0)Ht+-;8=Dy5e@xG@+Gs4N`dd)HjqvkpZo8@~!>4Gh8i_MX0E&lCrFS zZ!lE1lY_FhZGGN`Y(6I^S_BzzY*4jloDVGdPhwm*`DXb&)q(Ls>m5aaj46YJxl9d1 zgZ{ycVs#K$znn8zprI|g2(B5I`<}n9v|l(~%s3OZ-t%kRQL_dpXVSrAIckk};{=++LX+g@u z*gD3D!!QcO3JzdG-U?2q1@l?Rm$sZFCpSYKg97J@VcgB7uZ&Vj9m}$AgYy81!Q$9~ z3vfJ)cz2wz%qL8P$A|ZK`03pdLv+}-1$kTG93c)7;~0_h1|q^R9uUV7zx?(q{_*Q? zDC~gKjHmh6n(wrxgf&a_B#_5AdECE0z&S?H%I?dW@N{1A>B|HD_b(sua9Xhy0{Ic} z5h*d&!dP-fUS#dru@W&A#**n7!aJ(O} zl`O^rC#>tDG?orw91)Z%^L$z`pBFecVi*RD(}ZP7*iyq7hZvEIg!f~J2&A<*R5>bH zeTEDvh~m`irI0EBIGxTNeomONn%7wtnFuSc(192SxKScNaR}mJ-BxTQ7HAwi@?35n z4*%R{N{uOWdRUak*ijQ_VP3e~SK3w4{y(iXYU{^ncZmdgWlT-R#zrrr+x~Ft4-H3z z*Zg=r^r;!7*NuoO^yqu2bbn+MJS?MRgI#IFBbQwiul>mP_J+;NO_Un@-Dq2lC?I%` zAw(qzkjTjQ&L_cF`fi}jSuGIB7tw;M+{wcpYwB~p*q;}oJL@h?D!g{r>D>ws(Q4mS%c?N-ToF?2IM#R7EYyQbtn`wTTwbu=+5ztPdFf>pcmVu4TDcncMF-g%O9k$j1LaY*dzfw-=K-w)cChO@SDFml$nwyg6b zPL58=hU}(79l_B{jai&ZB;*X)kwmoWcW*_=F0Vlqa#`8klqoG2AZ5mu3W9IZJ#Ztl zy^@S(1j1|t25|vpMsW_=OZDxH0`d+N2$UU=>^od>*_Ple5s-{hESi;TK4IUC6;AA* zqQnFHdB0^~+}h9AL*2u!%*g46`Qo#E`y+CoMKA=4R&(gg5%?!JlM&VzIqxA3NT-$B z`>Ak$&57Nj4ydmEzw+GuGwyX8YWn@nj1Y1yFZi!(&57IeDTQG{Aeiso-QjSUkaNbe zt^j8#I5-CQ!NUiSta;YwW!dSX-g~Jwo-&A%u1~Mgnr=-9Rz`A@wM2yUMGWec5;z0l z{s`|p)@{c9UBun}gcv1lA#+9m6s6JfTAgGCr?~e#kzMu1kB>_*{jGuq|h~GYb#d$8sh#(&Tcx+iJyKgxom5i{i8^&>j za}L8`A#qL0Vr~^`nbimp3d$im6b4c*cz8Nt7$e??J2>YNfuDJXbCS(Z8&WnZoKI!Z|4AY78DRNV54b2=%HDHr}u8U4kV_h!f&E>XA!d z2=aQbaTodM6~^jB;9L9m_j#vl)Y#VG8}_{^yFv^i(FTw*>oK4i+4^ZoLS}`Tuo)#? 
zGD)pJ1P3AydL!-l#nYgTL92z6b2CDO@)ihMU@&u2EN%==HLUuf&6$NID<`FKHwQ$3 z_Y6={7$A_%xqhJ=}-D zBET-Mxf`3etpDrQq9Cl!otGS81v_WZ&GxplNf}*{l|Fap6f~n!(GNsck-oppYwPeC z%k6ixAEmT^PqX_Or0Yj2`9IVM$wg{u{iTku3j@C}QrW-|wx?+l<}2@j`8>;cA0>4` zq90Jgql{%)u*|a(Q*6QcGd{F3F281}k#R1!C2IUn@ENr+1An!1S zfa5gbFirZL9ytNan(^~5pYb0*f5egsm^>s2)1wr|wr)b|l*H*0&gU6J9N;wp+d1Gk z9RLQ-=My-~p|O<&E>5i-uh_`~LdKE`9!?9U@qj_8DyK-`Lj?c*15zrO&kMLng109R z3K4D?u`V+{J)96fe#UedaesG&A3Vr2P&N<*5(RQqPD+x^2&@a1EkO<}R|h%gqJ~gF zN(sxdsR7ccRZ~(Aq}8?1LtrGeE}d|?FY5;9qa@cLVLA**sbHDUm<|UV4+C;qu&mM# zkguU^TR^)F$OdsRBSe4GZLgz6XbkehXx!BMPwoFCM#Zw`BGx8=Rw#`bQq)ZQ3aY|b ztZMPN_51I-5Y+d#KB4$+U%*8qf^Z#~c)4A9iwU5r_;9-(6)2$;r* zX^cA84nYg5*|W16m!gkxe+L=W4%(Qq;_WL@fG~HN2inG$;aZ_UDk}IzS!Vz_Y2ATt zO_S9qp%!7==j-f8y6C)C_EP6+jzi{Bjeu@i1Ut`?*J8#`hB%c{Nw8cM2H<>v*9Z?e zk>ym>xZteRvvTeIiSsWv%&1W_5kWL_Y$dAu!nj4x+HzpdPc#)!y|87 zjbJjkih~+?(Y0Old;yd4Hi-4kEE6Q7Ci2{o2b3~ca#;lOcb)u_>M-d3DIbubf9tBh%LFB%ScVVRk<|4T!ECz|!bI*CflgqG~AV3@c z&gmhqT^i4~WwaUE4uB^D+r8~fs!pG_e*64GW>#oaXI!Z>$PoeAY3`R2806*z@B}%Z z`680UO^!6kl@?I6`!i(C7C~MRb#+6u$HkJC(1wev<$7`M*%4gBF}f$wLFle1ruXNs zEBn6-UUwLh+rMw5JSMZ<1#RBGjHjnZWTsl|G^K>Bd1TE95&CK?jB49C=L8hSaT+mABfuLAWy#d1aS5TvDfBhZP@rVLK2np-MBhKd;B^N~RFbxyl-yd*) zyi=JjC237k1I;{@L_!M40wM(;_0-YXrgFN3CU+T z7Z74Z;f!^i@!O{_xPN!V;V=p0X^dFs1SGKrC3VcH5?m^^DWwc55Cx*jcA?^Qb|@1G zF1Le`Lr@twYx28 zZU>C5#*7Cs4!rlZmWNY%U8@@roK({GL{*8G*=A9Qs-oXh3AH|%Lf`*f8c;i@TZIys zg4V=4cV0$$Oncr7#RQ!J($^%R{xInKuovvGeyU>}%?dMiVKZG%{ zK8rFHq-HDQ$ei(_N!oyLso!5ABu~b+U%h=8FNiB8CM!F`9#MU}`bWi(S?(%eG7R zit~Q%SQTQ)@c>Mrv~h0VGm%GHkP<8f{IWTuia1>)2(e!y)IaacmyNCQNb`p9FM{V>mUR-HMdb6pHd4}beN{M*0(8yF7X z9-r`ZKH)Tc9*(%XyTfrhV2BYRjwl@PG(X`VAHU$!rw62s0P+ZNRM_i= zd0B*>-AEh&NkW&+NQx!f#9QkL(?OtFg;!C=vlQh^g%QV$gP%YItgBoctVM>7gB;*P zgpUF5@9wai7d)P4fi(GqVU%KmE_pD3W!>=k^Eccbj(B%}k7+z$8#9-X)94^?XZ`?Ox{2z6&w(u^U=42hbYb7B`WTB6L{8XCZP67Iewu&WV9i7+GnOT{S#kmIs$my<6VtoQv9vx)xkZ z?7z}@wHYMD@?VPsoJJwb4MwKkR`_wPcMiODL2@@_x4x(FY`$S!c1(~qp4Esqr-Y8J 
zysn0fSh2f%m~q-Kk=CE41N1V=oIBF>zsl0MZgw`+UGPsS!uYn<4EcXXfkHFVWL_Ns zA3pqqTsF+}4CV~y9mZi4YHmfsEbEHX`HW>-!C*LrE5T{O;F`P4C@JYZn&Dhg7v8}K z{qYVUITIWJA0wQ1NZSe*9FE5c$HOGf4Rcr&FZpTYl%>-HMA+61A3uJ^=PwU%A>#hs zPe_^Z28U|CkI>xPtaD}oF{fO$(vJiK!**m#qQ;Xx>Ax16P_VM_8l7o1DNIjtb|ggF4) znh=7=FpA;fon*ZqrV;Pn-JvjJUM0#>a;h~Dyz^jA`1~&rtc23q}lp8C5+RE1Yy}? zmgkmAxRR*8WuXu5+dl)-2~-D9Q&wKtuQhUX(^&cM)_8Xa^-w5D9@oEBS@&vyv}9g@ z6i02Z^~vV#Z!gr&f3Wom<>hlok;&JEbv=m8gZzVy5ADv2d!bj4de)i!x2D63=YS` zh-r)nL~tkqeJup%H7>7Z0zia$-SF+(13rKGiY;f{zyAp#4p`EL$Hzx}{rW`;gB$B> zaJZWW+}}-@rU4;DNX-x6%eNDL{`E7yJ)Mz?=-qG*>y|JtE0!%u)W^}TE=WZ-?vf;=gv{_Dq+BpOoN>In!!Sk!68FcrWI6SA%2Ty(!4}_Z=g_YGP4#bl!c#*dge-m3jv{oTH$xwHhAYSjw7}Vq&4q^*z+uY z-2DK1F;oL^H@|rmM!b+1=4mc{O@ZAsF-|sx0;1+Z(VPdk)K!s&Il98LR91#vB?XsAqMu_09GDThSQn zl%B?*%K9Wx#-u-4^R_Ii(++lfwC4?g3qypuxwqUa5*pPQ17Zvq21!D9UOs0$>~$`1 zDt<}Ks#JmYkpabC9?1u0x4e@9`fz3jQE>vG>m)|_D8&AYF2 zFY`%6K+Z_sNz}|cgyK-x11`d`Bqs-5M{Vx9ydl;DU+$)!*UR}JfL!yAcjMY*7XexT zmNkE&5NdJ6tEd{&_1fw#NH_L>{~sM29aqG7_ z*bF{KFvd8Md!;*z`)iep!_)2hZ+k%JTF-TxBZEP{*0=tw?#HEv&0XmU)SIhqPhO5k zZ-|htuigkD8F+ZsvEOaETe9p@XkY$$eP+C-E4-z>c4pr?i*A3gt$|zpxQY-xb1#{f zw>j4SXzTYpLl8c9cL#jsLk<^L`(Zh#;!UfB=VOhlMFNgw3`gSaiE^t9A znr!%$2l+5F_H?v8@}1=rP8CAviHU^em?0;V24=<1oN^ zFC{SM6`#L6VA&GJ>43xGPKt=c81K^w%9F(9%E}EICb%h)eLMj={IwR)|>nac_((11vkPd8w4+|1&@h=ui z%R=hyDpGoh67Y0d@b&S8=@1d(fWbS&U?u4X%*%pwUXaoTA3Q)3p#V!E!P9BQ$B$ny zjYp7^vhIch7H9#hfRrT>oI~&d%o$)HI4|itj#LTj4{I$N2kdac0IFifECw0DlANUI z*t%_qTf{Jq@F4(hte&r%iqBBImQ!`>fY=r1V(~;b1)Q%q^{@Nhy!%$OCuyF6)aL2)DcbY&2F5<}ak&6w z9;%VmJ+F795f3>;V)XFARmKqSC7j(nru+OY+3TjrKovk`@v^xyB|Vp$Vxd=hcFKOQ zoSV3)Ys_8E+kT9DW9#~!cHi6M)qSRZt~p0osTGKT)Kxj(Qo))sPRokZdBJ(vux<%! 
zPRM4IX-%Ta@MLS_s{hUCtG_!%h5&F;at_zV#!hQV%+-TiukS-Zh@=Vfp*^48+`Ny4 zGm0t&A}>&tNHgl$UZ~c_Zg+O$Ctv2Gc`yZ1$goI(LW6cRXVJA0V)M;x4uR`=+9}cU z5UT968X~BK1J*;<{2Z9k>pS+gt2_L@wkw)v!{sC?pSVS>`e(k*$+^vD=ZH`Ql~TzP z#Ap^TG}0)R8}eiqUK4~eZOC;m+i_3(Q`+9FZ`lhZ~g5*boqhj zpW{}$<+!vl`J?UD+I*|=VSo3A;dK;*!2Yj*Vl~GDfe~ZC{oMi6VZh_VSCo?A9fUD4 zhI)o?>xTKfAZ-Nll=0=;S1haOwje#}I=m`GQ|gbvT@`vygZ;b8$E=8@_ydz;uwJg2BlF}UP-k<06UckR`TLE=!tpiXQx&~S zj>(JmiUZ`oKr>dVmP~GnTa8+1biiw^j~=Gfdz}qyO+q6gr#VdZ5A%;+I?VbaSVGecwXpuhG5Qy^n58R6$ckHkXcGcc;J$%qMry ztKM*a@jNnPvqrb%Z^73}vbp{E;1QxkTP>mR;^CTivr0^Z1<+2mR7h1;IN3Aw)>gX& zb=~b5j(#dBNW#-MJ}{ zq93;!C0x`Ir3$hsYPXuKFR5AcmUE>#!SwQ$DBCVr|5sZn12Tx`&A=^@!e&XC20?L{Q|#gCgpZ#;Bjt>{`*#R2;_KrR9v)9Pola6l z0KgC<4 G-8YqN~5aWPpk|Hlo@xv`->${|2){?g( zfQp8@9czN3^RQDu3@N3+yzo2-2Xg`gH6MhFKtayVVVWkyK^VMZmSSj<7@KQCdM-=n z;cyU!j@!HLvUu4a*ecyc4F>akK1XkSh{tH}#o1r?Ur6!!QyMFFz9kwHagA(9f_Yak z&>2jlw1jkOo@n#1i>)B@-;5SjJiyOJ5_UbSp5?~Ywts(bd zjgQ}#7qm}?-$gC#6Q(Pw0NzDt#!(H zMQVij_S*+s9uRdqbLS)5-Ma6iLCSKg7JkzU*naA><3a&{;rYJfZ2v<|;^Wp2k^e+` z=xiCwny{Ghu^Drj)NZi|NKrX0hwwjQ}F)PXX-)Kn`h*MA?0{uhcmNYGrH@ z*%fY>Z`5YF9EqFnnv&XeN9I(NVOF+8B2?&_tWDb|FOP*8A>T9G)eyN1f9}TJlx3kR zZtjvl=LG0sfsP#5>Ha2EBfS5tMZb)e*MU%!u@5TN%mH9o^3F+=$zY|Fe@8z%sZteU zCy^@$uJvy&pY1Yo$=&N$)9@Fz=Z%j)>3(R&$M-s8Z#lkNWFF5lV*RK_M4G1~2FN%J z@Xq1%^r&YJsY6=e`-O{^eps+=8){?&K+c=6`!S(pr~#r0<`=bhLI@QZ1fasPh!`aa z(L@Md+3J)$#f#i2dDFlE!%+}jh3pJUIL!-w{rC}2rxQMW_&eUce+MFu?eiC;oH4J9 zFyY1lLmY5C9B@1wFpd#XiKS8ko=!7<{rDOG@yl;G&j}@ra3NIq^s;W4=MAZ3WNruw zB2rc#;Qt2z0RR8wy=j-^Hj*WJ4GZZWOQuvk-S7YZv3K4%-80paa*2%1og@HzAIuB@ z>F%*)rc_lu&U104ltf8B)J21KkU+y=ybnleM#&i#h9OjBUMDSAR+p?E5z3_=Cd^VDeo0BI zqYrRIQkXDh%=3gW>@f^cs>o;cDiCTt4rykYGplJeeAVx7-%eEjU#UNPH)mBg+qHF0 zxi3aX^-glwG(_IU^v5^(_X!F*^V7NCy>y3DMZWA<>+FK4UE)!5q*f2-KVqW z>2ci@95gGRS=_b17RqFOv4eAMF|+w;^R#_uhAFG4$_qmdlw8+AU3I!d_{8W^`#}A3 zQ9s{2Iz1q5z37$aszg^$Pp62jfb#kutoN~8iQMH&uquzW-o!?U7bW!6CErv;((#2R z;!&gB^#t#M5Y$lO9fB9ns?~aFare9$xAlCqx$8lrfvQxrhDHT++k=(#-Sx|??51yn 
z`&R~_3{WD1L?)Q>Y<0PsHVlyc#;IST$Q)A4TQ|z6}`pGla|LdN+ zeNO-UUcRsU_C1jt)tmPn;A7nn>gGy*W(5ZARQ+)sfbB6NC>S`D?;HEgU@B_|A{q&kp+ zyppB%UkxRdT6$c7V`6;%a))oKi3e0WI?tdz>iqh2_}k>&~%REzaCL;yT?lPAf}eW1v#6n11D2@1s{< zWeIE#apxFJ1jmrvJ!MVJx-K1La+@N+uzE_8>mb#kR~|7{>R8v5dBLi;4X7YWNI?K{ z9;E<2B*~p3k3bBE468juaPW?SVzK^PYSfLqJF|AXT_ow@1Q@Z1h2}t&Qz<^E&nB@d zj>9N)>JVVKECsA#mBzfV7#*e>4ov=4n;f+;O+b|(-N&tXA*x0wMO~Y`WliLA4ni?c zfF}>nPGvX?Rs(5x8C6iQk$fjgrzyi1Tc?oPnN-xy=2DSA&&Bo%!YaRqnNb{X zWX^UTcwHQr*ZpX@Ib5&b_T!v!&qA)uyYgP}ah{#84{4Vfu$(G7Gk$M-&dZe1Q);>X7&7h#PK*IGl^kSNga|#%-D17{3=Pn33JvI z0WnrW$J+FS!jf3-oyQP_1V?h9J;pJDh}CEo5gm&TAAAknPMq;{nDF@c3~~Yg_HX|S z*Voq=hXK<(!F$3y9g)+5xZh)cHRAT>8rQok?8hB;;|{?OSW?Ef$0L6Gbca9gz9GYb zSm4L96r{vB&I_hxMrJl=hk~Qh_9`GbAQEmxK#B0)H+Ent>MX)% z-r~4XtEA6!#@D-heExic|M<6C>_=gaGRoRvNQiR=E5E;RvukKek(jZ=^8t^KkJ#_` z7={7kIM(Fa?ks>K2mlFA+~!#Xo&%7eRfn@NwMc?_&O#sQyM(*d!ee2=ax4dsQ$aJ& zGp1=m4B|fXUc%E#DM(8~j600uD9i_DReblhcxUTQy_Qf8oINH~F7g>=X4`%We)uP8 zCxV7d@pjEaL6#FhAe8F;FbhQcS#DrO3B{t>0SU8UXq92_h4IRhv0aJVwI;i(DQ+w5 z-_n>*6@Lr+xg1Bn26dKLDPc6`E^mqeGuH!@iyl~}jC2P~pYvg#X{=bM;Vh>=LyHS$ z-LqN1NS_O_ebyFTztS2Do&dn$9ikT@9)cJ^B>CKHkvQ$AelNuorwyH{?l%D+V#w&h zM=5y|=CA=DMS#cJ>(k#G)#&oV+jU>HXLXz})iG(s>eltHg{hJf$hdV$)_m^CueY~as}0zS zys;WH1QY^!K(tnTcmoh$nrr+!JNd=a}fhi zUC-Fcn9E~np>w-3}@3FijIsa$6y4N%8^k z#ldF}*8C3V94Iq#PMGIbDLHE>gz(W5f)6JP*g`t8Jxi6vWmyEU?RJu+8Y4nZn2r-t zN*D(5A{~!YU2K3@(>R|CK>fah=3Hxnb6se$Z;jsblx?YRbv)>GE5Sc&+j>6wgUzh@ zPL)p!n{rN)ki{Ayb1wC2yjUkxSrz?xKqy#)kJkW*C}UCuJLO!foO2034I$G`?KJqoBogab1MkycF@bvhyv`-a5My zSg$K-ju^g%nHqTr)TsR#NT5RsQ&cZ;a=kdhdFK+UkV zCQdL4=5{kPVh|BZ5kps7+vi!*-rC+3GQ+HlTnr*%);W8P9q@nJSwMBKdH7Y|sfIdj zx>Xp3ipI}3Bb=;p4Zl|XfD2Us<(zmyG~htfg!O%<8-F8|UjC&46;jR{p%N_28qFvIffkfVMT#RqyVf(3~R-zNu5jStv=4cYr5gzZYgIW9tk= zaKzHL7$r}H2-6~rgwM}MFcEG)e8m3h25BjHem>yo>451t!?D9SM%-NO@yqQ8>~})* zJM0F8A>#2c;mh3v{`hoEolN&e=03n1y=)9J! ztQ;B!;q~BWXb42x=;n$qrJ!__wq{W=EBlwbCw%?-h)50}Zuej&q2&!3b4k6K2M~A6Jy7! 
zVO>85d@!2QZ`8G~&gE7SC9J^5+A4kFxhLSG^HQw7*1uoh18p7F(5qJA+{hj5aPGr^ zdgb>$tzYZp99(elL6Z0^C(OQD-Un9TqgjsERyYZtYRYF;t5;K=dgb5fR4v~{^X%R* z_U~(^P_^}ZIw)t147~oKC$um|lR7r`46=|mj~IbmBmo#TyfGn_j3rALodG)Dzg^ zy_wNb4Y#%HjF~1EAY6XeHV>OIxFA2aVThOSSTBNkLeB0fHl`}zCo-q3n(FCqmW$@a zF=NL|pJ?}#Np$2;kn4Op=hW+%S7+o~WDL;;pOOYzZ^pd`hJDBu(Du}iH1X;D@J&wk zTpeRGq`Yjq{L{d|#ph@;`k%2Ty$=$BGY&W&4pNXh2F;lcNIAn3NwMxZVOEA$BS;bt zvgVHl0%b8`_qwK~K7`=K)2v=}BftTmkvj!}BsVd5kLZE@I0%UoF9=?|ae*9$DD1LI zHi*aPBR+rqhLj6F+cSaQN~no!7T>H@lTZh5pqbx)Ameqo*y(vC62fc@?o-h1qJ zgVeephsR^WY4kXN51QcY<%TmLzZf|e#%P+qGcXxPxeiln4iJ}lHiva5cE+Pb|4Mb1? zu`H<_BF>7#2|$sm=fYNF0>ciZSqsXP;|$OfhW$=ZCTGNDK_W)U3BgC)TD>>!b&N(qLd+c1+of27@)%7l;MMVGW1sB@^r1Xhg#5`5;h3m zoaO;BLxL68(!5kb-`JhdS>5_KFRGwtEgED>;@TS_R^3WHTMxm`3d7CnZD0LR_8y}e zepMM`TgX>`{YhufHquZ)W4@Bg*D0|uCn@V)R`w3DsJss>J5u+k{p^IwkVvdZPF=!r zh!_T?*p2}{1aNOT+`J#v5GrXoXDm612Sk+??<2TV3JNbeF9ZwYDy9^7A&~-R&Vx1! zd3El5&6w>e+{?I2+k-o{E&I$0JgQeFB{>vxPH{P&uVpJnhaXK%jPA#=IID$J`iJsSN1aTgm^dtI3+Cdwg?0%q-9|M zYE=v*xrB8LAZ&@QaP@qe!J>{Gn?mVy4QuV1j&iwzOJjE94Ce^rIG}|;u_Udcg_ga< zVt6v^bzDw2#~O^3Gi!N0rZ)H3R9Tmom3)TRyqV{;!I!+3)amPX%`glyE;-AXpE(a3 z5SIOPdY*0e4-_(HE7iuv@!lK+TV~t zBmTte-*?VrPALUZ!#0Z=HtZrn-!hUSKWAZ(~{kTA|tFwYs!#~FutK}rQ7MhWAk z0C?HoIcu#t9{?|BeNfK_K|Sjv`O@Yw%Y{Ksu5)k*Udr@1Fvc(lHLfQ(crY>?Yn>xc za6ydqkI&Eee0PtB=L7cp8(iIf0QmvalJRsn;*T$P_;&w@QZlY@Zt(B__BZ_c>n+CU z;D>r$YHTFP>r#>-LBfzop*>an#PKd za=|n$2qc0$#u39Xf}F#W3Lc-1`10_C=feWHAXbNNqY!k#fk2K|3#|~LAmH)&fUoyY z7{>v>+*~1$B<`dTkV3(dG8X5MRrv?RvL$RK5_k9I%U4`oU1JGV8bD4qo>yzN z4+OUeY6OWZ-mFt*p$4HUWLDb&KotyBNSUnqMc)pdl(M+2)S43G0F(u3S+Kw%4x?C9 zgCx-8f|Uhxz$>wXBnZ>-IiHv1*($kwe4XcFFE4L<&YwsMv0VI(n$Qr%O&oUT6s zA>I)jtQLq2i-l5=7w8+QT<<+%5YS-<>RQRQi7nk{ z8`$sPX1ljUaZ2^)QnG||6!Q?v-mewJi-vljTm&C~Rcj#^r{2=_HI^8xga_)4Gr;Ve zgAb6LJYqP%L()TCz~$0~jmiOS-XpeyrfWo9m2z!?G@)ME>tgg8BH4gcUWL9gN=;5@ zkSnt3UuiYuica@#ndXGRC2Rqjat8D?H7Qzt;6Uz6Xte){>Z zx}zGm3KzVl#QwES)*N~1>8tNdD)+fFtiaAd1D-D{?b{HblSdFqSbN_~ZdM!9JKj4# 
z+qJ)KuZ7Ig1c!P>u0ZuMS}J~y#%m%+!G`$5hia)jM(#BMi2jMh?|&wDR* z=?F-R5N;fgM?4?JlfdjH^v{V-=zKs<3Ckkp51T_~$)9#!^FcR5=DZXSbl#(+1muhm z3DE<=5yt3oz26}^LS7ayGR9p%^bWgWKn$VgIvuA4%aReo2p0w%jx!F&&v<@*#o@O`(uCI0&h6v{bIo&@V@c1lrVnUH8RU5n6k^|JS)-~8n zyng2SX~ws^2V9Q>u7?4;As_}uj0HnsEV-!bO5>%~(&j?|Ifs-Io(@MmACCBS|2ORS zdrZ>==2?{=b$Jn0tF;dvX^|trJ1@zGbxc&igDav>b1N#?Hc)$^zF?ev{`vU{Avlaf zkkGItiC`%O%d%j<+aVWVo+nsY`}Jb~Ijy>i&6;xVn!?qZYlR2T3D))R|1k>C&uS}c zm?nz@<`J;<(yTb%nU|!24~z5mBsi~-u8*zBP0ydTx`04hP%zR`u9ob+}3Zm<3<)?o=F(@-o>%>k*2zQBvG zxGnC;=qzF+hJ@g~kYS7iVsr{L{mM)9QhRl0ROOMY7e~l+TK<)R4>3wBFzF9-`~y6Y z(z9yJxKuQ8+X|Oe_*vnCj3NaiwW2s`UX()#4k>f>{K!6ut7r0Nq6wr)-`dO4_lykyf^A^C_`@K)D)t^fT zmYa8=HMj9FpM6eY^>P&O)(c{f`U{t(qn;V-v@bcg@yQ=bW-Tf4`yVyvMU^yrnMwv3A*8)faN5s#^$X{W-a7Z)ul6T=VMj z8^DLrLpR;5l!D*_uCA`J+wGC_jCr2H%;MPy!VX&;kkf+aqa>y?96*|H=YpPH1#?Q6 zmqi$$R!2G4JVzhY@U4H3L2@|EIOQD+J-j0f!DBz}Rab{n$z}u}5u@m%A&T+-Fim(m z9*aWDG))8T;QamMrW6Yd}Gky6&EodLgmxW;}YT#~UbHY3?nCFD++be{~I36c_ zxqrZMPRJm{aYSZG!!Szy5p?+=KfA5GA6Od> z<=EX=Yr7MqSCH4><9`MS0nWRs#q*)XsYnWz5j9pSOBgpG?5lHI$8vKBZ!KQ(Tm>{_ zH9*!D^TG^&MmAn%u_Dbg$d&fCKbS3H`)4T!EvLm*q28>cYw&T}SECL-Z32>YG)a{= zlB;u(e6v)53<3I?#lT^dem~YsR!Ak<7=n1GTq6kpL-&Um^CI-J>OIdpS(u>inXf1?b{EE}7v?oFASQi{c^2@vED32z0!+YQXb7KA<%A-PK~W=9#cQo3Vwvn~CuutMPype)OaO=3!gQ4BGtvdaKgiW& zROq`2xRm>30(?;_!vdX~&e!k7(4x`-K$iM(CBth}j&ly=9deQAp=p{bX`gsc zB;oC$j3Ib@xVggLK3?PYdWTAEfsb6ItVSwQZ^Tp~5`rfy9W zsY!5$;{lKJBFWYS9G8p)r!l&kZ0HFdQMj`$cDV=|Bo(GB%X5rCCLA;4%fl1)U+yGx ze!5r6#g8UOaTUvYJFTjS+Zk}#*>f+Uo> z06<(*DMOh(l#5+mp+0B`7qkwF_F2FO*Q8TBJc=q{r1v7oVwX$gni%YG-Hr&=xh z@(!l0-*EePyxsNLf9BO|D*dK?<~8eAt7>jvArPF`njJ*73c66RItk}9<|<;aGF@0x znr;9ZV{Zi*^m%~r#yhjZFjw;io1z2;Z@glq0HxM%IrBa&id^O{OsTFLtwX?0R)B-J zguxA9cEMEr_(;|5MwQ3r4Re|(QA2GSHEV%V-D_Q*nmb_0w^ixVp)q94EOgoG&B!dd zA4?KpmI}n!0YWNT0|Z8Exb;SE7l#0v?#Bu^jCOYj0YeD2uw?WeV+a_0kc#%*ELv1` zsh(fg&4YZeHCG_T+j3<-lCdxfTMJXFb6r<~C|3-n-$k2y;%>I|89FmymarobYDHl4 z@VF|gY{r`xPP$Oq0Ik-SUEKjCFqnkiQCJg$5P8_#%8QAN$pvZ2diaXxXVI`a)gJ3J 
zy>qorlF2FqCSX_B&#HR{2yO4%`$OWTn=n%VvhmA@yf$-e*XH;^V$8; z3{?7FmaNK>!)S`RgtY+_tk7gzhp!z{W%KtwhfR(l9c#;X={%b<#s!V+fU05$n8L|A z@3Kc2a=lZ^^V(dibK}UZ&Pm(91lIG;cCgT6NmQ4|%4`hYAfE|J1GR z!nuB?)E#h|d)K!u5F$04HV>lIZmEU?wlm1VVtjaOgkWrdLz_9$x-4L>BrT-$)_vHO z?J)w?IcaB-{JdQcy!Ga63&)()ygAp*d(qydEl^!M)DpSRdP{0IMb3+E;IZ3}AO}qI z0rPTz3lNs7*lH-HEIGgAEL7@ot0Zd*BGUkkV z7Rud>EbFAQgv#3TmYZcV#=>k1BcvqZONBj_lCVq}WnNH7t*K68YFP)*QxIXq1(^zp zGQFUPaO?$W^_KX6TrwVy3%)%a@bLVE7)Gf+90@Ty4PlZhx>AUX#e1PK?x~1H?aSRA zu5YezbA63r7%)#!V|{k5o5h07;Vq_0V8m2>@2X zm6IH{;~>dP!w^x57B$Q>d>9ZS!AFOZ&j{_4U%lUq!PWJ;7V-d8N$f&oU`ow}?ai0} z^zQbjgpaHww0wWX>SD3u-nkm8;XL5I<#GU4kWO*~nl)Ss*r`D69&LxXDSgh7vhwQB zWWcCF!L6+9ExOi3dAa;PHlLMl_u}g}g{d0TtM;^Ry$aaBBy|DrcHrbrc$6-m)xf#s zd2qjw__U9vkQd0_PNiTexdIM(PwJAI73c7FiN3tX7|r73y%P>~?^$# zGlkz7wI|l&(Z)pQq=q8scc9Ku;dOj@8!I`$Y(5L0g7>PNu>tVyKCtzy*yA z5HAiLwOkj1C}jFvLJKWB9c->^x`t7q9Xj@zR^0rBkP#Lbw9W%Lz1F;u_7^U>Q;<(R zb4~e|-=BJX*ZZYv%%sqd3CM+~;8iMsXH*R*xF{5RL zEI`pasrwN^P(5x2kR%Esy6V}=nXwr0h{}7(9Vg_HS`ppujIUwLkiG>U;A7Yr9Tb!n z3#)P%g2NCA`#{)_9z!5_5->{uLLi}#A0oUGu1w2PQ*H+$S%4cNyhBpnp+bXf4^^CN!$;~w`9GmbNXeFU7>8cId-K5`N=DwUO_ zWewJ2FKZ>kLab%&hr(H6y5V4jky6Q6IDwpp--!fV=y9T~}lF5HOBW zik>8SDNllWV+=xtn+ozgW4pln&1Oksq#*+Ac+KJ<9}vW7@1}@WTx=7TL5jAFy8$UJ zSeAsmNQhSm9{b%6&(FuYz|3O&Gn&OauarJC!PEhgnKrn*qQ*0^?lD-3WX=#Xkw+-O9;N^`iOF^wd~oz zBhdbJ2Dq`&=Sp3qV)+c)N!8D9fn}S+9*n%)RwN3YjD36S0LaBF)Y|O`P7BmIa)1+{ z)%WG{8Za;H3t+mu5R$qoDXzad^RUt-`U8L!scoNb>c#0;y=?)v{15YpSbZQ5kdrfp z#gkVlm~$4o_*}XWw6>}YXc|F@x4aADdd8e8gC==4Z}FWQ(cbHQA3EEG_dfYVssAZW z&%ja&p!qu8pU%&AC+D30%p3VpPCOBAO_#l$mlL_XK9}VqvajUU8d}=tH}UK9dI{kF z?AC*inm>C6e5^b79`NyVnmMH^i|G444GGbdQ2O9KQkszSA~l74kmACSFzA$KO!I^# zC4id2#)ve`$Zald10QCnBS#1^VhF~*DeKAX4<%=Kau|a!!ba~fjsY>adX{MM++B%$$`T4@xy~*)Twe+8?uXlJ?Dsp2F+jY@1z+wT@cZXG z+&>;MFFrIO6H)3D;LwxY~{41(|2m;$6_N8g?}?{J2cI$l9WGom(zzRVqiq8gS7l z*MkFE5reD4})O80BI4F774Sq8drK^5jNG?UvE z)H^9sSnr2Z(g6vT69A!i!3rpEaz`&cw;y}+vi0wg&qYjhtp^`}Dhpz#P)eT*`lR!d zV5zg`+F#MQlQ(>+ioyC5+{+IZxwC 
z5Q!KQl}n2XT26%b@Xj|~TF)Lc-NfLv-Uw?^RWF89?^M+=gW7-)1{9Xs8c$Ep`117* zOb+9Ij}QkS7sMfA90y_KN*U1s*SirPuCH)&b%ot-k1;wN4-+1rpYi+GJAD53h~u0A z3h++DNmX7gIf?PDw0e5ZH2~5(AZlo~QNHUM3F8=%lMJLCJyJ2$3kNZZcr{oCSr)ts zQlm>;O{fTB?}VnSG4m1_9&^fgcskwU4LyE|AGP|Dq=bFM{3AVP`EJ4O%9kcpx zZ4hDcuq?u8UK0E?VW0Nc?*>Fo1WGC#TrP0VV>gb->Ch=pr%LL3o5f!@D?MF2;QKh{ zljr_Lvf_Jy*ES)vx-m)SC0e}fiq1iPd3Jt^Ynh&z3e70MyjmzEh^d~8*<7x<( zJl9okVb@!9J8?D61l~|RQMCy)ZW1d zhZqPUb_{3ToExCC17&s3V%`{IhvK&PET2mzNzn3C0*tv*&H==~BKt%FBj2fM4 zA8e>dEw#)qhh4s)s+vcTc!_M6r(!He~4_gNgkl5U=Wy?W?Gtslr`*YQlQecg9JQp!KR)-4CvF+jVbs4|Enz zp|EtO^+P{dL5vMye|?8Ws>ky+cRF)D>ev156l{QC1)g#d3!nWg%AD*a0Uwr6(vptr zGJhk8weULUpkY^1jI`#0P^;_E4kj~9^yAsTM^h*zghKqk_KuZ?*4V8&Fmj?CtCqfs zUGAPL7O__jYHG@xdG*ZtWR`ooroD~MO_Ai=lQ0}rHemV@6nv1z$?N@kDON$|`sLQj zf2UA3z=did`?1y;S60966#vcuP{+nOStpLVeZ6Kyv+%3_)%v;Cw^N2XR?kOgDCoA7 zeRe$)KQ4O(>`Ff8+uIs^Sbqypt+7yxr;r$M$$Hn1&#At9 zVb&;|a~)ww@+M>44T#YL$S65!j)a6`QMZBTdBHqQNLrZMQK)4oD6|?cWY2{D+hmlJ z%I6@_ZBdE_{|aGJ^n~CT(Gj8p#zDfa#0u04W$j-a0C*gy8DH=2ad-cSad(C5+YfLq z;`wmI;W*>*=^4v(1m}cZ^!W9|4Su=3!u8b-aToxS+J9f}pYdP6eZlAZ0}cx#v65s! 
zBAjadv8?;k$bGcRd76`kwR<@4!ATO-(XDf9K{l#Rn)6amlw{))3v-DHS|X30&i0bf zDJ)95XRi}3!iE79ayX_1`7z?Z7-CXX#(oYN){n zl(b-)W*iO&3{e!xVTi~nBc-DAMW3(5mRw%F!vQ=CucsPg7< zK~rp3f6+K)0~QjB=hQM8YMzi8GS1as$Tb(r_M(OnYFu>R*IpTV_5E~zygN7Jjm-c6 zQKOk(%?k`PQJc6bueg(It(hlw^_-2zuM*%@7OGQlE%!GNBJ;t_fI~I<{b;LRVTcjZ z46J;{koD?)RAGP1$F^%=9e!m%;X7XF((^1y9;mT@Z@InYDQsH$!}nQgdyFrtN9)x6nPa7cyUrqIWoj%2b7qg!5pxZ z1z+!<@bUH$SGyf{g9oC9V2uT(5SFDNC9MOH6C47DVUz+_IpKIbU^fO_?RSVVVh#&( zHo>n8#6w(by8%l|)z$61xYVmvxg=!mBPtvcXiG$Ki3LvTJA*;J_`4|_%FP> z48RJ6Y+DaL3KvkJ3O>#jMBKEkg4sX2Sp~(4Du6#D=Tw!poJ;fWF!vv-cOZIA5z%?@ zeZ0QRx8H&gwR+dEIlI4(-$@N1tjAyl44E|?wf$SXsB)dr@H+qV0Z8w?GRrK`K(cfF z44}OOiLz&2khNFQ=B-)r+Lw7-WDiOF@B8Ae+KnzP=8oPJh_}csdU^8wip4$O`5h5q?G$tqLafyVJ-9 zGTkJ<{*>wkArjr`O#sA!xMeI=g9pr_Z4?DRFDIw2m3QEUzDj%ZdcVCO>AzHCkzGW< zem^2cuOXEgM2uk!h(oNyS4zQTH9b_f1R!N2N?DDO84{}E?OZclf~o9F7yNZ*H->x&}Ef2EI}-YbE#KJoY<} zUq9U9AEkLe(0M<){aT}@#L7`4 zD1Z*QL(Y?sImssS9z_$Ym;`Bgr@Yx3f>m5m?*RecpA33)ArcB_P$_^P&_aF)L>?3b zFg_g?eE#}?57*Zi;x)W;h{0osQekmCM$AhFrwqv8>p3`b5}#+vQ?Y6xt%=@7Lc3V47yv;Ov!=KSSUMbz2r*)Jb&YWx zah!y6IpqYO7mVW|?z=i7O}Vj!`?5q+>kVU*46F;7%*AcSt4m7msnrTVjgdb0iF18< zPkU?V%%(?Yp1KjNl@r&lRpmo}UFmeIGU_Umjd?H5^mb*T6GhD8e<5dv`E`4Xdj~*j z_qeaqvYB5KKAShLUJ5d>VV)E;&&N5%gRS5F%33Rz?YZ_lTT^!EpzV1+qu|s&uN9GV zR!>`@W;ZkMK<>OAC3kE=+(28s$;H+Kl{Q_bAd{B&37({itak`bzDKW0C)vE2f=Kni z?OR(4rkbxQc4f?fHZ6qSUf=3Nt|!23%B!tfU!7I}bB!~tug&yeX$JOXVMfi<%Jmhv zn@cO!XVxhZ0XAc3@|!Goz<>$C@)$0)9=!BQ_lZa3tjya2r&GoD)Vt9c$C%sRSGg7p zXxRK$T4t|#Vbr~w@}hAAcoJ%*JCv3AiWw;28c|?xRP_z4*`Vq=0^}U# z?z7b0U^7BJyVl7to+$d8T!hUBOrP$*ivFW|2kWS}xjdads$*oIu=$holdGO_(zwOA zJ1>=!p`g&lNUbXN?aY1J8faIxybt?Usu;7EtOJ84wJtYWo!<7#^|R%l=g!myV4ByF z--7Sf&*F71&*>$tKivPWo74W<698hM(cWcqe+IOWq{)ugyMMAw&t20SfZwA#ygqG^ zHCfc#>9N}laz5n@O8_5Yln~}DxeLoIg#iJQ2pW~XNm-?~hULER&P();0lcK3#k`8l zS(vFvLI;B<41>ooNNqdEQcJ{9%e5HB5g>=dG~w>y9?!=EhH;0hn-53@m>!=o&kLRp z2Q141B49TT`1Qjz{`SiUTQU>myCj7tu`5V4IPDq)M7$8TrPM~>gl7b8? 
zE@-k7faaOVnUsN+c-{dsGhPuGVP0lA9HSG#+3PYE@W~|BNs=mu&N&2faB%>1Fl~Vd zqm&9~smIT;-ZaVQiq=Y($$2YmT@kNp^Mbrlhu!{8l?FWB!#%yZJd04R)QSrB6Y zIfqnor9z8wK!_1uy&TLWN#bhh^WI^%A0>`$nsuQ$4aH(bX~O8xSW_T}m2Otcw-9SI zP|DKk2|y}iJ|7Rl*fs9poJSl+_-V$nELfHWx3^b#c2pHBM^*?)ErMsNrEe|Vg~W2H znii~RmmMzk0Fx)wJ=xs?M6@c{);4S}uwQQ5z^+qa_Il{oYl1~|qOSJ@SiF7{tZN0W z7qVUx&ZWR0)y?8bsF%eaK>*esDFYBT!~`wsq!xJ#lVGyA>jGk-ezbd%dR9!K&c%ub zw!`S0_a-Z=iB%!)1-cx-dYGOA6M%NuRRtB@WxWE_-q&g2G`0cO90uw=%P0M;>&;?+ zJK-URQh@~k7BHAIQfUPgbCEhi*@RoQ9Cu+aoY$T0-)FrnP$x~np|Oz-A&NUaN*LrA zM~tITAR4uz(Jj}|qs?L73TLr~4pGzAO!3#g!vUmZ7&c!-PM4|5!J@I{z0zqn>IV_+ zJ!CC3uYnChVKnwWy9HT@iG-7~u4ynT&8WSiQP;Zq4AfmA>(>EVvD}##tw_9bA=2bX zQ~+n580XQ76|&_QHjsO%HO1qcX1mqE*K5PrYOHyuwK}?7pdrXw@WE?%g94HuUd@Wr zIxz1@XuLC<)shr^2s6%Nw7YD8t&jUQdDWC$1N^4wQUTaW89V}k`}I=6>j(Vu@fNo?*YM6`F2KXn zg#Yh9f5YED-Qh47WH`7WG~=@W98yYH7RzrlT`IyD;g%(V;3S!zOxACwnG8|@AgKdV z3Z&|#({RBe$Ym9Z#F`tS18UZZ(T&aZat8gSBmjb()$(v^rFUR>OmoKl{S$7lM~v|n zaR|t{Ab7?wNMcaVjFJi#rMwMZ>vb?NFBy+d&+tBA97dGFQW<@{w&ma%0_NmEiy%&N zUInGFl@tbCVHDCTl|En?2Y`Wjo+Vxn!e4LJ7&%JBs-CkHAe^Qd^E3$sS_l}&v5t?h zEeLfmD~kNS5Uft%RK-Gslz2w;pf^Q&bNg~CcmBt;T0Cr%OzO@1p=)>T4m|yS&N_6S z=}M9`x2+eVeYnoLuRt%-*%{Be0uZPrUDw)MmdSfPP12{2==+U*}pv@La!js&)Qtts56=^Lz4T9xL6C z4R~}#S}SO?Fo!>>b=F>>Ka{(-;-(N-h`{WuFyhu($QaASVYG zGsBez`g@ww(yga+3hKE%**9aitNkKZE8bhaSEbUe@HU01Iac!J5`y6R5* z>gcRY7X6_Ix~6tFFmvKLTl%JVW&|YwRN!2J4?>m_ zgU1j&ydzontjwFvBSfJE=0Z45Gd_R$g2Qpb&CN$RA28>Fr>6s+4+rT7WduhUqsPtF zh!3|{7s}SAvH@PbYD4vHdr0!esssRKtN3WuWbsI}P5j>R_`Yzr_kxcGuHJO-((*mB z(wA;4i2tZ!)6W+XiD$H0xIKaove#B!KBJXi=>OK8EC8$3FUi~v@JsjH7Lxokw`x_e zR5d!Ml%3>cbReWRjx^(w8ZfviZGCQq#5;R9E^=_{8R3W;D0Kue`*;1L^%yhOLiv(W zpl@3MVnzpm)W}Bb8b7a0JVW+GD#EN-6nvB-;a67UV>9Bl-^4A#zbU!vv1e`oUSg*m z$jXs_rv<*=-{@AYci$l!Y@HI6QOKXtRR3A68jFg^22+~fq9Aiulk8&a6mcT+Ry5jg zI|Ec>0ap9MK*)MD?1?{0t$en-8xXSgf;2|H`Zx1fp1ao>!)G;Ib~5gJ&yx4F)45zn zPn?_UbN#Y)Dwki9>$10oaln2*B1DI2&L|~G!NCyVeUvMUom)v0#+$CYEW+AIPKy6o z-Ma0V8T~Fn3i#!c0TS>MoJa6t#B^Tt)Nzy?;t&J8lUzaqssSx%@xtRY;r{*s4^Ibx 
z9GnkWGUITVaR2y>`8a{V7)Ou&e!%U1z|D;^#||Udc}z>jmv2w_DUd${ZGw8e|^u!R5^8zjzg;d?a$e2>W-az6;iGo82y+kDb<$gP`)914y8-Y%1i7E}M3UlJIWS3E#M+2#d5h^u9xk z7=lAto)Nu6^n|Nj#BPZ2kag%F1W3Z_I)I$VaZb3qf54}&Us1sL_19lf;4mL%JU$(9 zI2^z!Ywlyj&D9RSe7M5x^&U|}t8yXSKTY`0zyFTE|M3OnBuAwZi?c9-N%iy%lT|n5 zMVTwj2$hvee>+FL;nrq1e7wDdbI&+T2@FCmKyU-5WMM|EidU;TYeKV>0u*^@4Dl>e zf)%Vv=0f#OiyA6g^*|=Z<6*`hpT1&@5x@TO0WmN@k|fGGBkgzKG6P8#!?Yy$fiT1X z2g1|CBODX{w%ruW>va&mL+VNhXSns*c?X8>69XTm9R_C)qV4Z;LMmn@KiyB;LP}iN1)=uqA^@?t z?bq39dXJU&;^{&u@G&YiASpdy2Pk3SUtguZOQ@l6_P(cJ2dZeRb>BI)b;ufHG>;53 zR%2R$a0h0%oF3h*opKuG=k+kNxm^{*+@dP|{9EdY)IP9;1FaMaM+h3zZ>`XB<__)A z=N{L98#GK-657o(Q}cpc^%4(%JB6w;cmtsE?FS>Jtloy0J0(}@IiG@EG zZW-8MSlu}>oaIWIkV^(ivL3;^fIGRYwPDvgchBCUd~!PUfa|LfA8&W~ z<-;ByKkRXHwZ}N@KrZ0!{t5s4w?FXP=R4d#JOkvlf*z1^!Llq!SqiIK833mg=3ro1 zW=zurBEmQhnrcwgsH8xTy7>b{hB`njLj~b#w-ce3pRgQeaWNrCu0@Q(0ATLqZn7D& zIjA_0*Ft{Y*PM@j;Ruq-HU8RMNg|BIsx&Pb506KDdw9at^%aKbK%9hbEP9M_z#OCs zdoGg9isDho)kAt&796JuQHzZc04j_^fM=;L8N5Roq}X9voC^8gT_$n3Mt_!6-h@fQB=N z-Pv35+70}p>uc+YshHt~W~2>bB?fY0VZKV1|6$}0rXaumW_^`%c}JO7VIf1M->nCh zYo0F;vBpOq5iPzJ1fy`c^g7!ElewSM_V3laUGQd{n}?T;4?o`6LT6qXURy6>(aI}8 zCsWh_?ayp~MTp0x)(kn4pMru)Z{1x5fLAPI1uf(b0Elhpzil0#HU}zN`JAHDg#6W$~1)BOWTvJ=(rm$-hfkvD46aW)=b?AA}jT zr~x`9QNq<&Qc6Kek~0l5@b2NZMk7|XDg_%vSa}gz(c{*FD;Q(M7=z@N7&#Fnyq{cv z3lSiPIVC(FCrl|}-0!iwx<)F%k_)CqLPUu?;1YrZ#u%_42khd2=!9x`P804P9`XC9 zJKQ}!W9BTOXC%gQNxWZ>3s%LC#gOYOIiJ0XbIo{fO^P7DG~XjZ|)C@8tAOtLoBv6YPNWg-J12fe^ES33(tg5hwB@U;Qa6BAvb#;Yd7<2(jZ3RG> z5n_nK9Hs~SshnF^r#{Ck#ibx?OfoaS%v}1<-sp^#6E9mpWRthr4qmI;SB~ml0KKgX zC!=hNSOY-Kq+*_vQt}By2-Fg26pL?}8zu$k*Z`1@F|F_GwzDhNxpN@q4b3cowNEGa zFsmCXjVoxQbf^nEWl?fU;ks{f$w;+mpKLr?|Dph0YAwXmgrZsxHMYO6EDC*2Rqr*uM7*;FAvg@9!x%kckj=wds5SV27=m~~m~c2g zq?bAMo^?wMZfmov^o#;|}|)5&QiP<1oNGk7X&idwj&-fBS^b-|q4F z{EXrpoRgiJO2(2FWEk74_NB|Gg@Pl|TU*Fl&S|xu#OY>hbG1uAR1l_Vf+yh|zP;Lk z+=5vXosUa`cMv-eEYzqL5As&1N+(Derle=4OgQ8so`q>n`1bIG z>#Gshw|m4Wn|lZzLm-UNVTvRSDwYhy43J0g4qOVJo}Ms_gCrBV0HOt1TpE&~NWx+? 
zL_$dsX%_Bwr;Hb7RS=hvtN*UQ^MsNMyz?jutlC(}mxV->QZP-E#1@XDzHdT-m9STO z*ab)>Zw}vnBbo4UXN5*Zp8p(1jvs2~>DacHt)xYjOS66(>yd&t)0LSk1?i78Q*c*} z-P!qD34QtWi(10#>}2&Qf3=@2`OhBtusbG1%~H2%+9~RC7hodhdMfpcE|dD4VX0rAAi}NHMxC0 z%;nUAcdoUV_}7!S<*G}aS>F>tOWfbyq2DWXs>*NY?Yx}mtl4M&D)Z^85ojCw)Idy| z&h-j-)9P92hUccfh1xi+&E>kQ-m*XX>o2v-^1~F^!lf2x1)$Ve?HsD>ps>`tkw0)t%8rLEWf;Awo0{lP}WRqTadl&h{^VP=28RSm%Si=g2hh_R_nfPL(1L< zBKi*S&xM_u8of0R>eW=zQ67Ug%qz8;~K#UR|Y{7jhkR+!hj$i<3Uex-@hIV7qKP={_akm^_qe&a!frpp$B4{?r{jd*KHcHBPhT*nj5v;13Q&s3 z+hq|tac1!b6*W0{AB4n(IsgLHv%tn&*sxX>9rWnKC{Boh@bGl_|Ji%bHA!+@OYi`s znYl+qW>wGKdH<)I**o1OGBcFB8ANKvADhxKJexQ7W!$O8a3(})-d=P}?k zO_&St`~5S@Tu{=**s@T%Y4vtq>A-CTnwa!xQ!Ct^3D@;K(1ZOipZt1OivoS}%pSV**50&u! z$=}KgYJ6%zxl*j|MJ}2h#g(%<7q%}X!*`Plkwk(R*=Vjat^>n^wCcaUFoZT<0L_C{ z3kdp%s9vUmtkrNL1`Sa?;%42?rS;jfPxa?PHYbrRrUd8|5sz!MxpRCt0L!vi(Dv_Y zgTPS(6d5F%2u2@nS-VEwfRu@icY!iac*>1&r+j#1=M2W zh>WgTKW$uW@)LLUOw1PtBX)$fq~MeV%ORuvkq3hdfFx;-V==YrLj? z&*m+EjkkGBNU=Co3^N&i+)i*$y=m)bTQvexXh z&s=GWLV%eB<2d0woggT9X0Jf3hGP8wrVpz`Y?Kn{TPHBn?m2So}bQ)6Ma?zQNk z@tR~cf&e$V3vzmf5D+*bP7}s)P!5*`i!30~nBj38z%d|M8r;XHCp(*2m+CWlm*Y1 z1;2gzjPp6-9F+jYK$ymer3i*0U`V0DtC#|qBXSnxEVx`QIGuD+$2efgsRjfysjw-e zRdF@oNK4K8CK723$Q6Jhg1jg$P(%rtpT(6IB*T<3HpWt{2%#0c30l-qQ#mG-k}wPb z^GwLOEopV%S7PQ`roYM|3me(c7rrOv8^gy@`vYt^-}cmM(Wc$lJGo`zpuLT= zpY=B0svwC^Q|oI}&S__jBLDkpZjWuaI{`s9*KI#TGn(b(5W9k8G|eG)`Y+cwneJg- z-(M$6Y#yzCHeU3E61{j$57{dyRxDThBGBxHA*j`#1N(t&7EQadJ=g2=wDELP)%Ujj ze&l(o7Ntg)G!?Iaa=V3lGPZiYyh+#hmf-yktJnJ9#UU3RpSO4IvtPAwuJ=s)d#^w_ z&ua7cR&zzCqxmKX!V5(j6v>qkzfX_U(JH<1M)!i)XZNzIBBbM07Uf=kh_nVFnNn-}&)1|G7xn(Ou3IUb+PzyEQ+fm03w+S#SWJo8K_XpCeMhTM+;31o zzP0s=LTid;i5xHu1BhT57z?IxwB$e*XkUP|6r@EJdlD;DivqE#swwGs^!SSH3DiUq zL)6Tt3fpS+F*G9;Yn>laBUlWAKt)4Uy#JyGQqR6gtO-#r*DF4MxyO&C7{&T^9`Fvbk2d!fa6D8@HhrZO#N zX8VB|5R2d16qJ0et*nlleo85(cr@{%@2`L9f?e-r?Z@_FGr{D~V&nZ-s$eJUer8cY zUX>63x#)K#8&u8LtEXK64?*z0Jw2o572L?uRcKqS5ADvwP7b5(`_>)@BIP?-3WBI7 zIxs^D5Q0M=aH4uY zTbPm|ehQhlm&erUD9#X$&|G10pjtvdY$42xN)}C_n(rbHelGic|y~ 
zBA5fNS1sf4e7z#g3nBqSAe_blKmT-xyBjSy7{Y*31osaY{PWk}@yqXD@O+s;6d)Et zpK`*oD8Kd?O);gLEpCRmp)EQMz=vHMxsq&#Nsy@00flX`Xl~0ug1F>@ho>uYF1XGK zr_)JuB99SqE(lAuw4(knf8m_j%a5I6J5g`~O;e1lJe`JMlh!k9}ms%+@<&3ANC&V%0?(P;r0zwYRNd-fYYGoP( z4#b!c7B-=z6(4ddjUOfstzzyf<*>A3RAg4qQq%+A<4PUD3THWyg zplQ`}E>!_pvMQRXG;fGFv8qaSXDug~#khh_)_nwO2Hai{?s3-ZpL&JI#W4FrxX}$S#+v`dQ;1IGu^8G+lLYzmu1si*-RoWvzy5`u?%l>-hL%ZSeOqw8R z7_~^=k`{;*Orr*#11h5}%Tg7PV!TR?E4iLuj;;81ZDBo5CNcqD4dR-~HDVkFFex-3 zFk>22DG3&5B;*+$N$VK}^-L^F#>3+io-bFhDKj}2TrO8Y2=`BqSe6+fM2thk&H03X z{Nrcb+-MoYk#lc{$Xw4Mb)PAwV+bR^=2t< zHBz^2Ux$NPAy@NtK?sc7yEBGitoL}({xnUPM$Hza89&UeM{aSc_Pmv_>@%xGMSWA$ zyZp`g-}5iG9>3N845p{VmFswVh0psy-2nVR)EJE_gm&2_YLM~qn6{aG`Z$w@o!=-i zU4K!ED%>45!eg97%DaiXpsSkg{g8IBkCc%;0K%-Zoe;c-)Eqpc-w(alMK3^wuf4DZ zsx_^?!YtZd1@~+%R5KgQm%_y)*a@jEZ~rY)z!5BS?MsT9a78!s?mrKEqQ`Y8D*N8^uK+RE$n2gLgPs zrQTI|)9jMPU=kv3(4x9fu2!v-;2$LOIgh5`HK<&XMEmK&`T>2}(!lM$-9f_+lxy13 zY2oHgvf5;d`?o!D@cf-2PkLVRHh&{hYcz;JIU{!|y$Egn{quGobPuYE1h7U+v)N@p#dDgV3tjs~cVFLpS%>no`&A*V~pCbo}h4j=ixTj@o)14`dkGh{ZYq zAZYM;j10*MrDSjb#%a=W4MbRyLgn+4HDDY-O=)LmEejwyqhK+Eg&DnwkxQ%bA!da1 zWJIH7whC4gC=e_3icOv&^>iwY(los)!vp|~%jFqQ&llt@R(YJ%P%Q$0kr#dUG>*7A zjri%~9e(<7r==eVc($4$|N89{KHWcJ&OnGG5JpAlGnGo~1Dcb*U^BZ|PV*=o&F8a`kU7$ivZ!4E?KrGjMDn2Kni znWrLDA_$B1&{z--8rR^RacsR4x9ru*ZNixtVg0GAXW_Ga=qg5`GSuhKbWP?LK5j1r$h-wyw)w%N}?+8rW z0GnL_Z0Y=A`!g+W9?RiEq7%W2dnLey2^X+hb#f)o8Zd53Ntc^`CEyaO^0Iw*KV}gX zj{1yTI|f=>!Lusl74@#wYkJ89Mh{;Q{Oh~P8BPuBJe)86+2&$hXJo5@Y?u3^s|n&8gKgf;MFyU zTWH&tEj_DA0<_;+*|xNyX?78Kojwt$7sR~0&P#G~$e9Y@#R7v=0a2q?Ptdkrq|H=* zb!!PO8chdO0nyJYV|YH7A_XXuV^K@1d=L&_AIm6RKRjZBEnc4`}wwgZuuy`qmVR0HWm$VjzrT1VL+45y1kyW9@D+ zPe)pkri>ReTo}><{wNC7ve6Yg%#`1s)t)2OJS>wLxM&tLHC z@1ODc@fp`F5RO1p&q%gP?_O|`W3avMO4MM=(3)mdrFg|3n=;~Spk9v_dp*X0+nW(L zrw9^31XOVMgfzsKJD(VYY#JIYU`7vH_5(p`m02kvxu9@C@G6jzYOo!N2CBMrUP=Ly z?;^|iL<~VdLB^Z~pC6y`+n0OX-QI$zU>G!pVw8xK2c(=)a?&_>vct*&93xU%@brAa z>3q_O%>is)96yeZZyTZ@CFtKt02WBi#@Gp_HrX0v1dy0Xq)xgs^R*`g$e3rr<)ZO5 
z>~&MjvAZk_nDx6-QeD)eR)ai^S^fW`xmMoN+j)>`AXMcenD^s|NE1A^hWjrMUS-99 zdO;6ZnOp>0g>+Md5W4=fQb?LNt3G7Ys?aE#&s}#l^GYke=+>>8lJ=eLHMmz5gya~n zuIHUrpR=?8HvMVzz>-5aqi#aG3m(ojUlG))7qZ)wtoFn9U(H$G>9OeFVFDJ^%O+^D z!45+84u!nU*_)Sk!+M1ozo29t|Fdh~@y^}3=oIg!e0acriwWuVE5!+WRr}K7d`fAF z2f9o;BEC;u2-p4bYCo~po!@HIb3kzg!n~%^F46+HWS)bJA^?fTTNLtBhSj%t|$#6^8^G<)HFmw44UbT?fJp#RD~E&3h?}V!935FRcpX9 zFG#6iSyWka`QS8-IG;}VaC?jM=?t&q_w;nZufKo7FTeecho>1y0*E#Cqh!I7GICc4 zyV!Zxx_|3f=ek{AQ2u)9$UT!J0+15y!C&X(?uc8uNe>^|`^1ms?mCOy0^)D$T|PSC)c!7O+TVMY>-Pxu8j(uDd@VKYXc1BDH$^OI!S#B@;=7bOea*)ah$=qlm zFk*~|QILw(1%dd6mc|9C5g8S5e8cwys?CMpUy7x$%Yx^pC&Z}P3v^BjQd+Pq3&v4l zW-6@Z7llJF4CA;?`C2ahH)`)^TzZHDw-k5Git}g4PWd>vefPK5-t7V%Ov$0{(Cy$K z_IY2UtrJFNFHpV4s9u8HuzAF4hBP7zf|hCxeXCW^%!`-O&UL@K)C9_^NI-+iz1U#; zSJt09VYCSkZ+6$_qxH>Y%&pgBl|tG6G5f-GX+*`WRc0kzO>tf0^?baina%Ni13GM# zdbCf}EVe68Pu-XP8LNV&FF*f9jkD`N)oV*tMyONJG|jqtwanXL7Q|c}mSyQ}?R5?{ zZSUI}XkO?Rt$X|yy`b)(R_`7Gp`fPGwopj5x`8Yn#X4mQK+z3W&zn7b@BR3J&CjmD zy&%p%%nRaryD0`g(&KU17Z+@820UY(PpeAA?^DiyL}MZd6G$`-ud`Jl^8(fo(@qTU zjE1i@+FV0Kt7`83_XGl%v<6Dystma@@E#?khw*7g+u1*YX4T3IuI#yJSpjz#442-+;gpVfZBQR1axXf1#M2FPa4o}nO zFK&xt7JBo7#27)A@hN+{UgK|K{(HqaW)vncMSvJg14I&*G~?lE#;;%QadSE$j*KBh z0D{6JP6fC;XXLcN8K#B|5QsI!;?v_3PLpN>;2=QO_!F0ah#Vs*2stC%Hwk>g8@d(sadw>-CD$JYqVjo2`geUDcqctie(-pji60tyRMugJ!`q z0pURKtqXJX=U2Xc?TQ_}d*cyDmv=_U)iv#JRpMG%u3RBt4&a~%TWdS8Sq@zJ(5K4k z=~!Vl^?6h+Nln1>cdWCJDFN9FZ@u@407zI*h|X6v73v9kQ)oDv)U~C|Cr|7p4uffr z-an(ajUV-4`;~OhR3Wq%t($ec3a&;RYruICLn$!&^RG_`!x7D?De*GHmQ0e+jaVT3xO_&2clx%>)(3esW!Lk zYItn4kE%Kv!w2Yd-z@7$EFDTnCo@* zC!rtl>Q*%R(`|mGLBOkX|3lQ1J*)igcf4m`dkm}_f8!8wKA$nfh?Hg&6pWEE48bh? 
z7HFOdmL(zOj4=kS6%%}qh}M4*!>ZJ5LsW%ru81LpqUvegQf?qJVTg=a`BRf41CwQM zj0hZXnG+r!o^ZWhF%Bm%N6gDrc`31`k&2qpaXJTlxEt~D!wKh;Le1tS;nzjV z!KW`5ESW((r~*O(IcwZ^P6=Bmxc?qwbS96=KURAX*vTf)L{N$hDn=_Z0x?dJaXUuD z$jF&2I~fUt3JMk6j(WlEGy$X<)D#(WBBWGm7K{*rGI7%Yw;M{#`l)%|z&#sI|#mTZ<=-}r9fHtO#fUEOF^yf90eEoK!i zJeRkIB5HN?1tCVn;K4m6-ev)U=Bq9tVkwq+gT$Wew1?nH&B}(DbtHEEd2@dELdxdv zz$#e#5M#U{3P(g@rN2>kOZ`RKzOKqC861$RyiIIcxhWhRG-WPGz!bFFwbSWNGj?1& z6I3AWg*g*MnH7&783u!E1VZ7Tj&0mBjrCYHSN?j=)=Y2x_oKF(!;MwWm1czipc346 z?f&^hyj}-cGoiKe;)81iFd=TO?gUUY!-Z522e262DrJuyz$&15EzNX4U6MgtKi_i> zH9MRySVdW8t7oL=ihZg?KtTiis&}>TMmGm9H_w>V6f!5&#pbdItO%h#s21gOA-?o; zs*P)PLC4ePPB3=bRxfH6II)MIMWm>s@Jo?0)^bJMSlT0#X}W~nlXpeODp8+ zM+Xry&9^BRomci?4tJK}&7$t^lKp;uQ+U~2P~npWYCSiF&Al`cgj@=uW|e>!zH_0n zN(q~`r*&@&NV~3X6ow)m{;qp0FdH9Z-NOb2DJXS~$Y`an9aDJWZQjjyh<~lJ8g}Q@ zey_^05p_)!++VYPkJESZM{V{3VSE2EKXJjI%oCz(Ir~C zO&0Nf+aSv;r)X!K_@=fu6gBVOasPh^gwM|b1d#~SIAWXz6j7K}U_uNLF=`rH5lxh$ z#_hZgF>>AfezoD{>=hUgQNaFwVoER{dP;fNR_b11KqJEp#5t15vPeo@3oXwKGKo zlnV;GJ1SVZ)Br-R0y=YOUXbF!;qKp9XxqJ;AfqcvamYGIB!2 zt!ai81|vX9!KY7O5aWnp7;wE_>xOq%Gn3|u9>$>xQ?p_0XmeG9!qylw}5%28KtreKoyIwiz;}qVvkHjn+{aA9Z40^TY+Bd3>37?<1-(%a+1=YwY-I+A;NAH?GE$cF(u=*<(RU z2Zf&wo$2yNifSfe*mB&`N_oL*D`8s1>7Ud%r*XzA4rC>=3OY*E& zme2%j@~hHiDn(^A(RarfEd4ubU9!OH0db|I#)nwlu@E)uRR{rJ?jP{!_s_^#FpL94 zv^>MIESB;-Bj*J%0_U3(K772z-R)VSR59Y35`O*t0ss8f2&-2|Bv^u&Qqv6g zG^Q4CHuQkjA)+pp^=)V&gOHa}W%$4W=hJ}y`TzU_|KsikVA-6CkP59@Ye}VKqy!9u4oVBOXb`z?7PmyW6^>Mc zfaCMX1emACXZ$_}+})fpjRRr~5GW3Mh!MjWaGlkb7#M2076G7v-j9z@`0!yy2q(lC z)^$9f6YK?R$~YhwV9D7Or2R&3MS@m;=`1{3Vr7=CQjnGf%c9E8FhmS-KuMYgE#-u< z)-lka&cC7^c|86I!Pp7)5wUdk;KWp8K7th-v^WUQRoyF;Xv&8Yg0)alg;g{K#AAU% zXC>8Z%=+;IEJM{T{*HEJ4XpU%zd>`NRZLOMrDz7EY!$Lou|Q?R%9he)#%b2D_l~FT zle#wk`#SKvrF8c7Jr)G^V|jgHdgVi;S%jJ=04v(JChm9f?RRjO>+$0auY1%x4D z97iw_=4-e~oX%ytLsAbHQ1cze*n(YSCw!3>lv_mZ~ ze~jiPJ;>tZLW(b@HzLB46F%QR;r4uw^UWD|cV~b!0E`)kF<=}cmL-`2Ak|`+Y~$bG z-vbbg#8~F6)^GsmV<`K>WP&R8y2`3G%PB02!GairRq!XBu=PTkV8DzpWYF*9tY?)Lg!z&JAvdmwBD}32TGe`Y|izgAXm51eo$-LCY)U 
zqY1e%g8w7vxX9Z~wilNVm5_H>A$IS@_bX7w_uDf~Ao8q9Qph|XZe1yLtVHf{SPM=y z{sE}M?`uk~W3Kz5kflp*|GsT}?@>T{uZDfuRkGiDKi;Q+08I(;G4$hI{o~Cd9Abld z#pS(&@4O#nZ5it>dFfWTA?fehR5&&zqyR#izH)EXUXQXe=f7xd+E+1%pTG~BpOfBz z2K=?2)3@HdhghwZ>DSK3;qz#FK49Lz_Zbta7lhtt1R;|(>d{~SwSGg^x!n^ju=w!V zZ@k}?klN=z&>*lgalFghvhLFMmiHKVen=ywqti5C90vff7zc~39R>u0UX@&smV|kp zO%|ZKN7Wea{EO;Y(-`Kga48HBZV6V12r&*_{HX0`Q&>V!bOo{M0t~2J83=KRn8q`f zWyY8LPq;1_ah!1X;U^FgE|;rjtIAoG4`iHA6Mp)5hmRj`ahfJIRCB=RFOT@;^Ampg z?Gu(I12I(iqZC!5Q+BjX<4;Wv3U$r;6;+z^vcIrD+NBgQ1Tg|1?{4t%;|+#^QJ@9G zpD!0(mlzTAG6O|0PMZ1Za(zZA8j!=5E#M}O05LAJdNh;?XMnWOlILiUQWPgA>Y^8D zAgbo%516J2rDR+$S0p=(#H{PoSxZOrUIdUgwI09g#^n_);YAV-Z09kOe1i=s)=V=6J)&fKs zD-VyiVHnI!RWL6La>jq9&fu_M90!cUsDbBJIhulco}vtx^F=qx zffe*ynaFL zXu3<3*%_2GmW={-K$v~si5v^lZ1eH81?ehGth0ny*v~sWRc_txKd&!1e(zSm@$Y)? zp+8wYAgQZ-o>%X2_&`%i^vv`|EZUqeSGamX!-ZW>KrqCpgx@goN5W!-V;y)yR1s1- z)C=Fcz5mXmP`w`qllmGH_VkBaL5(`)s(nV?`~J6j21Mx#_+_Qws2_fBKj7Z0*ehNu zc$pZ6_-#PRZhimCMuh4`O(`KnO(v3&t!9VEfVqd>70H)8YVX`2q+WXxwbT*-*m>%e zPd8SOcxur7neG)$FV0-vu+o2zMf`})0;ry3>h@^h^*r(B^Y)A>ec1Dc_4`}lSD>gc zhfu%E(&}Y+@OaI(Y2Wo;1n+yVv^%Z!RzzVq{k~P&)<#c8lPI+D6KUbnnnkd*v(v*k zE05g9J6WHc9>K=al3lC<>t1c^bGzmjx-Q+m!-j;)KD*=A9J-DF*`D#EUT*itn@S^I zhQRrgmYflXfZN+!oK6!2nh!owz!0OtgS6UoP!l2Q_foTx&clFb*RE2aES*JU%@G9C3Sdi#Uw9 zT&^g>$Qf9!2{8cEIN-zW3IFx+4nO_;5flj5WdS7O)5A0V_y7A(e7S!D4`+lhsLW9^ zN>chzN>-yJ5f}jlQlZQt>Uq++(QDfp`QF&@CtiD9BLf2we*Soi|N8j@KHQx_lJWd} z!TsYCo}ZqO<{4#VL;xDB69qiKdV_F1)H3cjp$T?LE zzKB^ZnX2aKo|Ae|O6fLv0YQb+4B&ar`2GG7!)e6Fn-OCSAXWi&7y^bdU`d*nUW*N~ zW;!r|=yJK>eAYb4!!ST3BTI&6sZnfS?+J+|Se45j%dLH#P7{I^PTUGyK2^Q2)>|)f z(Tc%26-Y#N&*dyypizJ!jvAPh29&g@Cxn#v{wKFy=nNbXgJwALw4<(keD!lz0vm0Z zj$4_riv9SSrU8(ztc?qbc$?s^Zrcch0A5r+epG7~@x4V~8KD|vOR@C32h3q5s9IXl z{%=Ruzoqo{dqY!Uhg$E$70Y!~xYSF^hgeG9w_3JLe!YTH%&PPr@%yOl=!HOy46AR+ zgx2?OMycS~_uo@i-j8>@qZ6e1D%?jNAQu?_Z(BDfG27|ZIV%w1TGW#)aM^|Zzl$RG zAJJs3nDsRLMP+^UnRhjyen_8I=jiLt50M(xRtg8cj`w_T+om!0w*BrX-SOs*x88&h z2&ZYnFqjp;XeOo@L#-gLX>t|rE+XL2Qq8)omUG6E)H{;BhK28tKp0FB$>vE=*cBV< 
zzgS#!!DOf$jR_%(VQq?I;HU|3VnE=CfBo~nF{cEMjLV#GP0wo7gLD}`vsMy6jkuY# zo`z&q0v;b{{QBt&9-c0s5HXA+C#zbea87T z;52;15L8hRA;biXF<|kuwrb`=0ieP584pj7m~PIvyS>FQjVQ>F#jH)}YJ5B6ydVGWZHrdewylwaR_6vQW9s*`Uir{i(666cCN={}OU%*R0{+N7^pf%c zU>Dn}u(WM zo|(+7`?cnh$8nhV!|NJl5%sVzL9Fh@7z5&<#SY`(-V*|W!KASk%v$Oqi{Sb30l$6v z1QEjRDIyoa_414*WlLYL$(Bwr;O^#x4>vcMhJcc^e8pUVfBpJ9K0iDm6(GbB6k>&J zms~6@J}UG|=e0Y#tf&MsS|g=s;;4=M(JLQYzDf|>-VFHg@q}?GxLoe>@E{NbJU%UW zz9ytx5MYjtI54KufYW&dNdZN5Wpa!di9sPEWx?foMNS!DfDsW`H%Wmr8Btf1K^ayX z3r#Z&L9LcSiWWiffuO-MJZKd_D;{O8Z)U>d^A(@JJmP$sFvN({$TpaOaTHwU1%hmw zP?eJ*jF3U_^z?-LFJDYk9w0KRu#}ToLvExS^|a@fD4T00sioYxWZH_5J=H;_vlHy!Fb5Gso|Z z@7Ojm!$F^Vw^u$i1t=hekOWo<+lvn7ni1rPd4FHO+?x@xg5NP!ucdAegH=%@vd3dI z#{7=?TX}%QJ;f?WRzuOzV`^T^*PjhE!A0D;+$m77@$`Fniwg9I2tD!c|7uy4&=nzkt7*xwZ|bz;!O_lB*M*n8Ak_+P zXaTJIdMxb!fHOyi1_02&OaI}`Z6sLJEfHBSxy29ldttXLzS6*S*qx2?A*JG9-rcrF zidw9(Th5~-J3pemrhL@*_Mc^cHr9<+bowZ>;W!L9PbUN-&EyKw+E5_^Le*oTGFLL* z-x3l@Dac8ATnt(i+W*F^CCh^qDs)=s$|y%dAcNA9*2@c)cGnG`Ak^aAafld9LAYFI z{Oh0pL`oUMc!nTiSu(EIS>b;rgQei+G~n*;jJw+rr_+dG9Fah{&I>+$dBQ(`|AObM zLb+lbKpZSRuyolen}>V5H-Hw8=zMN_^4E$>C)c8YP!I_C$In0E!#P6I6CURaK7C36 z4Y(}8b;&4&aGp+>#tAWS#xMe>6N7{?@q}MLebUZL03l*oW{5N^@)&>^ERd(9n)h8q zV>U9AX>IN$Y0NvNY`E=+loTgND(zQKh!~{FQ}+=Ccu^CO2EKm!a*tuuz@iw>U>-o6 zL7a5ckwHW3#EF`=LCko1dcx<=pYik0KZ98}fqLtT)srwym4O&Y>yFsM6%aC5f6oP( zG6F>q7{pN#sniJ5f&B#CB>;f=y40eL<2YhjWhRgQv9P+PLLBslrdc-Qumy{5h ze-4_MnJnO36BuYs5x1(tp0)9S(3%Yk_VZsX3k!raA3v{!Pp?Ev?gcp@(pzX4uvr`X zl}Tfj2zz+WR`4vcUkCC1)M1lX_ke1uTCr!S?bgnLsAn-3-H#a-`ys{Z6y#mL`ETQM z#b}EK_7Qb^VoeC$q8zvv&R=1IHs)NcrE5cD(a(`CIz7?fuWX6ncB-rYO1y<*z+Dd)eIC_H>1-J7PipK{^(TNeN+0c$Ep|7XtKqz^W2yOmsQ`3NUru_IfFL;vH zue8FuUKr4x;uWUZc-H>iVz3MXvGJCIx_6ruv8f?Ge*nOy9cdOURIj7bDzP2o|TKofuhk!w2kd^Re1gF{Xz7K_w9?-x~#o%V$Y?@GyIOK zl)Ryw`1Py(0%^r#eLuOvcgXs2&JMt`z41 z?(HzbQIQe`_tkaQ@y0RgdKzlRJ-(;Yg!5^_X&5jJ0c4cW$D}nsCe3=aEO@$H@qC>T z;)oDOT&_v8NakXR3ku@EIGqNJQ$QReI7So+ShC>plJT$KzTp0G#*%>$CdBE4oQsAd zB*C&k)8udDy6S~xi&>I2%y_%b-3Q|}Kwv>6pv+hNba#WhL2x%Q$}piU6Fz@=#FDRA 
z5ReKmO=sNQo-oCTIE(>t(1R?ZZV^{1G_qbXV`2wz;DFHzeI*jq(nwAmNvl#rGF-WK zK}Sf_9=g{f1YTi|i5V;!MdC0HEfi>*phIWQOUC`v6*rF;Ow)v$F@iaQDVWwvNTt9l z5T~St$cAA+S{7U`7d$^-G*@&`=u@t4Cmn}CU2a1ld=tr?9 zD0aZ>5<0yogk{NyL5tCFhzM3Sz7zp+n^#Tk=Np}{*&mzZ`{%cBvBdX+BvYq+ShlMg z;Pmp`tqYXT=gHydXDZobVsLzLp3IbI~?Ek$#B0~f<=elQ8Eut=QGg}-@knVt^ zWC<=n)ed`)_WPw>L5D)B5=7tAt$7+|Ju|v5wrgIetP>b~yzB9Q{a$&C=Ir;eeb-IX zdz$b0Hg}%I%_a6tzX)cuGb0I^;lz{>B@ebM#OeBGFS>G9eCnw^{IJn}OkX>t1QX^_ zjdATk-@#Wl^IhrRW>37{_SaMjSdA0(YMqRo5+a?zt{rJ(e&sovPSj_;ba_)Up#~E( zVuDJoLA2&#-DjiqLhBc7*^Mes*p&!aS*1BwnC)D zL;{uqmW;cb5kK9G7`Px(fiPi+5rG+VQqPA7A;baWbVdk3JWUf~3|0X=gFr|rY4krL z$Z)<=ik4F!2b@n60Ks*Z6`l;Bilfhk>H)4wh%@}0YsxPvrr?OfCYoTDOVO-UK3T+I z=)gn-m&=SV4^Ox~pKyw2Xq^bniZezn){=99fKXDw={zBXfM*EiWx?a~6NYI(7$RcH zkY%ZG1h!ft9IQZLRxMgLeLHCGatkKlR(Q;TU?-w^q_@N&*})jSzDW$4wZH>h30>U6 z@xJ(XZ7xLW@3zKPILGv@l>*X3sp@BEjzlddO2e;E4Rljoz+j=uXa0Rl(eh{;zH?)6- z(_N+NC8-&vwjBN~n807r`ZXbe1|d^XYq)aW)%8uTd5^U8-m$*D@0`vx=#?3nvhS^? zJid31Us8r^uU<7myz31(HVnMg=*?SRIy044iDuSg5#wKkT;VTzZ=cH9KdYy-KqTx1 zh(D|~e*W*&sN=!prpkl;v{FK(36y_iThn=ZvtZ&;dzUUm$4UtB@;T&<5>k~i7x=%y zkW_i^kEE~Uids7eM1*l1@!{@+dR?Et-U+S)iAe97& z%P0hf#-j!XiX=d^3VMue5btJ&QrVU@@Z198=Xt@?^99S2@$u)Mky6I|c!k35G$bG} zVH^X_Hxte`Crsxv#?uK15l`2IPoMAc%demC@OS~(vq1)=oUknVPcwT-SBI-LFG1jI zye)Qngv4;83Z#Ib#@sPR{QT1`Zl(xW<#~^r9LEt$o-ta1Zi9z8^At`pQSQacMMjRuSwBUNZ;_3Mb z=bLl&f~d!#x#V0Zb;YIxjoV`ie@Y!v(S3_y(7zNk2v=YUqo?-$J_0?jIG6{&rxfI(9uPMacxp;9x=>0>iTTo4-_wrQ+$)2( zzj*+<^3Ao4Uj#dNM>C3eT*=?(VEG)Ad2__P4BO&n+Pv?b!0`ufUx8&(5oy8au8i34 zvP~=cp69;f?ERhA>8)0UFlKPEI#Z=p@ZPpMc3Gd%iXb=wzW?90YHq}Q>kO>s`cF_m z*3W<4OY=kCW$526ltyK`PUyxSZgZrB_;bHS5&0oJjI?L9k7lqAA>cGkIG;}_S+iET zH{26E6f^RA+=usznODk459jT_^Y=NA3mi2O6D&yx1Cmvin8t`0$-tT|aR7C4jg}oC zJU%|-`RNHEXmP}4xz_lUoEHEEF$7gWrik-t!gM-;hXI#s!l%y<_}4GL? 
z48j72#)^j!AjpVA9JB&D3G$+otd-3zSF>bDNgydWO(RaD)(a^mBZi<>y4(WE-Mbw~ z@n(9?)U_qXu)wUsKABb+LbLjjLMnusOS;(Ps%o#fzl$;Ed0ue;@QAyc6JoptIB0eB zF=8m1Vt0YU8d6Fa!-&Wci8E48xLhu(tOSNgRN=|6d|r3OQ}&hugzRuK^-Op_G(!nj z1m^^LAJlT_iF2%VBgHE>I{cvMJM?KDOQP*rAnDOKidCts*J0W*wpm zZTmVxyC1*p@gw2=wdZ(#aKifM!8_KKS=mY+GA*`hL|ptv|b2 zbHUJO(0}jl>0d$blsI-?DSc?YwTFki>-cp7M4gX~rF}Ja^f04U$Lg^*kt z^={+WwZ)-#<(*6lqs`D>KCt#JC*eLd7@{hb?|de1ePk;1I(Un!A0U#Rq*0O<@? zUOE0~Z2m2G-yN&>Xz>AlZB0|^XP~=Vsu@-d2rOzTW>4zjw6aq!H+{1Q?P8?X^j)(l ztuJUbYkE4-D23a0`Gnp7y^4fv%Mon;X9HB~TC@g9ONF1|ee{2b`w?18cZ|La+2Z zsHY(!gaIOi`8s1+7BEMgPba`UBFmECb@rGE!^pU~8F4;O7{&-SM z_j{zJAaFz+CKRwDYEs7HFn*n^I&Z5wXx!2eZ_lnRL)|}s#k-IIC4*5AnQ$`>xVt$U zT8RM;nA3vgdO={u>3qWFx`1g02p9q(22EfRr)k7#8nGls5g^YAu6ZdmODB>6oQ9|Y z*(IsR#GqO^XT`%AVI~2P#?s`(BvU>XK!UBChf0XZcs#DI_%{wsp#r)P}gi1Yc35Lips3wQN9^u3xs zSknlrc4rD!jTunIghHz|;qf3Vjk?MLhQ>5+P|g|Gc}5rr6G!t7X(jZuBooY{6+WC- z0tpe-qDxIsdW~u9MY0`H_!fiE3->sK(BH-4F`NmZWiVo@0padV+GYOPy>Kts)oblv z?^y}v0SV>-A*u;vx%*}(U>%Kf1)old>)l*taSS zIZWnQz3j%@L?WfRw#N&Qi=MfhRS9|No!{E}x!Ze(^lUrS$~{+R!#SMy{<*7j*p2k) zTL0;19ayhzcX1UWiWwrLc;I|*3HWhy)bDSq-@Wvjt`BY@T8*Ww1s$T@Gdhu~Lax?F zp$WsybJyPGXGu1%aZg)6uCLuoZwu3hcO8DUQu;Rcx$>d=2&)UW9yfijj@x+})1HEm z@x|AiMQbI*_8v6>@ei@wf9E56g|9(dU4UsGOx+s_ktR_-@O7EQ7liRNA?smgUYV4YlMVolT-$zv0bwjnNdAl>*{|`EG144B4N(E!^1sB4!D80eUr# zj?`C!C^mF+Ww`jhCbCU9u^-!x-}08*68ai*L~2MJ#{n@gQd$7B#(U-|fkCg5$xk8F zO5fg6QpKd0v6!H87ZGV324He#sp}am){sDe6qJ&18VNV238!%YLo0zh64>vFLFKx| zjOHm748w>R2Q%npH&kL9dOV%)pl#7=5m`=D{ z6XHb`kYE`{3kWfUU^OoWgs{|ZMuYr_P)+7DL`Gyn&I!x)g6rj?ZY}>$G-yUxDJwyX zkBD>(Syx+g0x&h}P&XOXJyslRuD1m>S3vU~1zY@r>zwfA{u!rnM7TSt_KSca7NnrL zop~5=U9xGN8n_fvLP`sspD*Sn;_AKwOnL29MLA1KoNn06%mxrN>&j(rh8Y#>%}tA2ys)N170dueqn zychL1cf&?26cmEjjqucX7Vt}3Vpn^=8pm$!-pMRo`2P{DS61Gww7mE%z!My(PzqGW zy0OrSU#fPVcW#ujZU3D?RPPQquOipaUXk{F;}sGp>QyL%H;0|wEp=Es* zx7Pf>bL+j+s%X8@ciRhi&$ZvtRy4=xS#cWJRrKyX2;2A#rIiE}%kgFgK!A{|2TNahCnz9!5h_><}3pWQ%8JM$HJMU|;9mQO(Ji(iHIvNI8uZJtu1i=&;H>UxoF(7K% 
zJC%hA2D^%evI)q!;PFYTfs4I=NeiyC#a)+-A_Xx5;}kHRETBCyK!PPLxPO}Q+vj^c zJzoKin5Hv`BT`X?YMvLQB~_z+Kh|oD4a!Gd=Jub8G+M8tGcVmEdR-*d2To(eX&SIx zuXuWXwu0dWkp*KIaK5?4G)_2;GtT2^p%5x}g%}a%WkFy@5z(^kNFW3i$eAH52xiF$ zCNP$gu*@^kvVcH{!w3!mDJ#56I$<;A=1TNKsVnOTU!S}6f-tcu0A^7nJM`kj%?s=h zmZjkF@q#Z?#A#sMo=320!b8kRnSn51Nf*p%K?n-d8pja;aCyEWhKSqS8(kcPdO+Km zB=dq4 z)jqzv(V=m^>-$^p`9btSj1hHlzAwS8UhNIh54E&@Tl*f>@VMU%${=+nsxr+MjNU-yLH^o#+P2r{texfe_t~xW-`=IY0D07!;B6T>tM!|J8V`ud$94*csO(W3YHgL! z3rbZ&Rwv*a&OebteCi_rtD@bF<6yqFN{IgGXni+~Kf?>MGCXhk*$ezz1E2Hz{z>f$ zU9WK*FpkmQQV`>41}xDUYiYq^g`HjToacGfhd29gHI4vuQ0e9^AQIf1CQO4WAAu}+ z1Ob87kk3&QBQZ0cFIPM~JgDqUgats#1xw17&Rzg3RCqc~R_`N%1DmmZ!I%37eER$a z*OU?Ch&T*bQmG18t8)@pWAweOvXFZqE#Z>|=Q9uMH8L8-q_B7hfeK-a0XL_B+w+Jq z2IQ+?p0E1Mz@Q<3#(;4gF~kAWIAR*2)@Kpjxf}xG!^0!;lGQyFBBU@t0Wl0ViN&g% z>xS^)64xqYjEKXa1zlv(to?qNO$el-(eiGItTK2hHB6^J=(=HGL|V1bW?fL>1YkF+ zL!@Ay7d$*X;bx2&hJb0{>S`J?AfgJ%k~G*m2GvSSh`6K+uGcH3N!?*gK~s{ge3q6a zz=DVhTVWSACQ}?QZy0p4piybI4-qfx)Ch;?mu3^T3xFkMgkUpcnH(q;%Zz0feS@ME zpG@(mw!`sVZNrz}%c58Ax~et!k50(sz6!Z1ccvJ+bz9%7Y^i!qpl;H-XNbDY4Ng(j zQxx3$VQ?<4%C3pp?sgPS(cA`W81GjU5O;AOwW@G+!sttF56pPw+Nyl$|Gdj<_o)ia z?$z*FbW8lI|IUSc!wR9*+i8N2rsVc`tWw>@CHMQj{Sz^z>vgHUIv1XV96|Dg4^SZ$H+aQD`DR|r6BTszeO&pMhpa zcL`nut=#^~9Ib!bj_*C^%kJvD&Fx;R69w(F?iah4+S9kj2i+88718{P&Zx2NmkYDR zVE%@x*3y*WHEBK?|5l~mwl!hjU$gl}HK?vDeKy9Iwc>tzG=olm-h4e^bFYE=o^5hF zwC#41?o8SOLxt0}4K?)Yc#rsbReEVD+3N)N-jFqH$1^Lrhoiu}$K=B4de7=NT+fw( z$JbN0o8x0N!R~W6HnIQjNI<7`v`Xl*o*Obbrk^T1n|AeXOfNM-=Mfh@Z?aQFrSY*! 
z(@__*DInc_VrQ8caR`{k0R)u~IWR)BxQ|khN=7oFx;F$$?VrjiAqK$dyU{+_JA|Zu zPhCIR)|sFR#xxFy!DI`l7kuq)P}wGzjOWWUuJcvpjZ%=VSEOB@TrUc}3UPpdREa6-{m{U3t6J2_F)C*@1F)}y8WJ;hltp?- zv&G9wLF9tlA>iXQ;C6}_m{7)u(>UNVr+RPTfXD$uV2nZ0QzXUy2_iA#{rv;doG^_y zAS#%z0;T~m4uB+O+5~V^0f1QL3JySw0R@JutJ09W1=;TPpz@-bT*w%h5pjVaTlBr> zTz0pad9y0+m`T8e5II_AEcPixDjAnc!u{h#wZ?D;b3_Qh5P>-r3d6jj-`%YD+b89M@Ku7JFhRNq(1+4%qsenV) zHJ|{J0_Fh8T62LBK_Z%sq02ZVo}~I4(#UQFjFDjAo^Btd0oirbE>;CsSGQ?&vvhb5P1RtCH8^4VBjkKAHT@| zoY4!Qeru^ zqlK|}^E_|%EdW?%zSgh)J?-5h1zvQlrO))aBl}&{%bi?7ql)SfKj=-OTK8ppUZe@8 zecVr51Kp21Wo*?pR^g3DtvXcwpBN2*8knRIDC%4~6i4j-hNIs!QX>+XgzJKoE5@B1 zqxy-!%8*&#q!+r^^Ya?L(6ra~P(e*oUhg-a+COzmRt^&kY}>XAWatA|vY_Mk?|onO zLXC)K-*MxRbAqbat~{a5Bv*i{-)^%=ZfiYOpO6c(0#MPkpiC}XyyiJlo=kOIRQEaR z?+1dvc|I6`7<9bWlCXaHKB&-aj#^CAAU1xTzlyr|)!tRwZ1cMrMj%C5^azZ0Ns|G3 zWsxAJ2q_GvdPnMvZ0KXh%e@MlLUCT3)2`Sl9m^7cWf_HKhpXqT+tl?LrpO3^Nf2Yi z?adiOR2V*y79V!lmcV09EIv{UHZP(|MKzQaj2xh_SvN&My&34oUN zNC`9yh-t}qn4fSRGMFMtp(=!zBDLbSS9^8lP|PAqOcq1ZS>j5uvt-u&lq_I-&y=>! 
zg{{8Ky$kgvX9zR66sv;jn>9dCu;ha0>w?Fp1?TC6F=`>2zykXJ*n8I{$#I-Z@Bkzw z_lT_OnO(De|K}SUo9@cExVuCW`vU+;$rabks{L&3V_S4qc*rG+7Z3+H0H}ZiBc<37 zU#XgwEyjSH3vRcY*8E64Kxb!UP$eitq=`{v1=R|#`q{RYMyNLrNS{y#ALC~#l$f+C zI9DK}g4P01P%27bP$5uN%2Fj*#q=^14*DA4_@~>3UPB&mK2mEuGsA$;X(NuhSHgta zy}D)Bv3cG+SlO0}3Mds0dM`Hz5l}i9IBI)Z&F|nTf5rRp>3b4k-?a@Nw|YFv_+08% zJ!>x1YAG!(b+MmnX;xkF7=x~x0%(@ZciV^JU~8!%^t|Ps(ac@Rx+dN!-1u%Ak6-(* zCg*B%!Mi)!y}$l9KX5fRuaEWj`tpAB>`4SzYFB(DQh#4FkOkBZIsiZzgGyzejz1qX zLd)v3<>>3i9f8LYh`GDnAZy%+(Ph9#gY}s-l$Of7ooU-=wzJ?J8S$Md)Ad-uRpTtH zsKtI9ldwZ2Zv~fr^|@V#b;TO({hDWHZ;h`?OJbvV_wlggGaqN zzKzj*S9{c`I8}*uwA$Aa6hSEl>Jr6#HBo(!w5nmzu7> z`RtyW-{b70a?H^(ZC4;-x98 zDb(Y8=Xt{U;iLv=1c)%M*Bk!*?-%_0KhL-=D>y_DNZAOvVp+AcL}}$Cs3mN14@`)` zGG-cGxD+$?8x3A93&W(>uCJ-738$Iy=alf{l<+V`oM)w%WizfX>x$dcqNhYHkdi@B z4dzq}2o+L`W)71XF~!Lyt)f6Z9#cA5^>gL(oH7d)5gCO`t22HF<>LQKATlZsojQVMf@zc~=ka^>YWDYJqu2H3UmvZ)8;d#m znEH7}6s!)XHU z;KQ@gRZG#WfP~;#fm|+p)aq{Sdv)ixQi>im&!D4o7@8dLHRZzdS7vArKE@cHG3(a` z)WP@r-@6ALfG`m%|2-h~qo(iL<6Z6)5Bi)zWR$1v%o{<-ur@#2w(`L}pLX-#HB6*{ zovC{y$35Iv;qPtZnxu+Gjox-`|B|0m2*1(?z3UbMu&EuN)aP`Qdfd81Rwj zY-~)7%5^BJJjCJQ4LNL&<;t+g-GLo)&|&%5(A+)xyX!o@VwW1ub%0>8x*@6;0)VC5 zRfBo47zj8UkYF-SiCl) z7Qvb`F1H1_mZ7Fu7sD-dz!JPvOP}uvWSmW;UL&nPMWR02K%!m?R73_I&nNu(nDED( z@OVC9o)VY?q5#Xpn5F>KqW-s{OeUop+7A@mDMP3trZgj+CrA~%yu2dktX07!g9j6) zqfIRisD%kh)M}qXgjXIVvDz5=s?cI@R(Qk%7dPv+nNM^>xHY^gl5Gl_fe;Z*t+w@o z=vu1))}j^CUtbrTPiIU~7eb5?rQE=z>CGe?l>}<74S}()8OyR@UMEcRgur3gq(HME zkmieaODt7W2&^N@O?6J;-ePwoC~5~GLI4X&5v;2|BeexfI*;gSYdcrg`?eq7@3r4! 
z!F9S62Ep6wIqd0H_cBXt;hLKD31wY_n_xxhWRp#+eV53<${^%Ec-6P8Q2%sOptS$2 z_saw2M)08rVOJ8>^}oHz27Ew0Ab!ABk-_`nF&^mm|6pwPz~ej3*U)Im?(>SaminHM zVb6YKX?2Uiw$Bbed@qbC3tds!jO%Y$9-G5s^9Spx14;I*IG@;~zj58lw_o&Q!PKZ4 z?Ong|lmUG7RGyA(fUT+Z^IGs7z@WbNKG%L>4u(yyh5@H1Z&SYR`{`dD+jozLd+&ph zrfdyrT`%g?J=%hh{dsbCzk|MdJ5+Svc$e+m==to`vGqZ_>|;Vu%BtA%*cum(n#;u| z9}oU61NhkQnXyKUx6+Ik2lsk+BXrs#h`zLGzV1PJySYNYrGdToT)77yn~N|EhYDwA z#(au6oo53WFosBI#x)P7?uNE)-ILJ+;svq60_Fgh6FoJ#Q@&N-Q@NsympMkvF_?nh zJKGFnW+W;(<8rxTS#A&!%+rLJq9zq;iCpxonSz3v)9Hk1nm}OWEO>r-#a}<4aa~qa zkIxR8h-j@9OD?$FZp~PIG%sR+f=o|sUV4*}-GJD?&+yd48c%YXA|B5Z9?mn)bHHg1 zNRc2?fJ#svROx%8RvWw8z0ewBjxWW$(V$ceVQotdCiI7qRQ2~Lmu1;y_!e2yMKIRZ(n}AG&)v|Pn zXclPoyx0rc9Td}8)ypcl+zS4Bdd9=U3Fklm#Pl#>stL7b1;B(fMaV6C2&FD4aZBI5f;qO5-&YFXqVkO3q3&O&c14iPYCznBpER>k z2O-3Q5PkE0EiltJIx&Gw!R_=MjfY^X^$`MD_0j$wr8sYstw_`{m%ROcV4>Ro?!k(q z;(sWO35|mA(V7@Du(%7~|9d=~+;hmJ>8smZY~nMz!nE&Jv)GAxIMr>f+*vgmnC*wO zlC!}1-=3Xr?59zbei3*OfEXe|B*eh#wjRbyJ)v7qh-@se%1_$&qvddK1&)7*{#4 z8NmYu_dUfM4!&zeWo`yW@&Ic##j<&$rBRfdSIX!zrIyXT>u!CtT41g$?Uk$DvL^tw z&7G~gvRxY*ct#jA8NuZjsVu8S&!{3m2!t}!p8$H2oMDjLSn99T;^{V@4Q&J=USmQP z=`BF=qPhSN6|L=Y)yJe$4dEcZCj;KJa}<}=JYZQCh!n)A>1mnOGn1{%g2xbtm=I#$qauQJ&B!IAWHl~Dqv<=Pk^w*nMQnYx zvJ5)s#EjE?!khx86fEWMYS~Y9K0@U7++|&{E*Z4|r}+UPseX(MltqD3qdo^zDqWyTUld~#^L(%cq=y(A?IT!JdUtP@u2_;DdsPUz5^k&D=koc`+;PK`-?EY+43r zT599NgyQ#Y1Smk~_kXqhPVg}*rQJg$n}bVgwwgYSL2=f)~jwsBr+U;xH+ zyJ}TJrzd@IcF7xu@Q00Ds1HR>w-@wvwfl>6&Mn#C`|k}jkIt4}(Wz_T-QU0VCfi;Q zd*b+S?h*XhUaiV-r)9Z<`0L)w-{0>0H2nVgaDZLgDXlv&_qQ~Gf^@D*yEu531HTrW z`~mg&zuOz@!zfkXZDULpfM%fR2tpNiwcdN;TT7T@$!W?rYIp#iaR>+L@_%=uW6=AV zZ+_qf*~|$m4a?OZ?ibkzF__#ib8leT-}mmb-Lu7LlOX_U3R(%A2-RYD-4NPj@&Mir zy_2Nz%C2{W5LNNESk$&oY)`8mYuTji6p6&GlDCTIV$>@#-B18x)KamQjAdPwk`*jY zc~yO`_mjZ^I87(abFxfcn$_xZTk!Mw884R$maHrp;s^mF7hov`xfT>?BAzN+pv*7IejC2@{k8McOq0CF9P*H#}d^t!hJ0-hJ*@n=8Q$Y*-h3v$JI{QcKTJ z&%t=i9)SLt<{2lHQ|JIjln!T#8?h^|+pI8e_W!^_<^0LdT8I7)@X>)sd&&lUh)81@ zd2H0EOxJtP<5ABWz;J7b@^-&sV;?s|vZY{s8!hkJ{vh6XKR!X2(b}h7nfkBPHk@*zyZ9~$Zw|rx 
zwv}mbueC1Q$gJ|ZdHvxB-yuIJD}a5P)vmyh|BfDe1fZp)-WN6-I=kzyCJ&L}p8wr? zBO=hQ2%0-cf_Vmxy^f!2eJ*wePjr%I+ijwsItUTeO~CtS&ncjn7ASv%gMo~0YDDMm z)kW7o_WOm2HHK(RcZ_2%$lvK1`3QWdIa$4HF$PRAVVY8-9>&-`fwfkYL8*7|($OX( z@>q}9GGUEIzR~NbUR>27m;{kEMk7VlC!KoLji=J|o4jV+ZdY7yE0`1J>4Yl4bzM+I zJ*XAR!iX{AkH<%xrdi{<2&kptdb#1}(+i$oFIY=O2oop-h-g-=yk=z6VJiU)Em+Ow zW(`&cjfqgIh8&bqQ2g9O&(@}3PJ=F3Em*4iGT}TW{Bb_v$7#YT1x%4N#@0L>9&jIn zAaJ$NjS3;vJ-nu!I{~ISA;f@OR>V>YuFDPas+G^@X~wc#QEF}R2jQv$ATT3XDz6v= z){FtIUBNLRoo4l3K-0|;Dp1wR5J{u{)eGuw%-td}K~V;)9+Safpt&pEy2Mq3X4K_r zw-YT#|NMHzY%ZZw(#g=tt%#ApSu-RE1i2JIG`JhkijtSh1rHBr1t*AFBvEw1sXp%r zoe&D6Hka|+8*iV55pvdBeU`t$px>bVb~h@SBw&ksfRY zgFpd2@qtlWicuBW>I&`Vv~j!J`k}t=$a^7XMEUOqY|lp1frdXa+D83C+ju`P&i?jU zzE%AiEtY0+aJ$q{pULJqT&>KIzXO5npR*Z1U;Cf<=7AUF@1|wF-+W&P0-JsnRkz4_ zwX;5KydOEQ8!#aUYeg(ZMB9;WVB;pR%vbtXhI-OtAfyx!BU!dNRkSOKo(IBaU}oIb zjMuBC#C?2x#KZXkORiXI1zW*Dae&2y#~(AMq_Na_)q;b6J-y)P^9ynj5GQ~mAgXU) z*NnvzMUhuV>+h>2Edm5UE=4_j)ni-k>DvnaHMO(`-dDr`%z^OdIpL3U!ow7BN`w>% zT9{pIjv~l{)g#01qx%Oepx}E(W7ksB+HAMmidZ0!yyE%g1#7X&DqfdY+-^72qMNXa#wx6NMF5c3f>TJC&nHNE#jGYfF%@aLTI{QL}NLQ0ca^0kd9Z({+f>w&72y(~bQD#(Yt?KYF9uvIqoc@A)hEcZ8SuVVDTd{Iv2MnN6Ft9|v1 z7AkCbBg|Y;@W#Z(r?g;f+t7Eyow`eS`!S&_FJDy5cb8V^G&`+>2mrg27I+W6FFMQr@4ZL)j-*Fu zGQ-yhi>7CFwzDe|ny8b3YUxrpH6m0gAu;7_P;^ALv0DB%0;Bkj2fo){aEoaJzI8MA z4qBR#uIfAx4LGW9<0|y^-Ul@Hwf5VQcl<;7J*;1ED;_F~e# zv!|{di3$g5euULQ&S*zzCuF4U13=Gf4q`#*n!QN3i`B##R*Q(%3~`{WeP|nC3y$P| zjkSNp!P7Rol#bJIx(y0oi|f>1dY@`9_$-5E+ndJy>pOWme#g;mM)xwe4Qz$zQg6z- zu51184%);%v-P~ybBFudJ5tzu-{*zeHMXZj(2IEPz^1Lqz_6g~$-E!VRxxj?c~F&U zLnv8VOrR$AQM%J;7;O4gdkG5_5FdUv{Xl6=j0zHs_dQJ$PE%4w3{?J=x`L|8mKXy# zL@djKpHI)orQnZ0{=nno4-f^^Ra4&PTu_TTB|MmGFQo~mdB)@U1gRC5>lLrJ70a6Kz^#y+QKceArDZW|3BO1I z=jnvH&Ipk9r@g2*Z?iqstlk+$KarSJ-y;5{aW1O*`OEuq@9;)VsQ5H3r>zkj~s z&mXh8&LSfPMu`zAM%+SabQ4{iJ@`qAgFMtj>c65y!`c4ZtJ;$VfO`^wwm%5FS}j`;(nnx_$n3`St{3lT=<)At zeoO{br=;y@Tw^c1RqmfRmYD+xI_|A1G6(N6zPI$g(7~(U0pi`gTAV^#`?|(l_jQnU zY#wBxI);`nYWO`!H*b8at?-n>ll1$ROO? 
zMQVK6Jgwj2e$A-Hg;p8cGSM_40xj@c?HX4M`+TDTl4f)nfCFUkOF6cpHx|)h8)V+y zUq1Tka7Bg1!f*puB67F*8wf-P6i#>fxTnW`a-+(&-_(8@U>#TqnqVjNX|TIjQ+ib% z4M`sWko1H%Z}r!$>284e(7^Y`-#7Pv!=N?XWw=%$mVHh8EcMRAC{`{(hU)}YPiVi7 zM_KamG{MJQMD8vh83{yW?3j0&sF8I_0rQ+xX@ygm`#$M$i{`B~&xqcl!s%$8p8Kh{ zu=7Y0hcGjy7?6^C<5lK)5``F}8n9D@fbjJCircbUVda1|7vz%h`g+Cndc(3T04h## z#>4rHhsQIFY=N>?JU_qUum3#Za=8IQ;Fv%hP>Lo=T2_tY0MTY##l|OwsN(=MdRi%} zr>jhNU2S9%=-!}`5lAq{h##kje?3h2af&!a#uQ-1ozeD(mpO0%Hm8I+MO@85o~^`3 zE(IVhz!)`h9ZWz}^kXw8tYuZp4hiylL#~3; zX~N^fBhoYh!pM2WG)1ga#PxcErr<4rO(2s#(+}`wb+ET1SD;EAETL_MG>cO$#S{R$ zj?n50ROr1gw-rC1UT~iNM2b-h2`0gm1JcS^OGPQ!l#1-9l4XoB;_B5&EeZL7nKHEm?{+>SK6|B`_ zG1ULqW%)V2uPtyI)ApkO8~Wn+*nqwv`u#uC_Tb})ZbjfhG0+W1cb4^ofLNvmhJIxp z-uv!R9MP6e1-QbY{py81z2Kk_TC#@*K8mH@E!DtBwSe?q8^e{U+SWe1=Z_xaBKPQm=Hcs4-pH;GX``fG z6EWoJ7Dy8H2IyIWRv$pUe)_ETPTzR9i8(e?8&l^@>si#1YH^wF+))#%*0U*#zmJ|E^z^QjiV&c!H#j z0eJ9YFj=ZxDTo|!o)gX|_4rJQF{Oay#eWBo4>K5w2m&$YX~sNF2;mhu7cAE2QoLe) zR3myVNP!WvR73z`@g4JY(m~3Em*-cU=Y&5V&+x#$Vu-5R%FZPtSHXNb;g3ImAf*YH z%N4iF1u>p5pJpuUO{4WI5Xksqfz<_=>z(lSni?ADR{*4gMU564;yJfvP-@+{A)*|( zfB$^O|M=GPHO2M++kkW)0l`h4sN}6vMh6k%D53Y(X zm@4ui^|M&|Vii_qff+%A2}V^A%>}wUd5NvsWvQrKS{l|v%js`yZ-Eal(xu>_gNDuG zx9{Oi%7Dj^xHkh0aCizot#s~SLmIfDFMtnkd+>2*sZ|a{fA78ZUF2@4ogVRPhwQGO z*!OD>PE^QN>(Al4RDnq8Rm~yrngXt9j>VeXnhX7^ANp!4{B265xYl#*^oIEnF(aqVjtmR98~#WsE>NZ3@8#a) z{RZh^>hoTZ-``YDHl?JZd;0teCP?YV%MpMygJC|UVCnihWieR# zUNNipvSvIzzo1qiO(#s#41loYjAdDoO93&ELd5Aj;dDMDO`2L>0K8sT{Cs)E^NR+Z z3j#m^6&jPgENjoGcCcr9)pJXHP}K9`Mn)z#o_6q|+#@w?vs6STJj@XfrwMb4h|%cK zF*NUwO{x8>I+|15Vqi?sK~u(RAY`o;vmA|mAr&zM#43WUCJfwG!s&Fzd^(}589A@G zU2nKvuQ;7N4x%5P#9oN6HjW$z#%s-}k^$C&Bk>xLuPbWaax9zmx&_CyS5nJGDVo(P zXA9tUO{EKUhPttxvgN6OJ7q;JEw|f>pUWeuIF8-}vDzA{ zc!M1E%kF2h?d}BiouYbVUTju``+IqRx|idaaTz_TB=mgISB#NU25tR*T1l`2FnWuuk}|Zb95}viIZU^eQzV@NV5G1u-xqrq}}M zT(GtP;9YATp6c#)_n6YIKX13aRe!HJcCW_vTGfI+_l*5gKT>7-y$=*D@0VS-x#4)( z_LuvE{t&Pv%A9^gtv|Ys?_&*qSxLJWZ690!lasv<_sXQd zo7|5%I)G^F?oPzoSPXJs04*fz>z$Os>9RXi(TdCsglywMM!De68`p^|kNZV-PkFl= 
zjyJD;>u+5)9_(*WSH=g~?}OMBc#rqpP0^|~It*kC`? z%bJk~*<3B|p|Mm#KuY`CJw^u;^?EsVsl9v9IvOp*rjMl=4%o9&<%;X=iraMshlDiE z00pe2VqKNS3~0*VDMieu331Z6j$-QFU(Xl(=jRJ9OGc5F>J!y;;8OAyu$vCv&c-SY zXt|fh_3F`(LC~Q2VDYo41tf$>z#sF3AE$&V zKY?yk^u{y%5nRkt&d@E^uNE ze;^V_$&i{gpD}A7`RQSXpkTRW?Idde5o~df^*tPYGckfuMbEZX@_` z+LRx#BPHtHL&LxZvmam$b2h-@o5FA7pLZAP{rx-eA@eeAh51OII(D7EaV)lj?4Gt& zrK&6Z8y=bQ-rM`r-O1y+a<>fkAYr}HG1%M&Qd{PpBF%f(<3DP%`AU1@!I<1FwPS^* zxrsrW--(o}7h-HewWn|n@2oxP?>=H=IRec0T|6G=gG+ZHrRf_O0mFL_8v*mZ61P>z zrQMeZ3T}5y)FwNc43WDb_kIo^ya7N)&!4U+6>+s?J*!4_ZC{Af;vzp~ZO|%OKHxIIh1D z{Se*d4f*USk8^aCT6m*W()NA*pDpl}jojITc2`6~aB_r4wmp*pJ|FyWLKe?DrV!8rxYF(BDFtw7SxX@O@vbi=CR#uCBegaMquDsUT|BoS%OsA4#d!uL_Jf17M2SZFs1@MX8_TIwn-PT z53spd-J%ih(&Q0}5H6PmFVh9{957E>VR?!Xb1b-pf|Xs7@PV~uDS|?pIxVH1vFhlc zAsRe8EMPX5-5bK}&ayJ#4*z8YLjYR-c``wwRqHJqK`B-^NnAVfA?W{cV6ojQMcc1_ zws~f&Zm}LISp8?TCO1`f=wAa1Gre zFI6-bhl3D*=4Q;k`24;uHy<|ZwJYMaj`2s^{cxw&wA8wmZg@-~>WB0@-Rz3cuQj!{ z@kX#RG1%PhBjWVzFTdJGrM+ACd*+$0lpjz^d>zU7{=ZMLc6sJ07G*0OPzBrKjB-B@ zhjViQ1G){hnoREPK=kG~tTskJr87$fUt7~}uyO$=ZWS!Sv#ZTdF=4X&g0 z&-Q7X$MILNVX=ZijZ;!R_Cqtn{L}Bb0pELr+Gh+#=pfwVK)`CDHVCbsneHQfA)t~Q zgX~$Bc*TLp(VqJ%fNg8CDWRUiTzaCHf1urafsR4u?*!-sYV|(cXu8)~)No2o`QJR2 zsBc_5kEOAusf*(yy#{Cs@!|T}edK2|Gh>=fIY&1*x(7uwIu(?hEh}Sf`%EwB_tY5F zINblZA29@5hiIAQLSRf5P#t5mgfso-Aw;8JY1Y$aRf<(i32B}|L|893EVorN#0J7_ zaTO2e6Q+4Wu?4>SBMO49{fIzC?dRtJdU}DB}I%%v1X+_Z6Z9y&-tfdbD5tNz{ zSy7@`deV7{DA0v+yT0N)pD|ApmPs>c`5{LjfEfV{F=5oKXyOWOfJoke5T>YZRYfRZ zK-2pT2jI9l1cWM#+mi8ex#9dUW11o!!h{qe=2S4nf*X&As0Q7(z`7z>*MbmgqyIPv zX@@TPrU!yBYVfQ=(%h&YHW_EvC?(i#o?ZY;Jq+2=b%@BJfT)3wmf zw{1CZHAl$GLsS4sj2=Wb0vxv-9tg2!^`Rb6-Yn$Y1(h=?dKL#0c6Z$#z{l|XCUn1f z2z8(;Zyy)~2yu(>_?}<2;<;6P+mEgUyPLia!ph8Q+fME7>L98EkJ>?rYPxUs=t!ac zP8+Q{Ua-*%SUQLVy23_oa`^=%V(Zb^c{th+oF8Ie@^qK`)Xg8w<=6YFo+>>A+`G^U zkhy|D!|#7X6ERPS^!3oziLPUpugB%g*FtKx14N_g~a-U$dfBt{RBW3?TpR7yVQ_0wQiOc}qrZ$CMLG8rtWtaZB3;W}Mt;0+e%HSG-1|3r6 znh}JcXFRhYg@_-g3IF~}0hil~rva%W{2>RvG=z zt>oFend$Dw|JPHf1`dkOncz5;hk4a+xzcLC+e9^QhX=9yX0zw@!1f-zsnt^PW|~|7 
z9yL31A7NE6u(5(~zuCva({TFl>NS++wUgcO?OWsEZYG=#JPTW(sIJ()9yZnC?`nb3^`V{V zbi4i2>Fzm)3PuBLV(;I_p!R9Dv4V}&2s{{04{#;*qmiJE2g9#+FJE8(ZG47a2-IV& z#`wN(D|C1WzFO?R1`x1c0QVu(em!me?rYCe@`{+_eOSsH>Q(!k45ayv8x^nq>`#=8 z{0(i~$JUQgyn_0?R#leKWC;xy4u`J&XH_u|r*Vy*UEo-;b>@nYrM)FV*}P=nDWgsfmSSy8Fca%sZJcZdI%=!R#w0^|Wd z6&(GUp(%7R1WYk$@V?}Cd859VnaHjiuO;kR_kJijACjT2OXdF=03i=lvd`J)tKFr~ z6DcTEC9L1u!Au7qdauBmWg)is5{;7h&p-#VV0BQ>z5U-^U@*y2*CgMQLIGFtH5BCkqFS4Jw7 z+SJ~81egNLbmGPOs(tspK8*q8-Zuke@b?ruvIJWHMvIG9R1~|;Wv%skxWCQcK=bfM zziQ$ARNHA}q6!5=ts|9TSrl1ibT9bOzOSq3t;|lk@5enCT&dh#<@4(GKx(V7T&;eF z8ruX~uVn9m?y|;v6X45-1`zs*;TV+kW<28#x@dS$2WF$|E zqP}OfcfGG*Asg@j2M(hGI3N<*rOV!ckYSqk0A%m|+3#y|Wxw@Gdb)JaUUVc2{7eG% zVvTMkmfQV5gV_g<3i^i3JRCjD=nf1w8 z4;JZyT(kf-F%P>~_mRhM5LEAHqU~VdDVqp1()jz&@0w5AqK*2Etr#f zB?56ng7cJ+VnEHXJi#zmnIKX`UMsGzH-Jh@X?D96EahftAFYxz`GR#R38+bm0ldwR z4IyBPlU8J37geUK1^n5B>7h(00$i7Z*UO5h=PTqc{o5wN{$@Pg9#&48K`#$pU_|rK58$Ue20k z74rA-idA8xhQgI|$xWOSv8#3mmR&uJ_kMP2!q63UrQ|pdR0Gf*d^knxSQt0z7W%VC zE7*8&eU`q`yaI1p^)`*xtO8 zJ^WLT?QuA}m>@D;gm(uQ@i@1xfXPM~`feN7fCl5Er5fJ69~_vV9dP^gqwN#m<2N*y zL0jX+_uJl8547;YW(OH|>_*W8&<<%=%G?2GddH{N1PBFGswm{Os6Oh0d)y2d;$C*{McZDs zcq-<0PZ|MC3(aij1FE0^P#XVM_HE@rF97K4`!2R_C+o!eS4|PydOxH&y?S2>V4F$@ zNgpiHF;T&epKU@gNJai0KVJbU-= zIev3&9ITQ!t8HDa=m8-{%Mc2yYmwXiMj5EF4}C|Ca?0b>_TD6?ZZ~QXGlIn_1oNiU zq9>s0A8d4Tl^M6&4Y%7e+)@Rmuf1T&h&=E**}ISs>RR#i^n|CMF9xU?OrfpGHEZ&v zTB~K?8j=hJM&!_b7ZKE)8(pjIC5+HoKx|p}Oyf?GaXuwHo+q5nlMiqigC)Ndv-u5x#IQphEgkDuQ$Z=JV8pq3J<&t@QUAS zt-y7`nk%NrC<18ZblZ$zq>wOAGg6w6*9ESSiYYhq;|T=9>+36~Q^Yw{EA@ZVy_sRkh9S!JE75$Y~{n%k74r&o2m(G)O*Z6~`qmSl6wBSqJ!U9CU)nO%@r5*ntX0;?%S_`kEXqBuRfg2jlyuCP;3tDF&j)vwL3oHC|Lh(I@a5b z2Co|rYsTY;LjNqVa#ao7wyYgAu9f%4@x9g$C`CgyKx-vP~U zwYmvxw9iK6P1GA!OHb)I7NOkrY`B+>hlBlg`2RCQ^r<$25D}OcL{Qn^TfJ{}NOr&j z>gpK>{apFy`rQl;RUlMbp*_#_|6XgBZXU9H?K;1ImAk*o)&S#wz6T+eZuJm^VA#Jd z*SD0+jqI}rZ4Fo|n>MjGoAq0P-ML4W1OTM^!J$#-;Qf88$v5pEuEbj}tS|3_5I4yF z3Wy(-^ta%t0|=#+TVS`T(`Z5LfR=LDp4m>DCr~@1j^y(G_r6wi9R*K!Yq~>VtI-yM 
zDcGe^{*T5?=XDF|`#EQF9^3Ev*;7S9O*GF(j1eJf2CXVeqYt6i67eikx9f`AZAF|k zw!zrJt&cH;mU1_xq?uuf)gXUc@%(bZ>+2P{s*#IB0)ep>rEqOh=(5Iykb$HSxDhTy zv_9YF{1OfN?o0>)p&;10Pl0isCp?}{IG<Rlpq zS?PPQiXf$gc`BGw#Cg{Je@Y24L<3eAtR({+5q~_MAhkd)3raydG%67wgi;0TZNY63 zB#xjM;T6h7fEW{|lnlM9ZASq}Q7Kdf(`iOoR!t=)miIgmQc75AL6M>hUH}(HdYzl# z_W9$tLO`v#wV_z8YF_mnh(Uvuooz#1S1tOI<_Jzfxp|3^=7dyMh-66Z%Ayu-2`E{s zOD@Zbd1e!wqg8a+Y}8ZG1{PFC=fL@<{DOdS5TZ&ooAt*n$I9dMyw@QHgd9;!sr;zz z3`K|T{O+L|l|TlEo(9;ztyGZCLwrkF8WkzCDRv7ZK~Z?r4PmYnhX8c9h>qXq0eG?n^L|zP^MlCev{BUBSoSPf8y@)pK7Lm*#_#Cl8!yNa zIF#|QJqAa6nhMa@g~6}k-TQcr9>jM0;a?pi0(@M@^(pdMXTkT|-TrtRs5uBJ)eJbb zf-0%!*yJfphZRsihJNQe?7L>5=B<^bYZ99E5d`7EaA4=j{`UKOsWRL>AO}ymd#3Dv z-vd`WW40hfJqLFn#Q3z0as}GSb)#Dia&NQud_7_dmbX88eLsi26u?*%!O1)uzWlO;qT30>wvwD>V}-IP7;{P<=Y5cz)7xp!l( zK$Y7RRmo-|ttXMQc_Yy7t4jGcVZvr@wtu#sYd3(k{#DI{b)ILXUxm;B!azO0z%CIP z^CFtjSI^C5S+OjOdM%(jrW>P5T@}!UgfvA=Q*1@7a}f>9e!byx$yjm$QLyZ;N^i_L zH#yuAG1+)J6GZjB9r!-Ta25i0*;@pliVz8>DdES%8Rt3SG)5W3T)x$6A%G|$Sz43Th}HCa=PlhV&3W63L)?afddeMoYno-QTr|+-4LU@yEz0gQfiRAt=l(jUf{Of(oGPqV4o}DDij8Lo+BDm6&4M7?BlU=O@?QI_f=@g1 zBb(L2)7l;#TKAt(vuJpL`=&3{&&w`!wid&Kp22B?B5FmdBFI)rwiL5;H$dWX6A3J3l*kTy{ zZr{%iP)7Qd7i1j4%4ZZ3k!n=MA&VQ4S=e+x?K+Ne0gQ@-_e1yH2nbAp9h9-Xhj6^# zN6LvGs+-RMta(Mr8Q-BIj?eFMzi0OQN;|{?8(Hr$9Q{4UsnRk3bs+g=Flvg8R7+^U z1u3>@PoUPuSAXN{%`)1u;t=f({ymFh^`t7>@%@Z!b5BqEj%BSJKiONQtq3*LI0M(;zpnO7>p+!`*9QheVkyAha~F zevWzn+S!0!1TVDMk1Zh8=j-n^XAr3o`gEF=!Y>&Rjn5fnQ_20=TN%DhH`~v3*^O-< zA2p75+0O!mlyIICrYUM-qgqshkxsqpub~;#`yQMw%kd z4-@7&nQltBEi0a1Zg_gS;I?LjkRVVNN4B8zoHN$4LI5yFh{b>qscxQ^o5o^WI@>r=>aKDSeF|v>xz`lc>M7P;^~a|uOAPnYe6l9`f@eThH|^s zV)Q7n7zU|WazR-2;8}Bqz+w%?Ngu5GYf*06=`P$wTL5$+yC=xg z;l5d?;)`trADt%EK|>3TYzmK$iS#is0mRPxABy~V1|+|?7c=b7?3J_0s8_sG(%Lww z`Q}b2zfB=IZv9aD>S#F*>Kk($xA&i?%D&Y1X*Ofu9DLOMg0*}V&7{k`v5qtnROvOBA zEIDpls%MjXxL5kxH`<=-RJOZk$kyX`uMGYE$sDbW9Ux<~HVLxTc*6Is$iK5~<=D}< z5Aq4eirx|E-Q5AcgsW}94-3ov4NV|cK=Nxwn6I`zrmDQfn9QK0`#pw;;D$Q`8$1}j z#(2rr_kVu4cF$J}FVWY5vdgPI2pO&b(bx8->ld5fe^YZ^PN$^ziH)9r43b89w(Zw` 
zKa^ULqEenhQi^R5TN^1hou*VTs#i9KLn7MjAFXXcR#V2-iflb(-qNJhqFRdvS{s9h znl5368%T>!^tP@UFE6jixni0$oIs6x#R>*0_?YJjr&CgT7Aa`SMeurA@bq%Q%jKp_ z1A@hQM6An-Wy$6Nv9sUSgj0^yL)3xXIA8nukip=fYlV>l;WQg-DKX|b;4~$irl>I@ z%y@cw#?#Ngur4dkrxPB}Co96;4IIt5wOfNs8V|@J;Nhef$G}(?t%;K6GtxW(#CUqS zAU>XFR%)EM?V99%>i`Z1WLmrUMUJ+7|k8&a?AvBVr)b zqG{Yjie(iNIAsmPL~b{$F79!oVYI624LdM0>I{(}MnmQt9E59DqaDW|ni<4DFQSG35&S05I)Lr=q1Zl( znn=KYLW>3Q`L}T!fCxSSaGz}(PVl#0^Jw07msE6G!vQ&;2t7+1dzP8^pSq2GqI-}d zf=$Zkf2xg^*xjFqdXD;|ariIO-m)OuJdNOR-|oCu`+L|ASc~Sq^{oHDG?m>}jFw~EMfU7L$S5!N3gkzgNkGSVV^}@#R<`?@7i1p` z%7aCq`3Z;TdLjY!eAr$l0=TE0cKyZWQV6v1w74;F-(pm{lP4|$8*?MLk=F~J))rUc zMrLmzu*E*9f(_F3bA(ozI(ak;E`etN;{%LTQ-Qv514rWRl=6-!<<#ju?V0zV_DdAo(wV<9a? zjHeB8O<3o&)Y9W9S>v@&$ue6(y%vGEQ=h5gwp?+&zF06jqh`Ufu2@z^S_vs=Vc@sq zSEuXgzKIOHKL5n^xl9@ExJI#;vX2b z6fhU02s=0lQh?h{19DP2*&yfy&Zh}a&p$B*<%KD|B%>4!d!OZuDMpkWl_M&eiNrlB zWJr)3gNQZ#nrxN)(LJcji+VvIJ=m@-Tp|cDq%zi0@p`-AbkQ8hfeC?hppgRBb%hkb zdCi_9f{2>6_|6N`94@A`neE zq;WCd$+^;JQv21xhYBOlKp_nnZ5!?G1vvdSSWJyxp+xH*Xl#OPIJBTL4IX<0DS5DP7@oexQ~jA0CzNt&UOAw-vEF%;Nv|2BG|su9T*wD|2x3P zM@`SM?m_DXCCM^&4ExF`cub!=oZoA%;P^T9Vg0~Q-4k1j-2Q^*Y3Z#2h=H=uKDQrf zft7ssH}`(*y{~_ZyXxI$ds4`%D(h!1)Ba9N`#;l;7KjBi`@DRhEECWi!=tB5KTtsa zdHaSXQz1k{x&>sUq^^WAyb8)Hl!w~Un*8#wG&0F4Gl1eC#EPj*(0v7W8Y)%etJ zEC27syL_#=-2HA>-j`jUut4mt-|Gs$W2^Oh3R*260>&|9^2~spg4(>e=&X#9GTP31 zsNx@@#gD>c`>HAP>LGOCQ7!l!gO^!E01gd&sDaPCiZ%wWmJ7t{j3{MZYgth9z(m&d zQF8&sXmjh7^gb4)6w7*L^hwbySwt`oM>elQ(jtcD;Vq@&x)wZNZut3n!DU%N95izv ztATo1m3AlqD6mojcXQJ9SF|yBld30BiUX^WIuKz_0q0Xts;7torjj%hCKrIE$CnUk z1~Q|?u2}=;r zT-FO7LIN!nFSm?RE7qkzf`DlPC?jV9kS)(Lkk^8XTyeSFK*TsdJYb$Coacl&2E3wT zS+gqhRVRc)z%(U90#F4HDPaMyU_l{8Z%sf+Fq5|m-BJdAvr7ZE8VGJ>H)XF_{7MnA zU}jZj1b{F>Dq&pUQNQgneQ$Sf0t{R;`@E_4E#Os&0 zmY9H`!HiU)MHFF2*^q5zD})F{5t?y8m60BJs3_KhaTx{(K`0dH?BMd$c^icc=*oAKCA-2~1O(dhDFKhosR~xbc)}RxX*XgAaXgxexO=&JvIT!M zw?j?AxpBWqEhKl-CEhQw^nGHl@7cgiW%bM-Hn+{%=+7wrqpoAA+r#3RCNO*^yUafX z>=guR>ms#yhXyjJyK}oD(lgGDSyBkLwgD?}W`q_2K->9f7P_Iwxk}%&qQ(R<#m!@C 
z{9XFq?$;i(OV+}lEfMC`tXe^3*6WFK*nt5f{H_~+l zD{!}8M4AZbo>NV*xTV25D-5mG!@3tVOP0w&OJl2s1KrW=ls^Eiz6zl$$>{29k!rG$ zl$N?*KL<;2&td;qDIJUz5Q~|!ok#=(L5vA8lExp!ptKOOtS>>wSydxj&!qL1$kwo) zDLljgj*5#td01p`s-I@eLC~>dVtDA4K#Tja$%G1zDXkU|=&^xbw`5$m1R&VDAk7vB%}y1f>Uz<#iIQj` zLyd6ugeassYD_IQBLp>!YV2Bppx}DZRJ6lf|3QxT99)!5U(-1%-n+9YTNbpdz)AFe8>P0mD(&|R$VD@#T)`2=Lru}LM|CE zuP?ZgU`he!IUr22&HL$eLM?XAR)Giu6p)2*S+b@_Ch2LBYeh;4;9%!vKrX=Z%N76m z&l5xlr+ET}fF);K){NI%!L0&u$tz+cJUmWV>Iv8NhNnx#t>{5TStIkSp<3dEDl2lX zph&1p>M1D<5yrY?oKD)Id7ejo~vwi zOH5g3VN_D2A2~o65U~oPpKi{9bT>L0C(?PI)Pxc51ggHmcoq|NB=0mT58(5^IUR+m z*Tr@2A&~)qW(^*m)y8St2-sL^^mdD>V37wPxEpZz$s=3sHLg}r22h(Am4lK{lD%;p;_vsuYn;Z z27`5{Thi#>YK*S9w|jr|Y@pFNyADWg59>IMDv=E9L4W`3TyOQXAx$>*=iRk7)+1;V zQ`Guf`|tZQD0h96K45Qh!r$mhyPsn_?*YL!tN+ad-)ZwrS1=pWAI2ENTOL086DH-) z&BMNpPahBM?W0{LHCe>e0ij!vhMsgfs;2<$kJ-~i?%u;K#l8<5w9@+1{hJzLJ(lk0MO^Wlz$qiepWQQohkeS(sa$|$yZD(QJ7q((xI1);W}^jR#b z-1hx8K1Ub3ugQ*Jhw~TMuAMgplheO!Py5+5?3eey-_Hw|k9(^0SK3?R;H}SpOQZWU zl1vtMS%tgvaQlUB#PoS-|N0qjfVf&ZU;VQZfSG|5**qYs3%M7=Y2@0vm%4d_cVNo* zk)NM!^}23odez>0;6_aj+-6PV4_(h0>zFWd)~r`u&x=4Q$g9Rkmr@X7#5AeSJ5B0E z@eGr{6Q9b|S2c0l=cQU$RnxkeD%tJ|7`Oa7-YMS|l<~czJrp z&z~2(UKh=%m6h4FR>4w$TM=BcAiiENczl?VrU^eDf!k7WE3Zgv#XJREil#rSLMTFw zI^$MSsm5mv+HprwDF&S83C>eXX#zyFKwc@B<_UlN@dF|i*Xt{CUIAvL6qScw%sMd$ zn|3T(Ae2vx3pir7s_yzdW(J!puS$VZv3igzLpg)BY7EQewqlyJ%6Uw>08%96sMQ>M zwQyV1K^KG5ndr#+M)QMWTkFHXjUSxGKM#IbA6kmG{bvIn?B78bJPve1@asmdJKCW=3L5X5 zK0xnxA6IdlxBjTPXK4)5+@#+9&8<_S>W9zq7?uV;YOmR``QB#z^=Gsi53Sf>j{#AE zEf)Xr``)#$38xJWaUN~ndu^wm#$Aw}1P&Opvmj=k5tPYR8{;9btcM2L@y;iWnoN zIl&F8Eycc_@qW&!D%4{_w`ZKnBK^$D4;=Oj?c5>qAbQg+Y*+O}H`2}`{}F-dtm?yc zU8|I0DT%dc;xuW!4+S3%^0hMg^?JkW>kYTt3c%vUh)}Dtc}muLBjU1>6|T9w-{?GO z602fJi%1LLsd-#2h?N~*L z6iA+fmDJfYXacqguB z&F4^Ac`4QZ-avWP{B@g>Y1S-NuyYVox31G>0Xd1)@Uz_4a-7TGqe(!15zF(L#+zq3Lur@lYF@{->nC80K*8KZVcYJ^HnPXsb+Y2t; zw4PAN6gX9Ysftarvpc3*F!9$Z$j4i+P49u?v+aE=$ge*ebE}=z-SxLwMVdUEEey}< z2&mr|2&zCT`d#NqgaO_=$%Yz()N5nF2d@NQuFgd=s}jy>e24q0BG=BtvwNoeBYT$y 
zly_g2`rj$}09enbMnH^g021(?Q^B@wNLjLSdFL=`(D^(?#OOgz7Ams<`1|Vv%d#l$ z3yJ~N;~^#NX;)=WhqvmU-;D*3wfAOPb=GkK!4oDI5IyiIdVHRx7KbHh^3EX@s^d@w zBJaVGyq{D6z9ScLwE_xwF9@1yc@BczSnCDmA{hGR18CYZhPWUV7N0{Aaq&+qS)%jT9We&PsMZQkh_^|K;cyei5JHs<*0H#p;j)MG zZIiR$EBlgSE)K;3?8jOa?`hWLzh3=1U~F;KSeggE*Tsx@d}(T8rEdzFzdSL+wl zgS5`8nOq50!VH~Di){j<8MG}f3sSTcjiF{`umV8T4l!+m<>L)N#Cje0qI&b;asqYL zz)RIQ4s(Aw>xXw>@48EkTD4afyZKmPkYNP#G#0y#`#7g}8r$=f3QCwEF(Yag&gynm zvV0ksv6SJ@SwXsI?VhU=JaIg)ih;V>qXDv(XWfWy>$!Kus#A2qa<5iReTU3iYk@0h zXrRryvpM4K3-#|j^xMG4X(mpbNqjP5*de2~iy+{y+v ze5T(+N|AlN{<)J~BGmdI*E7(shn8k^kpFU6L*qbxFg9uylwP@}g(+PTiZjcX?%cD!Q zuAYYZoLB51+Wl71y!oAolNG}h)-nZh4RgV|tl0NdGjgUZQh;;PF_*eG3|^#f+3vt?XV(0a<*lc>|o@lZ_3R=QF92$P?hGfLKBrZqtO% zw+Xip5Ga70Lz0kY6o?Dq`Zc0iagH_em!3+5u#SZn!*J@i6GN*N0x^$-X5-0D#yMRL7=G1nZZtje@P~BAgP`S3#kPdYS`c2PB zOz*qr#wUTxQFF{_sa#tow4Vn+ z-Hp!wb@41iyC5I{YLK~+G8rq?dMQQSeXcF2+zv*QgW>)4gwH77f49kgZJ@jDFY-Vl zuV8^Cb)SRmheH}(caZB&86yS8-4A8t>aVuf9c7~u3pU%50Wo>Pfk#8gM^J^)*&VgX zWhH}TStPlBFCEc*W-N!hC@{A*4xL)6Uya~0z~XRH)2aT6jr7cDiUXZbTjzb!FM z>(Bc3PoERg$N2kuVg23W7B`NtCk`PF)jWHm^R$O=6680_Q{#fvMEQ-ro;pUT~ULxNMW7{^oQpOw!4iWR@Ku*m}oUtb< zUB7Du%n+q8k+DE#DM*y|1Q(*XYcV6Iy^0CJhy^roPp7>D7ER-9*h>TphqPvW%!Kd{ zStL0#9*-T~FXBRW6M^-CFwYZmE;5dNmulj+fJw(%GF%a@t^k`>0o%?Mt6VDu6iZux zAHgFRGMGySk`a)gU`L4~Oa5_7Lzhb$K5oE|$PVVVAdZ=d+IU71Q^BLrt?q!XpVz?n zYWb+-Lp9-KE?7zo&_H>w;nDr3`%C{Ie4d{lO=m6CSH{EFHgN);+0382S5J=GhAYp0 z)O_&i>)HP0XVdN)!Rkm)_Z&y-+K=z^0S5||Z%3|@lY1-kz;fIZHU zX?=T5KWde3g-g|J{=d;K-tR||*gHM9ZvG0Yac)LFTHts`C~S`cU}3%r$AzE-Uow%=+MWoMp)dhNR=3p zDb6E?2p>Fv7Hi=x_Kzi{trZ-!#3;v1eYG*8SsSVD(c|+76!3~Eyu}@o<&MsUX_}+3@*fKLIl zpD<4W|N8&0Shs?GE67=btrN?O&P+fm@Nfvup+KVMIcG!<)qhTi3JI<3dJ-}C|NbPT91PWnq31`gqRsj-RtXEvuJ`wjbtuTLf#74h+mzA zC8f-GEIX!YMTj0T3eyWg{QXKU;s8xd4-0-^ zBGkonj4>n5aYw?MwZjOAtP9aHFgq}vi+nXA(gOpNg(nceIQSF!Xqtwm4f@k?6rOW~>2>pACjL?R73~jOVd-_p9yC8E{YF`a*+O2lbDD-ZQNy6irJx)`B^c541QO zZs4weFCQU+zoU&3`D-0LH-_V#{yD&^wWfh@z@!1JXU8;Xq|pToci{7bk2Qccb7d`# 
zs*6$&{GbZ9t9$i2(JIRtM>Ue>2BPf$j|7B^rl=M^AP?%Ulnw96&v9-|TQn*M>2NJ8 zb1j6Eo6q$<{vNHjr2bPIk!39V_&HbW5!H8QF(3M~lbGK)MZ_R!ajRm}h;q*Ld@Gt+ z^y+--o>(Cs0W8>qsdNfqQ6qS2S+6AaLB!DFKB$887HcXY0Y*MVSq7jxQzVu*#;9p} zeJw$dQpUC=Jk|~Cx*->a)Ht(ZhDH09xJx3bJ11o>F48Phk447T?*&fD76FKk@M)eg z$DoA+p;jQM66UR)kMtQO!IKe<%G0M)pA3m zyoWJH%u@mrW84kOk-TJ69^6XTDdzs)tr923PtS~;H$q#Cd*4uJ59~#>zQ^%&#<+_h z-YK#Sj3GqiL6Lec#VUB3rNW2ToZninRq%;9(>{qcXj^ zF$G4gT0j)^pi|yPoezeF!KJl(o(wQF%3ZZiRPfLp3c4|j6@qUo`vkW*c=(rjKrG2T zkQM`CW~RszHiq*kEHErNtbjY&2a>zC_?b)CW&l_^?<9rQx4_JiEc(%tf{UKab0 zvr%0FiUwBwFJyIXE4nD%7*_lr*4g@d0zAY_m8I^$v}-Tt&;Is;ezfb?)kDhfhpKGU zk=Z)zL-MMJ4A&H#x159hmAXd!pim1QszTMqxz72hb8NpYEu*QvGwIgRH3Pr}JIQML zr2f0!#sK|FQS*;A>Uljh_P@PUK-)8^ALI(q`C3zY%=XCIpVdC;SEBx#MA-)~TGs(0 zs^NLQ2ESP?+k$alTilJvCd+Iy76qA zY<^g;dcZTT7C#O+n2`Po1of&<(T zW}ESJ_m|Blrf7ek7i7KPZl3Q~nvYk%Njy zJSkYgTefrpQOey#BXzVch_PN3V~d1>mKd~%;V=%(+bDu*N;sVoPUjhON`L{Ij20%J z(gM#nBT}LxOVnMh2BOWHZ?gdSc>PNRK?s8YyAW^~Gy$DDVv8afk_jozmiR8JBf^>Q z^g*Jz7KkBq@0hUPEBLs(J0Qe>B_*6sCmfGwWD&gk@H6fX_b5qle4g+;P55*? 
z@IrVIsR+(B4-I zR60U&!7lbfOd&Op!Y;MM6d&}9*%m!H%C%4x5)^2*?ldPHPa3N@!~sJH7>0ml(K;15 zXC!EHk#EcbvHBhf7&MejQfhe!ol88BdO-@8Mr&A=!Cs(vktAD!cB`90PL!F9xdA(7 zAlpGiq)GHCr6y#!&8>%S1@fe(jy1w>;DtQk-+HA-f7YZZMxX$;l>a~)`{#46?^jvJ zh^!ErZ7@w1YcAkkYa(K^*061&7Ix#7Z0Wwg1VHpqR%O){bh_}mkd?&qIldI+9#(d% ziFi#CW1rfntkqlQ2W1gtX<=HW8$U>EG3fGc3Z^Pj9v5!$yHv|;)oCmsh$Vy$Mz=i2TccP7= zt6`3z78@(YMp~z2E&E^zr?7pe2cf+~g6=mqgH$DK&@mDN>~)Du5pdC<#;DtS@N3)G z6f0a^uYc6v0D!2X4+4tz%RS_cetWz28~9(}-R{@3>=8i|l-Dw`Uo$me!W3?|N>(dn zuV6@1!ds0M-Rn79h_7S_38LNrC3i}2m-APOSV*n|n6-z*6;~!w^D%It2p94GcjMV2 zl6rpGI0b3|2>u$J$y#^jG&xv+M`FZsk2q80W-m2ZyJawxNZ}#)Ta73hvz6gl5 zj#^Pyab3?MV%`NU>Zfu_3qCfMsEA?Ef~uOoTM#(HX?r!aUv;%oiB>#cNTsv($2}1` zkRZ@vdEVbbHR-(OJh*r@<7+iHSFpq&i&jY`A!n@#mrJpV-)t<~`FB#`4<2)tIi{>@KekVncy5w~cwEP#B=d_j~bziy~+v!3s7s$sYz(9l%jD3jM z4~#Lw@=Y=VtBy?|Gf-zGTQV}GWaTP$7*t;)1zG#U4uXPVjJUtwTaO8BAy8iH-gJ(VWM?LuA<;v= zeZ?xhy067{W_UiRlOz;rtc=^L?Dj;g1rlw(+fRO;R6u0O=iKUmIMA)3k0De8Sisw`n3s&4p`J)P$XZxg3%%+i0l@v`f#y>xw@z3WIj&lZLG()L&Ix;X&#{C!&BNOISj14M6 zO2L=s6L!DLxq!0qx;)_wM&!Q=Q#OrF{IcwNhul#RYrLh$; zb}F+-7q=+u0r8gPS3!&K74szcW_PMy4gp+>F@OOs{&iC}0nr1FEho3jlq2i>ps;>!5^aPgI=^IweZMLS(`0maKaq(7^*IZ`k4I^O!_=HuBIWLHju^UGaYl8lHnsGYM zD8hJn_Yok*G$nj``hwrSobc)Cgr{@DA_NxF{(~?uVaJ5Kf$_`Z0r76XuL@5vm(%fl z#&5sc0`SOfX3I^-A zK=U5PVL%8&4SNY80I-7O*M3KIbGToE#%Kt1>Xm<220;+vh+ z5~p<|)-kBwulBwh_;3aCb?|X3=i|Df>)wlgjBjnSS$9{jDZR%+R^Y=~Y5J4f!Bd~q zt^!Y172VzY;YvU=q^@I^|AQ%R{UgT}hU_J}9PYWRwVes-~(D>-VPym_p*09${B zF9#p@)g|OIo7oJ5x54s@&-&l?mp8I{Ue0jYKG*Y{IaK+p#UmJ_2LLciUbH|j2!SJF91%i9T4pJ~Nlryprrt^aHykMFa z3;b2XaS=kwTJO%iAI-SSHNphAt+&>>11vPtjqg@1os>y+7=nenv86sM;&?zI#YD?jIg7jstS~CrTC! 
z1Djw~R5zW^ik8Mf3$`o?0fhVakC>+gIVgJ+NFXFioyGxT$=5|R&bhk!0wahrbtSWG zMh?xwjH2KmG*_Q{9&##}rU}o)*g>hymamj6gl3kk*yZ)zB$*jeFe+|7BBLbiP5T$*~~@aMYSl&9-BKP@Da| z?yt&$O4I9drMskHD`T&WSyaIa!5N0o_N<)?+gIiCWBZwZFP87(;m+#|QRUdJci`h+ z|L(dxdK2i8Rv+UfcE6W?_UE}5_cf@PENmJ%Y*-6_v(flcq0`VMD(#0VFDEfKi2F`%A zEI3aKrfIU!%#0!nm@`sZkW$j|#E3YK7>Ci$dM#*uK2A8EXBxkcB4k}7}$bng1wL)CC*xv zbHQMqDU_@pAWO<&qr0TZM7#Un3fp;up0(25$0vW^YaOT07)4#ih37{D?gSN#DqYa zh%U~oB!WncBneD{0MRh4IYW?ZN%;axqII|3l}*#f+-}3m$hBt|DWK9eErD!7>f&Vp za0OU;CI2!p*jcgqwk^zB3;)!=cP|K(ZKBH~>A{C8lsCY~D~41LD_I@>f3l_erRMKj z<3BHz*GsGV<-xR77OEEoxN5a6-JIy!cJA>~1-GDq(I4UY=-*=n8r-cRV;s?(A6|oz z%7g-Vl!XI&uabPxM~j_&-TLZD_H7^C_CC3{s833^dBknS=4;z_Pf^W-!e8nvc-;fC z@&5fsHwUC|Yntfa0AAkgE45y>>%V3lk%J|u2EnFC_zYCA+nb(SIp0toUh5O|{VE74H$lk%h(>Ph*2MEZugT?J2n@-VPi*wN zrQ~{zHzoqbpvmvfkX6sDfJUyMUzC2I(a-49U6H;-56M+LT4uo9<4w3NTZp7bzeuuyR@(%CrN5nCz zRX-)cdCo|=sAA0l^EBh>=?VLhFz%GCXtx{i?jc}Kgjs-mdV(xP1!K`kCnNxh%$Ori z#|e;xoyAd?QZO&AIwVZN^&%&!WTc!Sk}+^Zh-xhlF=8MF&$GIfY$Z_D#LN5ygtx4`A3sQ;eoJd}b7`Gz;A3M^vdJrXZS~Bcmb1}Wt1HOr z=m`OP*BhnUtw!n%6zfM1(qPOUKX#sa@KM)NKn1VN(0A@YmAhQ7j`X13&xIxfC|PC2 zYJ%U`zn8#=1C2`G>z=G;efI18Uavi?_kL(dWBd6MsQ-Bi%S-b>L@yT76I=t$|7q>* z@4~@H4TCH%gVk%l3W1TcDguRq?j2uu)Oto8njxm@(E)(0rUexHw_5nO4}bIUzPqVM zPt3djeTdu7)E|)-2Dm8xJ}-z!^Bg-sdJ72Y_oT~*vbFI2i-eb5_o{U4o>rXC>qnr< zJFoS{Na9{q+cTwkx~rjw$ksaA**f#;{3jJ_xaJL<8dtjg;ZW4k<-& zUKY%$u^^&GArwF|hDcUbd_W8e6iO~gCE;{B;_2xbISUBEJRiltN5--wjd-L0u!pl> zdij`mbw>BzKqdl7K`rno%6v7%h~0j`7za?9Q6!^C21v=EJlDf1==2sv^;@s)!E8nh z)y0QlFymXYavTciiU{Uu!89kG)fkX7VabBiobWu)cs`v`gw-%jS&0CGAX3I51pM^4 z!#{p`kH7!)4v+T-jAKO9oD8Mg+MQ<1#|ab$#4vykz%-xn>GKmf5Z*mJAaKMuG9Djx zI4wKO>8uM$)UqxDOXSgocXxk>=g$etq8(at!FgH`oUhScVTQ;;49KaOb(slb%1a0U zh;i5rn29tCKPUC1z-V^YJLBR-x6Q46^%pHb%^i?UScH^7N%VAhRG;Mk|X4E_P#$ z6?8Ybeznc*_j{x8Glb402yPW2rS%PD6|r&Y`Ph0e#KR(9d{n1g|3yNYnE_dY3HPj( zY8eIGbh#3=UAqJe?$zl&FZ~*@ducBKi?8Vnq zNSYn4Rjb6mg!k?Of5@6iY+GHs{Y5ti)d=*TY6;f8=Ek6$Q}sN$HMehdb_4)zD7~%q zpq06Ri{=NKzNLnuZjv;G4=d~Th2a`E}M 
zx9{_Uv_5J39uVR{=E_d|i`r{_@ZW2QsC(r6a|aO~iVJw&!MYf(uZdbP)*?0F2Aa-T z3%B62lie5{t>=8eGl$h$S6$m&4oKF){T2Q2DE^<&quw?jQtjwn|R+iTg2q`C= zPs)0gQ-)xG5TgiNvB88TX%%$uH`RiKmow<=#G@4aya*NwFC_t`)DWz|g#B)Z!#Ds% zFfR*YXkO30D5O0BMO0sAqd``QVC-@%prEX+n%o}^0BiI>N~*V~tQ6*|o9bMh=0*2( z7}Q{r3&@yj$3TckcsT6wk6%9GzkYd-caL`%M`eeN6d8dD2M+jI0&@9`r_+KQ1-mgK z?Dv@F3IFH+`wau{@Nhq3jKJN<_;^1e%_GV(V`4&KMm8n9P{EvL?DL3W9F^rjD{LSe zBS0JnMN%mvEenQmz!-PnQYv-1dIbb=2tdgw%Yrd5XlPv15VaH)Q| zrU~3EeAHMJ)ODS)6d+JUBrA&Nc@t8Q!1#PV17pDb?hYX`VhIR^k@7VUfqD`|1WQ_w za>ft`kh#YMssL7+S`kE6r90d2i^!@gbUdWAzX5=-7n?C~ zLjoWH2p|rwFKbaO@fcjP@vVh!q)>AJ0+YG|EfFkQZb=|hLdg={nKd@jh0sQ6X*nh9 z9DqK2NW=L&+|4b1{kyn4yc*+`ZG8KFOVqAI=j@);grElLY%OwvES`ViN-R~2j(^8W zxpU>=N+P<`1$XPri>i5&x`*%;A$_)i&mvaUfwU5>!0V6@KzyMPT_;)YfMN9}(n@%S zz^I$oEm-z9UK~n4Yrm-$T_I5E{vK%ISx{L(mUq;mWXgrU2rE+8NrMOxtK)0b~(9NUDO-<#Tk_zWxp9g;*`$ zR{S|_Rey47iG z#foBzv^9#FZ0jr;TVNi<#XrRmA4prdc>T3Z3SuFRXo`h8%H{*^ft@RkQ zIrOs@BBmz*-B9eh+pn%{if>-oscq-fn~-noo2}o+TLZ|p-3lM2PHu=P>D?b)F>r0q z!tH~|P;Cnn?{+?_o&dCKd@iNHixxMXLXFGgIMzojizR&5(hE9IdiUmSCQ>=;`dLod zfQ%9bNhwOY;Iz8|7UC_nUv3BKnYzTrnN!9*YsK>zl=UhXPa!WTvS0`Scl#Z7 zh)g&f?of(WfP6Y00TEL!2un&@RXv89P_?3{^O^unq9uZ`3V=ll&hvyg2ZSYHzl-3gigikP<#w-|lzejv zL^4DMFo(waP!-!Igk7=cqMou_Y>jmRZ@eNGKkHIP5pxoDE_>MT>iH@zIGmuk&F89u zDEvLQDeUQcm4{mal?sQTeBi`doRIb36;#=cTp+rY+i8mX{yG?FG`gtYrLKd1F4nC( zSKriVBQ!v!1UNYBRGa=V@e131NUr*H0X{bLxYCW+07lhNY@DhDlDc{gh?g#p!P(}- z?LSq)Zztq-Ixe$C#Sx^>xxj%isp*` zmVwL_^Yu~N?xjjN)iv7w&JIWEl_$G~ToRd*FRt$wU{l;ul{o)FR*Zf&lNHFUW_y*) zyJ@tYRay0YrD=5vZLch~ursRwTuLp7S!^BV3QinqwE3(5rq%~-O_3{`{qnW#a>!Ta zp!?`@jzq&f*yS4|dA%ePF+`@M6D!-IpzR$oh9=*CrpUh*t|PE|Bi_Bbe+B(puj`T4 z^Cwa|#aWeljR#+kvnnD*z@?3m*(cAR<a3?W4#5Sm)^Dztw9xQk=dS{iMu0=S1|U8;Y~BYkB|puxh23j0 zW0iSM30x2?!Fw1+#GvT}1V%~;r*pz-%2<-hbuFsd)%w^n&3RPK)_~H_>{fU5&qNKg zY~X8%j4?9C7%iWsg?*~wx__N^-b%+rDS|-25E;WRVxE%G1ZUJ!wg^(z5*^3mjMF)x zPyq3O0>XJ-H0k{5jCsi>7sT>NBE}dHDPtcQzyACIfB)q@K0e;zupb~u5D`3|&WNS7 z12iz>{_cP&36|d%Eb|1WSgTbZk4FS1yt{wEFb>%72RxPoj`M``vS7(t72JYvlplFX 
z;9hEI^f?CeV3M{!o-_gtV3B#|oOHanvHLjztX!dLnnj}`cE#qRRJu76Wh_&9ZE$Ylb$QRj7YP#3xUY^8CU!d5nOFIVQE|L&+%q$%yI8ZoHrMppl!?rYuj z5?`O!fUp9V7B29*2Js`p%`{~1a$Ro!y$ZW>fV^U4YJj;{!bN2LPW037?)SEj*Y11E zdhCsT>;*$Hx?Og~rFLNPjb zD#ci%xUOO9MgD7D*ch=c=CxC1eq__L&y*lpTm2n?XhQbJ0aBTR11M5*uDwViA0t!G#yvvP$Hs_vpSJ=Xt@B1n1cmCADtn;);~UJ*PBlB8wCpNEc8T1_S}dVFWW99a~X)HIQ*GYqELD z3r?pa;$sHmw;ibZ-IK&cxGJ*S zd)N5>ZiDo4q6s4Zj`nk>VD{{P$9Hn0aG}o2SrG}jN?=BQNG(cPbb_LpK{cif+)xCQ?-dhZ) z+)|9&eWil&m)eWoj~i_ZEVh4o4KRqUl~#wMc|a;PEE)iTFjgL&E6;xEv#%S7D%k7x zx-p@Dp27#I9yozo4Yh6o0HVtKUkrJ05eQ%FbKUUx>onr^UMZGjDx~s&iW?dH>bLZ> z-Mg3B*Ps03nkq{MLa+fL#wb}s;{HvW%Z0veLyX_j{-}5HRnNBfu`(9+hD&ZmkaJ4< zYx`Y+CioF+2zn{yDhMI9nl|IHfi8`zu(QK_#$?{cwnA)wk{W!n6g6^-AcVoJOKyl( zgXu5~7>6B(VFX7FW6DKvo+dmWkC@KN_R1k@E{mufNyh5cdo#Rf@=MLn>03aBNJ%P& zVML4!DGAH6*t{E&PhRX=EvFz!tDF~@?qE`$8Hj@eH;ssi91zFYLcat!PYaG#Y;f4^ zG~s-j@#Q?>cswC3$<8dcJ`f@aA{Fcg#{2gN{QZ~r`1z+t?Dq`8VpPDu=g&t4Aag2J z5g`Hs3wFDRcMk`o<%DTELzbjS354^sfWt{aLyQ<=#Qog?ODaeiDEW+O$p8-s9D$ON zihz?QL2I!W2PZ%(nuihNfH;l-)P1Z%h%gSC{3C*zM?ege>L!aou{gN8;4DDMlo=61 zmJC=u1y>lIip*WZeU)GlSrzDOAVgWQO364q<jpVF(C8spoRZ)eF+QpX;uKR1mWD zf^01!9u~mNkeusyE)J0krDc6c^kAWg#=(021q7=Ebs}iuH=6f{x;fx(Gi|MK&Fevi zh8fWnPo~RmdT&=?&Btw{?Ea`(Ti4Zr;Zf=vJ>Wv&mlo!9tq(8HDXM~~mYe>-xV)Ex z3s?C2F^Zyw{twpukoB)rk9Lb=RaGzgchLt2DfPZ?9O#Eh)pOWC^mU6|50q^V2&nP5 zBmmY*xdKxMCSP;^w=1t(h$o$LcniGfl$(-v?HZBJ^K|QZm!VR?rBZRZRmBd=wSuQ2 zxrHf_t+!(IkiC+!0`ynbd|L<2n^S)WX-d7XEopnL6s;$dvazady=gU+w8_?b{|n&N zoq_PRv0Xm%avxr{SNdAt)pdEVYpBWi ztsIbkGs4&D<{rhvzjabn9<&Sl^WV?BHnt|CL)1iVm+kB|*hsDQIeD==dv{JH+rM9| z-}82=T=Q|j5raJsXi*ic`5RRGhdMqtFz1{hX!)tGuPJ$idKkuWhhZEMhY=x06wzwu zr}Kp4>1=r`Y@yFS-^KQa)V0WF*j){U2wX$Hn$fT~y7uyf2^eC)xNB6vB3e_dl!D#3 z!%!0D^MahSf~#n6++LAKwe2*2N>;r(IF&c4hv(^t&re6>qV@QQBXU~sd_3Xv z^ASsOMmr77XVQdrM#eq{e0)>GFep#67}s+k%Lh^E!VWxu zN^e%U57|LwV{m1<9>%2|#>K44+yfQ3?ns!Xgn3do98o}s5h3T640ItkbWwSN5O)+c(_{Kx?DF`~L7~z!j*y(*54M(e6nrn&(m<6d zS8S@G##j~CeOP;kTtV^o$IL*rS|13>r_*yQo^SvfV$?DZz3#Gj(m6;krA*~GsdZxCa`1@63rJW~V 
zAq%&y$?tG=ie8&G?~C7y=;Ae9Iaf@U$a-(Cul33EGu6P7)n~eTU&n2}KWg7~@p$M} z2(+3*)r+|1<9SYQWw^SSuQkPdKPpg*Kp+Dw9wP4{zRE1y*S;as^_omtmINuv`WRRX zU<>pd$XN?EkK+!z-5$ekr?k7mNGW5QRA*h1`%%dZE@E`T(tr*%uh6BYgsRh3neILA zKyNQk;-JQN_53i1B8V0&OTsur1R?|?5Q;scAd=wy{XKsD=@EbX_<+az z0Ws*Al@`J2G~=J2p77;(!n_p3&rc_W#{*&*?B!|VGmQc7A0IHE&-ncL3Cprz*hQ@z znFYtwgx$EqI1Cu#h=&7^7kx>oWIQhgxy%SWKnf$3q6}7JL!1SuC3qvzf2q;3e1 zdPIh(>8c{?e(F&o;5443^vKL$XZn%tai@SI=Y(n@GK*z>$>8c{cjkdoL=OTF+3JO3 z5zJG<`Me+w1I95RgoqfERiD0~=Bb;2h$;x_t(mT5w-BwC`0X)$uUn40jawBzYC@rV zo^J&B79sEG#W7{`PIdEZIOP_gv};r#SQr+y=db7F@~CUY*_xnqMX82^NcUY_5s+<{ zyT`l?K7M3tivMl!QLnY@Jv6G-pwPo>RrwfzD;XYds`rp??MPEBi&Ox8Yhno4Sa-jp zk-t070dWEW+WgGzc<{Yvhu@|0+0?;5qOA|S@0^FamPPCAbO&r*2j4K)?O+C&WChxL zW$orVTj5GtuaPSU-&geL&11OLE~;Mt0pR0n`=PYa+UHphxg=0 z$&I1Gm1=(nN=oPm|NH2OuNf8J)c3k4zt1>wv9Di}7jL^W0If%HS#YppCD^%geH0 zHwHvA_G*sG1bH|pBPGbdfilSsgGT98LvduV)y`X%gn3Cgo+cC!hH;N!++)dtpKCMx-<&rEG=pwAy%DX3PHw7{>v_5K&6TvMeAX?E_jkM__^k z(quvNfRG3zNi}K`bX=vzg6m6nORS^TJ_MjxeS}g9ShT{WBXDQoNhxEVW~BXqVX#E& z5Wzty(|QYEbE!39mvgRRU@L3l>OgX)R<(lR4T70~TBNFOX8XAZAj(E%1tZ+jBE5Uu z4u{|hBPwX?Tj+!VGgped-b(RWMP+u2UpH^AB(~g!E?&Q&W3Ra}Gp_)FAJaD8kFR-V zH#WCaD*!29tr)N8rOi`cIrgOk@U^4uwJ}snE`UDAhpB<+N;6$!gr&2Tt=;=S9&5ZZ ze?{D3fo4UnYKvd>>Qi*js5P-FjNgxG8hEZ&UulN!Mzw$QJ*~VB70kSxr`B4tQhR@X z- zs3li`s-UfUvN~V>9h$!DOACG}+|Bv-x8_yk&to8F7d$o_tB zZGRm2Xl)nI#+afi>z;D3XvFacV)r2y5nrVHJtTpux~1?Oo3 zM~#(ZR!>OD1xZpT^Qil;_fLhQCJi=$?DCSVZ^UXk~wO9j+N`Tl2W8?2|-(`&5goGY7LV{z)q*xPfYezNX&uQs`5Upi}nvOc+j zX7wR_W{FmJ^lL6fu06-*P4CeiKApw7zWeTf7YCAswN#cMk9EcR9(Bxp|D7gRP{|VX zcU0%9>vDUf0~WNTJpoqUk0J%U>93XCSpD43*X2CjZVlLS2XM*g5e<+U{b5r#sjlsw z8nD&Ic-gnVaEa9@hrTD|QpV^u!ZQX4FM(wY(InwVA3<+z`vhVBlVp;5behifm!o0#Es9YfbeA6|#__6hgIxh%x zcLdS|-70OW_1E5klx{Ga@pa2h3R#F{+f%Fc@|AjdQ-p8){`Fg2v}^ZroN!%vbEWR< z4N48Dk*=-Hc)t>oV8wta>t9_TM^jMUkn}AeM2jQ#r{*ioMb-_Gv%0h@zVy|=WNYM8 zqibmI4Q=dRh&Nj`+?5=@CJs$r`}?gCDyDo?NlyOUrbIKu4zex)%fiSt+D7x(LWp3F z8g8^`$WhL!_o(M%)S4i~mR684%`?u^j3p;y#MAjIcFromvU&75x^fn@x$%5M)bZ5$ 
z1r#i}(KdJ(yDADo*{V?KyBmV$VGl7N#sx)El@CM;&8U6Sgm*;5yg%gQBTO>u*b)bAF=%QJ5F-~7>FDJVmv>c zzyy5YM+{@YZXEE_`*+AGVV)M8&KV+%Y>DDv#=Okf4a%t;C~BN%7NmK>GA}yO0K^Iw z?hiXH5S3>L31VOz4tpH#?jazYP6>(p@7f|Im&Udj0$b9M*AFROZi2LfR=pvl^*@Rh zlOvJN&1j393>{AsD45a})w@HMqf$}}wX#3yw;WIuk-Iz37pE4~Q*VPnIj?J_^exyx zud)MV<=z}{Ft<2JJxi?uW+6>(4?*8S!wDCPOs&a57E8E^6L}QGf{+ zx4>@LWn?2cy{J~T28-9VXd^H1_|Uc2rg=~N+Vz}yJgSHlgrs#6SRaE)Z~@H#^*?vd zG9b}nLVkv#Iq#x2WxR_!85txa|ua zKs3SyDP+LSCK>D}BD_S_OU=Eg-AU7Vd9Vg2weQ6gBYo!OI_(cK0z@t3-7wdRj_3c} z>?1?CKnm-wQR~L4KOn2`AvzaYPfO>jz`xoWDKK)5`VPg)xVVg@?*3Oi9;*)a>VwOI z&F?Sf@$Fij>N&guPusq=i#^-|COl0}Itm1!WD8-vg*J+s)BX0H(90SPDY*{Sk4j|q zxh?~TfiCmf-u7}I6>M^`nkKeaT(l4w7!_={&~^d!;d#~4eC3+AHCnyv<{>rkrxJSg z7u>D=9T*`YIuT)r1IBTO7^CiG zQGIn<797tL=A5+dNMO?ywMLSXAzjYd8i3dy?g)HJ&UVD;b;;Lr(R|z?4v2v@5=pCv zs?KV893e3F`yG%INR$n1x7$H$&2 zEg7dJ;k0B-S+EG9NC1n4y(1%1!NXy|uOA-q|NQb3-an}4gGf*^VVVj)|32aQ%Z&3` zb7(REiX+A`A{G#)CE?STBgPoO^nm;Qs3jbYAZr*1_mB6uJI^>TpOs2&j36SUT=4XK z1P9>Z@eUjaabP^&-(xx#q;$k01;i2R>Cggur4$5WWikK(VhKSO3-47_-Kxw(02Mvx zLI|3BG7JD1^DF=sjoP%yugT@Ez>97^)ptX63AQ3lxO&(LX_!y;O#hW7TgNdk31Rm9 z7B+T;pp?9{jM#zJ; z{OYm5Rb5G~3|%;thQF;3Z4NE`Dp+V&Zw+{BJZv9URKu>iBx_Vl*kwVpb$tJ2t93JO zTvHE2v+TcS9cuk*{k)oZUqO@!6j?K&l~<;x!(H{~)jWNrP48P<|33Q=DkYa_wAGPV z3m_5IyrFM8egA;gyZM{4R%)W;zg9CabhAkXALWHL+J}Vr;wd7U+}tTPt;UYi?n(u` zRcjV4(D%JD^8YHWmnBLutX4ucL&1%}&MWu$`nHAEy__Kbuh0DpdwUy7*UJ|TJt6a; z1MUTB#+6G2^~FBdKK`*X=3lE_Qx5ueIVWxesKBi_2+{k!RSdcp#KFVAN{d6p5C;GT z075$ta`v3W?#wR5jC8J$_r{5K4p$u_cmluJ|HX>&xt`*3u@!P&fm&uIMp`ryWf%q! 
z6qJ;lRo>@}-EN1&-5qwjT`f^ipaA7OFPP?8!=1XZ>mJL5mkNAK8{tMn6Mm!$& z2oXq9@O+x_^yQ2%pN}}57fj27B9@D?8}RY{10qpCk%DPn@afAFhEd%)WAu8(`l5#z z@c3|tdCBO4*rp1mIpfQ-fCs{UH-eb4KkV`0BjGeGe7~Qz$#d( zl~e%qae{z(DhgL7DlV$5)dfm!fKa;}Gc=0Gyb5Z4000I`Q}mgkR?!ZGTpHty&BT>n z=YsE*FFUkSN>~=gJS!Dlu(~NFWT1q~a^082b&PoY7p7{5FO=cpS7}Lwxn<{%ZGm2NKroPHRxN zN?OP3;G@5tWR$-=n8Bco8HCH`H^|Q4={;S)X=?UFI*;q-gRjmD2Y(4ns-_`b-dTj;2cGy$%8a9%L zmNe==>)&r}z5f}_kFUTA5*8jMsL^s{Mi4)8K-vUD1Sm+eEZw0(Rx)Eu8HV&TC z*Qj^bhQsb0zM{JTHWq331+@fj{{<10=9Ra)PWwBiO_&?OWLE*R_SXTBh#;o~4gvea z0pmET_rr{tA_en2V>(YrDRqWFl@;0YK0J41+j=AO$~b`ayQ_OK>;8`+7^@W!%?UqS z^>{!%9Wf{ol~|4Ay}!fXfBAr)Kip%-z!(D-(gKE`zkI>(zn?Ho3zj8A zkP(K6-G0F1yLM(Ijp+P$^iJ1=GYeAuBMWaHz~$ zJqQu&liy#gxKg!z`F%HD5F&L;W~7S+-!Epj2Ka__YeF#QhhnZn=71&}y2{i*4Wp@j z5lyh|mCN-r*Gj4v3VLhv_j65%T>~H2H1=CwjceDqvauCvJ@^1=$k?@0V+&@kc|TU~ zeRVC&<=piJmkQ)d;}c-TsPRWx@YeuD4;{}3S``?qpri*RuaBvF(i{7AMHzbK-4j!N z54soZPoz5>zTteQCPMzS))!242EEnqS9?J!n0WQR-GBcBF~_P^Lste0dBSQzuFBAl z_kg%`f6Mik9;Zw2po9gT7bF9PS%W;KnLMt^ysvN970ExkU5MbX0ET~My8t1;H-ixM zf?(wZx$vUl&wDFz7%;>Lx1#k%P&MkfFSnIj6-aJtSv@9N9i0OsTrKKKaM4z& zg8{WT`#+ghivN-pBl?MOVc9LjV;ea|$4wQ^Gu3b@bu@ zq+rPfNeZHJ@zxq9SqPvAFk#6joTq~GG-I~feoHP$9ia^q5CiadzsEm*{RuyPxW{f} z972Rp!Si&+r{@zsKcDb?J|QnUZe@#&c)Z`^=a293c)v%47~*5Wv?ToV%MsuRrU7Ay z7(DK>6fg(e9}Y-MLe5{ThDXFWj7T}*)8{Y9rQpMdcUCSQ`1R)xNK3*0`TYw@27;kl zBAZMRL=MPHLMf?&4d#Hu{(vIclDaceUXV-1Znp9Y+?Azg2i~MFq%IO?GcZ;wjc2p?{6d?E2n~@a8JMO89L@+aC^cOO z2&UXHcVRbZaL1V$Sc)Op#uQa7ambUY9B9WlsQWMHy1~g}*?YxTWAtGgmymO@gsbZc zp@I%SnAX}9akDNE>B5cXBXbJN;Ma&ia)yKkVBaDcS4#SoL$+^y9Tx!AYWvb3^_Jq$ zfrJX4xEH2U?^Qo5tx|l>xr=k z^DS))nl8hwoRZp^=+r95R>;V`noeKp>xO{pDS2rzcpgdr+!s%3i=k~+>opi`Yp4NO zz>FC$^kX}J+e4=Jnl(Va^;V*N{~C{qyzXDP=Hc2})h=)AYr3X};{;Ky*USHRLw&%+CU4~gGO-SEj`e{v(bMr0H;uLk_vvj66qt47qu zhDEGn(%gs)4=r~kz2yfB1RApSbUfia z&4zKCVY<->%Sy)gt(bn$8|3_bG0Y%9WNR#gMfI3yh612qKMsIo3_RfQc(0%-X4&>L?(Yv+rX!x8zuuD~&l1B8`Ec$yQITp$QQ7%`<2 zrX*N|kwI9>f}C4ToFM>*G2r971OEFTAMszmyvN;+u^$4!g!7VcUJ{;^{60t9QipK7BbFn-fEJ10uBv9EOO8hdYR5eER$a^P(P; 
zz?vO@I-U_DW0w1!ndb?6t*|+-7w$GA^xL+OHYt4ZgTkZ$1fJIXw++uPq zPIhy{>cana%j~~j6OlSyKce!%E$?5yCTcM-s9v}I+wE78OR%y1`k&a;(<$v80u_k- z8Ee?(V*B8gJ@!Aa)phRcbOkgT>2Qn?bK1k#8W4T)cB9uzYp zB?TTSB^0qL=*(8b#EqmHntnP>n5Wr{?n)3;AVA3%vhJ4mWA%mgt0KJ!ZoGyR#HgO1 zKmm~oq$C6ehB0C{4p=6}l$TYmA3-pLfV=$xd1Odlz(Muh^LatZKw318bTP)q5F^Ij z2;zX_wBYG@LMnt*2ssnZQ^uS!77H`0Inq#_dOrqye7wVd{rnEUe7MK`A!5*4jhIr# zd0z0_=g;`^e8hQCosw;i4~HFo{^>nFzJJ7F&^muH=c1YY2oMrv2+m8v?^YiKWsh-G zYd)iZNiYsUk=8up`S}QVegp*blJN8_2!mjJe2+kc``r#7-@n6oO30^4lUc~7Py|a( zsC1poMt>_c9ouBa7583qXARUk=Ys0~9Rr-W&m5Ml&}2#!G)uhl3KX&w$o&wh}k zQmO^b7jm@#L4i;OX`*gOW~w1SJr6z57>TZWJ1k6w8I^{&h!O*MZIHF#UAyf2m`@K^ z+DgRU;@tn;OKk-{YlIADb*Mw}8Qk0FP18Bu8O`D#< zL8x2s`Z^8`oUJW)f3#9q!5X!F%iPan&!}?U?cCjL#X$D&)bxNc?+4Rn9r--nHh%o* z_8R#3-*(vws(AjUwwhNHcf|Bf_j9kI@_n{5f{Cn5@c+KUw>B{l(R+;kUH3xuf72V?rw+z*jl*QV<#?#*9;kIrCL<-2v6Dx0Q}{8;sLqUL3{h#`1UX0nJ54eeT| z3BZa1YR-h~OF+pQLNCxz{c#+y+wIMGt_I>#7M#v!Ov?^$lwsPXd-~~altSQ*o`AjrxUUi zOliSV5>m>b7!bz+AV$hs!){qJvJj>O9M21KVJwA^lZIV+Kbh5g@p!k#-+p?;FF!rt z;ZCcZGZW^V@a44N^V2ha|8l}KFOZ^uiV3*8+u^5=@A31;54hith(zE*h)Z(Uu~WP% z?clOxe0e^BO2I+)7zd3<)0jqJ7z6Gf4p`EH^J!Kz%t|LZPcxp65qGCY?G)jqM&mN62WT11Ayj<*aYihkJt?(1`c3iO9D#J{10Fp17uN<)QUJU#uyN} zsDe*s(SuWz=>%uvbY4}ng}Xjsbhy@M%LS=nB0#lNmm*k}f@R4V$Aaj^V3;evvjZT=b!?2*;40`av;(ce%rh~4esSGDPRh;YRga z4^3m~^5$NF7Si=Kt$~l;MSki1u({xldiQ(1AKlki^>hP5zIKGQUPw2;eF4Kh{HWgl z%JZtg??6y5@M|CSAAkq!{($BO50i0~2h_tsEn;CkpYAHWyio@}E@ldse8E4e^=tC` zp3y?hwh8!cFY&GM!}hm`Ro324f*T;db`M=DSJ%Kt{kd#Ev%YloLY&UHf!+TM_N#3Z zs2DF0)Bgq6X*9aOdEuRYk3Z|*42SnoQ9ZJhHLDdeq@H!NvH`Y!Y|jBdyD#QcbZ{)5 zOR3?Nfe3LNupcAF7;Nq`h@9-k^hO3m5>h&&l!V=G!09v_D2W)xL7Bt=#5iIcci<2( zEgB*>Eg2xKi-NQuC(Y9?MZb$!w2kR`5BrFp-yiU|_Xm8q8?hU7EvHg&I?wp@<%r)u zKVh0PN(sht8!$5A!~1vm_~9K6cYDo2LBW!edO!d`q8H%P;wyrJ=i?ax1qhl%6k>!{ zT@VBiVo>FNW*~cL5=AU2;rP7Zb3Ehma6n|iI5OTn?lH|1K0lqXq=IaG)dhainI$y_ zEb|#zfaCFq-8h0n1m+pc0pqa8FpQX{Gjh&g2I3I(PLja{?1q3b1`rrCC9o`DGjlN! 
za?p5vCJjmO7(+$YEu6-;2{9pC5<5g7Ibn!ed`c^B>Xk*^Avm8GaG3FUf2UP7AxKW` zxN`e8kA)t24mj*U^jP&GMnO+LV*otC-YoA>FyciV-J3%!oo7V=5)4Byid3rt96g5L zg^Vee8-=_n&8|2$w`s}OljE-ek!BfK-&=~;=kU!hs1*imJWl^BfW>&yH+es{%5g2G zN4IHrx5~cXCosbq5?wu8`;UA)Ry)Cdu5YknaBf`n{&$4>OfBqGY92_gmKmq7_M6ox zYE>9bk!*Lp9M`&Sl+2B1LwVY@fN5l|3F(2jdd74=+SC4j_TIG3jax|*eFH!yS({Xn z?9+GdJoo?qvHQ$3-KTA9sUn$)CFVm!0LWyr7OCX6dk$=>sz@ew1mcZ#kquY~SM?8Z zjp16F@iiZvuA__Ix0CvkqVvK<_m}+s7E)yc<4Z?;RGVc70yoS_tC?IRQLaWdHls4t^ge4cP*jMPI0`}EIYZJxi`FA=4 z2xlgrTIW(b+|IsJp_hUA!j`irgVZ0PFvGJAVqHIkcJ#uT4fJ3>N5+&%>Cc!_0!Kg= z8tBMFH1(nokp#gKI@zYDf*cc=DYsU038|(VE5Vw{mz+McK2w*A16+YCtT{GJIMR(D zO-OdR@QM*Pnx*fNn5U9zPYL3NvnsteDgpy5~gKpbvdI08j9hYuXQoGP?zd?yU%U`7sVl0?WGj_v>5RAAM(cCCe zu!G$zQN-zSv7hfK;k8F}!Yu7Su3g|Iu zZe=FxcY3Ms=ER+qC$$TPLl@H5UQ^Hk1{iUWWNyznp4TRNjYY?d*w0Jgqko^Ro>CU? zJ=JE}(&~WEo;zm-?1GniGB>bM9nqN^_*k`ToqHuV+b)%-(M+n;_6DKtqY6r4S^zBe zv#HYS-_}WMWYOg6!o-1)2de?yA}f2-@A(Udo7oMPg}fuCUd!g@k$PHIvVf!fF<_o)v^>>Ko|9v|qi@&&DBq zM%#~eHweS5pot8aIL*_+5(Gi{A)AkRo;5HM)d-(4iQflnDu+Sy$9eAo7K@#nGv+u; zczi~6yR>uPlGZBt=>(7c&wgCCe(RtpV6Hm>B$h|s`wHwF3W9eSRCCep5CI^WCAwE) zalm(>z`0!cU+-j$yWJ@FuSP&64FqsHO*lM8lmd)CAf|#8Gg6X(MpY2%7=eY~-w(j2 z_j~-mzrMpS_t&`D2Y`g{IK_yE$0HsOrz#E#x?W=-+}@1%cz=tV+a1CnbTj6Rlo`ha zgv><}1Cltrni&9-xL2I!4Cf~eEla?t5pDqZFyQJ&99-ro*%UJ}$PZw0IGtvEdw7(r zjk^)!AZA!GF;YHBYJ2Dckun3NzhC0jV(Dbe zt1$@B=m|ocpSo8oBSlM8OwM=Dw1-QP196;Axb}OPE>R9G*SO9`WGYcHZnI}ZbR)9; zH0eV3oPmjT6rom03x^kiuf8G9t7e4F+=W`Bm3Yd}rh(D7y36}VOJ&k)cx7d(YG3Gl z%-W6Df{(3Y+K<=vxoIYrYj!p^OX>#g^pjBcQad&;N*LKpu-YD|dk8(l`QzxL5<+Tr zw-yYcN|T+P5!IByEfW#e%9afGOHJN>xY&HvSnz0mMt?#VHck8#f%W7I8^2#FJ^1+X zvL&hP*qpa$;xAb2(luX@c57^(N897T79IlYQFK@nwZwba-6L1mVcGE zC@(n}uRYltHPpM__Fr?xHemX_a+JnHZ2=zC)B=D=*Ui6FB3!9+x81{3Ijmau)>xW( zs%u*C$^bWh%aQR;{nHrhw5`;*+QTU z#CZnigx%=We7;n(t4tZ7Q8KL)0UANI6ySK8aXignCXn}tF-ey9m@!8Ikf0zSpTrVo zH+a0i+u_$=-r@fJ6?S_MKZ=ljI?ed@@PMxmM@$hQ8Z^r~V>dXwd$-4j`zzes>@kiG zL=u868+nlrqcC8`t;7zVFN=OhH{ZokJ>%*Z$)rlOe= 
z3Aq$}{`P=jl$g=Md0bsxAtqo>z$rcos8H|$pfYmfI7&}6Nn&tfGF6xnQ-*g=LaN!u z-zuaTHzBF*onwGJ3P|G`%VzT41GyUdY}ZxyL(5T;xrju`kT90giJS{grz3{ZiwP82 z^saKI=B;zjOSHXcA9B83$IvaM_sdJGf6)A4h)|hUl7TGN{1P{2W+jV$7HoWXErK~8 z-@0BaCfPTpswgp&(1T0nt$~o|vy|8VKJmTpVT2p_urPJ!W(QUJVT^D8-i!JTk;?K? zlENKOv;G4b;t{ytX%hsH9M6qEHSm#ATFP4?!iZ+t zz0^kaXK$T*Nj^7TM*a8dIyJ2gIY{)YAm;1SJi&pHcLBM0?7~jc88CN(Jb+6=j57v5 zK+K#)5N@e2hzMmjf=Gc_&N!ZCeEW6;v%~f6EebO}fBA~jG$U%Z_*4pr)DN1(#Oh`j zB+=l9Ta0@T_KZvgF-JTekNEm!1fca3o;?h!dNxHwFa z@a5YH|MA;r1lxF}WTX_;_M(k(&LI~8AQ6D2rkztp2m{pBT(uD5 z;KeNFTsdce0q+BLBO_%oDbwVS2+o7Egw8dhtg)@?g}!D>)=VlF7Q_i-_WB@iM9XaF^^jHu-Hz!}!dgvY7DDJIqmaYk zRT!(LA|Q>F)goPa?^|>#t-BpKW5DfVG>yh8#NNi`SG8E^?4&P9P3-%?G8e;BB)FsHEU(+1f94#l}T=IW; zFUVNzrs z>R(D{E{C6P5A@*Ydo*RX=hngtD@9E>`@LogbQ4}J5XvjMa7J&sslT-9>*B~EcUjVn zMn2cBnR_q`=w{ahV_RjtZ!Q(PV}1tcm0~oF7VWgYn@`d?FmA%XR{DG_Tc2+;-1c72 z_&!zE-&miqrD?BevrJR`R9Ty0^;!_)18Q3gn9q#$VynKF6*X@rXtF!K+Y5j$*I0~m z1sN4A5Nf^*$v+`}CjA){?YHYR9i|y*nYP{P%=R=^-V?@O%)e5-QwkTQTSXHtJB))x zy&4dL_-;&d#57HaF>X#)FXVIR@1iZC>w2CEsCk!Ud@5J2FXzOsC+CbbPXLnoX+#tr z0O(kH(^&Y=`yv4`&H@M!97zRmeF~y9>!-TJoC!D4Xh6fi78s!N4s{tSHuW|qG z8rRq2evxxhpu_m`5b@7H9`MgU9t42MWV0?J;Jt_}JrN31T}Xe7u!MJF^XLsa7FWV#2nlON+sZom_v{nK3zb_B5ecu~nNd_*~ zL|E-RI{@OGlRjHAp1vXO2|Q~&@T?(%%_;dQ;%8CG+9he^pRoZ5o~UW@y$j%@X9!=y z*sRUNw!p`_PYb3`hxJr{)^%_(GohB{ylGRE!NBTLp%vdp>o@dEPnPz%x3$*r#db`#RWYzC4K69;J+8(K#xz+{|Rp$Q;e6TSUFNo-4ePJy2-izm(x6gz1 z5d^grf2NYv^WOzR){M+bkU`y=)n~ewd>54o*oXu0Y}o>|FJT53eaFjaRZnNYjKU;- zklkyGsjYo`vR{|?^IMdjNq5dK-m823!ZzHR&z3T_oH5UH-A~M1&E#u> zU2bzI{Ki^f)6Sh!y6?OD#KyS*A04gaQp9u!mgv!hEFGWZ9EM@QIF7ofoc29po+lB6 ztd`8iQmTIp^lnl?P`_LolS=)-OrVDbJnRKzq!^LYgdsSLL(n{Tgp?vuj53Ex|HbbT z)d$np`I7kyjsX?S=P6>EW*iPD%yWhx_5gW&eK_FZ@ez}T;AIft0vrgz6GqRtyBhJ2 zk9YX>mpj~E?=TKxwx3JE;W**T!x4|ih?M1d3I4>mx!&Q^2XQ*z?*g1K?I@aK3Q3|Dut>$39EgD_70_{NN5ugn4a%GZ?01BGQ;@i-x- z2sn?NGfvZt({6@$V!|~J9=A7p#F7xfNM%A!8AM*3ZAw-q$E!o6G2aqJC~Ii7yb-{? 
zN~?=x{w}2;&Vtl)5j7!OQbejbGL8IhMH9W=nP=-&&uS!0x$UuH5VFEc^G!-sU$PHreG{(>t@oK-oQ=z_;KQk_>`xrypIw&wtlx*{3?XftJ9eC6Zc!5Y3%=>ZW@13rXjQIOFrR{(ZB4f$!k{a5*ui!j5cMWISB ziKvDAUkIn@`LnFtwP`23x13-A2txl=n_(vKR?Ol-_F2!-uNkQZrb!uU0L>)JfQgRE z+Be}Pe@!DL+$88b)5 zG|kn-%J~4m;qmc^$HNg*jKT{$57zi-AchaX&D9RSe!RsmAMS8}dxfk0APK=s#^EsI z>$ejgj}b8exd12`&I7xB!2SIVK7D+T{a%1n$r&XV%&FjTO8ETsfImJz;NfvX%+vu8 z)-aE%`m|82CQ&6;e~_2~OaL?%F*yt%1n1y`$8NX7RZPf_khmZ!SU42}1Y&Acn5(MJIS=n7^siHYkeD)ZVP&Y7mrIjaXOvPf&og37qO2kU z!9vD~13Ji=kQ)7*^bBidj8zL_?^WiRNDSxfoOSzlo+HQ+!v0DQpd)y=XVvL_Kgg-M zRXwPtr8vJ>S>d)x9V|<_MagQp1+1^j!`|k^I@AEx<>OHqrIT zL?mY#0@IXSxQL(9dbIJnXBmuh3GyX#uJc;VJc?B!X`xJC0vzg`bo;C353xGW)E#eE z`jEs`17^U`FG^%#X`g#;nxeN10VE~3-fPU%p7qWT5mDTufGVl z%X4*XX`QSKJqP^vC@Q`Ew!kuSLx<{;0lBE3vH>G4au8jX>3Yl)_+t_VvF} z>8SOKkHcSOufEm!UGX9$b7I#35hV070Lpn|?>|jY?bFI6cM&xSstti$JL2Fi77nE| zxhiVrX#!6&(f?`;!pE42wg@*tEkr%4aI3o@>X=cpn=>;)2xme$@m2KL?M95d5nhG9 zIcLmM#5_ef?@6JiD!4x!(U64guz6$+eZIJW3m!m_3UaAXujQQP>S zOoeQ?5CV3)5h+RHzbQ&GK}Nyhc&f=s_WK=%;Bm9xOHL3)veO*ZF4t8JhO)izKH%zl zr<<);Ybi6S%(ZP$$%rWqHd!-t*2779+}O4f*KwZjwMO_v3xev=m&Up7J_qdolgZ-Jzz6qF|{87OX+|yh|ZGrH1#CwmbYgtGM1UsR3M|@r~Y{z zZp}ands%h>GqsVNku9Q-!8fjTtPiTp*OCAlgKhSTs{ECi;ErtDxH_$PZE`^jwj_PD zF~(*lbcsP}T8ED5?_LX{%Ct<1h#ss!!N;~l=-ebj8ffZkeM`Yd4cDV7EU0^y`8>bE z#>h6_+hT&c8~@P`3c)3&7CGMZ?dg#b`{!`n%_!k>(ewEnb`qqe0U7OQ2NiqJ9;+y8EAXA=fYc zkk3|eyJK60(Vc_m%KDs~zxP~CFVTAROJH|_2@UEC+|iznzEo|v{AnR>xpe!WpSxF- z`t2VCP?cuEUfrA+h*5&OJ~{fORgLS@`N}=8Q77~29qQ<8_h#MeWc%ouc2`8n&Ol%9 zbLV}lg~jvhZ*16zZSDE>3|=0~<^5yxL6##1>p5H2^OOe>)m}91wl~_&jhBGh`{!I> zsStqN&W!mzII0m4x6a{$&KWH4U=%L_5GmbLu&QSKvW4YL)Vr z>Ia=mMsTBs*^P*^SJQkFR|OIPmKm61#%Ye2V+LG6822b3#5^OFA{I?H?`j%PB;kMW zZ}<4+<2$^+y~g$EB{fG8ZOX&L5r@Nsn9Qd&!+FMT9B_YkgMWPdh})|@_TD4r1kOMy z9;dnB^VbRg`Nsjje?8$aGcp1SIfTY=%S0CA@(RP{Qk{_jCtmV!$N6*v;q z>%B*CgxxSAT@}QfaGYmYFAsNrcmmw7xSWB!dY@iNE@X~;MNkt>3h0qjdx#w zmGe=;F3ar~YrS&T1?+4ypNtI+B&dt#R$j1A&(GLdJI~aykP+~?{{ztIs--<>np_Dt zS*XZ7Q(L1<%Per>5=UT*?8Z&vNlx=?y%tVa32Yk(E!INYeBz*xh1b{Qq>ZU3z(H8B 
z6#%!nYIUq8xzgTl4!#F~th&8myqYvX*pj%bnK zlFKR=5v@VUIaA8rzt&J@hUgPi0=?SIJ$;Zjw%4ScY2CnN$yGbrVRT@%UnHqrvyy-U z4Np#9#(zKp`UvIu>TY-Xo_h zp=g<_a12nNjhY0}A%p?LxPuFW2+vbe$MYE}vj#5`K1!1L0+~q@3X}L3_jzi2Qp}D1 zHy)4cerrN#xqk?9KMuoy!3Tgd5{sxcjH4v<9X#@!QNS33B=ai)NO4BV1)jv01=oy{ z6PPnn%;3ETvseY4=7evL6Q)_*AHujp%ms(jjN=>u=MV;`d!E&AIb#UGyPF;UpHJ`c z;pQ4QJTH)OW~=A1CN#x7XY1&LhAILtHr1LJx(;CjCoMw@DvNlapv1)oyH!|_nnVE0@Kc+HfgkY<>M9&9MH zN(8EEOFNqdIXk475qy?N49e`IaEp@kfa%F5WNHvVaP1UHUCNC4DrYR2ju}(jK7JVK%Mj+JaWLO{1jq^O>!yyHHytu zS^fPJi!ltHM06c9{&l5~`~TN1m(Ss!Urej|+;`;X`S~l0Gi*$T3IWg)gx50QPn4eK zE_m4$ll$7j1TH`!aVD08em~UHzd_kPZ;QM9zkozp+h&zv()gcYF8CT+R~dez?J78u zJ5vN*IcE{pl39Mp-_`6$H*K@ZFfE4yl8#rg_NpMOiXlhx7SyAe0)I3lE6l6? z4gtgHaDBZ)EEzd5;;AUpE}9A|1>@)eW*m=41TT{pQ<4p1$WtYK-h1r#dw3U+lfFDS@b9qVp56%~!#PYAyPxS5;VnMo69`j;4M!38x6}FF&)m zeZ_DVw%j>Ty5+i` zTtjX<^_yD?B7On@u^LU|(w(oy)Lr%gdAbOr<*Ei_$&CM1faA|B{W!Pi)@uU8{Ag)+ z-1lga-oA*o!d^{KCwziKOi}KmoZ&-2F5+a|nL28_EAjLsB4MNI&0K8rwxB!n(SB>k*f7_+z#nZt8h zwj$cOsxDdXA8|V%41-sPa?5!pxkrNb7evS|VJXHZGCetL&@H)90_F-9DYCp;Wa zIA-CYNlm{8H{kAihfnwK@&4`>yWo*?#559!OA2x-m~w_Ahap4^A%KPeax!_rdAz&6#&kL&7V(YP?{=8y8IQ+K$=V}SP_<1mV<3c@Un zKAtA1rAP?l3|hq7KAZPC_pPkke+l?#l9=bCUMVBiJG*4N%~7P0*-& z5X=kHoR`#f>D#ZV=dH@x6pTP;N+d3f;tI$DpRk#O0xD91nKr)2@8B;~)+U*vRTbJ> zO~hV1obLiYZ(rVOCHiywA|clL?0#oQ>bWZg>;&FAJWoe1^$BKbRo}$3%XDUrNKFIM z-V+o2QWz;_$v@$wt)yB9(+I9jiferx&}#zFX5w2PQ^6-OE%+Ln-=sM|h7gcgz=I_% z_91}K+VjpMvqeFTh;x+oT?5}1^vLpUfV;V@6ZpK~bvcP`-W@SK0fPo!@&piz?}wO} z%bZz+u37oSlv6bg^g;NwQY5Q8vG`GkVL&MvbBdUbGmgg-!mz{D)h%KwcsLyK?eU1y zEE(3>I_4~7FR&XO?%!SE)B9VzyV=7bV;Dxv^Nfe%gf9<|IOU82u|RUAz&pat)gGVj z-{ZsG9rmLKN=8g#+UER-htmoF{`(jFuYdn1PMMH7AhUpvep`l|a@_zuQy~lG6fxQ+ z3A$05)F&n2RAxAqJQ%~fTLcn^o>CHa;|?D`+~e!luQ;7fARplTAW^U8h{wYbATgQR zj|2D`a2)W*@858o5(>nzyeOAX-l0J9fSB_FIZ1>v9V4bw1i?vSlq}J&=6S9gpOKMs zQXLqu8+I7S0dtgGBV002O0&l#)M#95$;c)Mfm~7|F>^ILrQLcZ&60Q~6*vcs`w?6+ zQX-_3Q52CkQx{_fo3pR?mV?DrX|$NcH0d-#SoE_e0OSZ`2=G221kG?tFPHjUBGQdP 
zUS=?bYB|u^y_-$#Q{ZD!H##7)S_fuY*zxumr_ZE_-F)<=sgt!otG@LT9vj~}vo*7b zbEwx&Tqw_7GEEcIX_hXS2QJU(wsz>H#XxKK4oWH54FTRkBZqas143jQY3Uzb&G8f9 zZ!4e8R^OK(Scr8Eb6lDqOf^|sfH&0r`J=_8d~1!yYqW)n`qp)9M7CR(O|#7vnAiq6 z=K%hv7hbNNm<_Dg_63Ov&$Jm&_uWi5`f_&mokiQRi`gMKuRrCo7vYS*D4^+cRxopI z(*&sNmE7`?>HiENplE)Br^iR?7k2qOrp*z}M^#%^p+c|yc!^epNX>qG+K5K8{0oIV z&lN5VxHYYBV;XWIj`e&0E=n)J%@6|D*CKTm3X4 zC~2fa;UmodQucj{2`P!=x7Ar7zESy%rgpTkYR?<^sNZs|=T4!Q@|Hy6UmBd($WkMiUdyrN#v~EZjfYvYC=U!`1b7pW?3W9 zfG}R6fN(la`26(&bIOup!oVP;4~~G{=<(tG6@LAAix2Pi7z1NB1ONrcIpXuT2OMI? zoLdS42g2=shfnwK@bTTP_TBQmd9VjQKOFJD{{27k&)>da%0SA5#L&Gd`%4%H-{%3j zEa5f=$DDxVJ1j&Dgq(pGGp5swgFj-w8%5pZ9N-xHVFX{_fSD1Kp=YO2vSx(If!M(# zVDtf3p11YD%0{=rD88t448)m+fuHvod2MOTATR!Ywwv4zTn&ZWYOR-bUL{g13bfXy zQe4W5uCu0X$!dxvdoy?8sg0pom|(q8sTwl{_7d}5FEb}TT;6M70|+sD3v;3-ar%3e zw!cimw*(~Z^?g#!XUm(deLI6|zpT=8eE%v^7VCL=U71;!K4;P3-nbhi_uxg-xgz%m zlQ3xnLLtDZY1#K^Xr)~hBE61J|5y2o7ULO&_MhLuJuPSs=gZH}>RbqB)`F=5A3_y; z3jc3@AEs$Hi)JhY6mZF*DN2fVLG)mSuU>x_Dri9aR zhI0Y?>l?TbaGD|xr%57gq``w67_wR9kyTi?Hz&JX@GUITJ`0{YX zVU7~@77!DCX58#Xe7wKIrw=!{z8c}oQUelRW||ZJ zol3!JO7NHw*keBoxEe;#o)B}!oDyb^GB^Sx#E3Fs9av>%#268oF()+zBS>U{pol9o zpyAFy66t`|EKIZuma&`RSmMeQ%@XN|o|Mk7L5SSh1%>7Cm`{a$R}$!m8CcF)v=HP# z9zbE_F1ZgN$$$E25Par{%BsQewT-0;RWfIq)V8jp0!XJBF2S7|XKQ?KP?iAXWxf?Z zTxjXj@>E%xU9FS-_00D9VIOdXF2gCsEXh zD}4HRjqCjYPX#3>lvGd(@NhWcpPwJ`zyJ9=9!~|aIAlj~6hJNjB(4sngLLOnu#{o$ zH;gtA8AUf-P@8oh)WMvyEVd>zWFk;Z3&LwJN8&@_ zbkPmyJfrEaU`!BwlOmd0+AZQ?yE>OKM(L{$Ao6nAxU#9D*A=S2HTAbCBjuooL;13A zy?LECt1!~MD8yLCoBPgUdc+IWG&3e;)C>#7v>)fm zH-D(MPP%6ots^YLJ1;aMj&K&);z!EA7{0%BS)9wFva{E0y|vJ|`^6;1COsD5pbf42 zpw=x+LPsb&&5RBW9W3T9uciI}(t`XQirwp%H(uj=%;DP>ROzxZ=`A=)zsHuhTTc%! 
zYCj-iQZXGY8rT6EbxDKbsMEP40pfO+B$a@Gix^`K!R<&x+=ij%%_z)B%3t*sJYbTL z|D6d{>%UXe`>xOJur0>mJgKjo&O~95Z>F=Amn1b&_RaT2gxTiTy3Kd>!F-?&>p9Oz zSwIvs!-atT)fGSlCWmR7a5znfSuAga(Kn#P0`9!Weiv~6;ST@yZ=Y~|6)*+{C?cke z$Kwf)#|fucKw$s_aLDig-d$hgGg>f6X8i za-cwTUt8YF5#vo~zWUQCS`f>;fI z2pG{EAbNWR@1!jPh!`nHB?vEY7$)(j4rv;5AIk+l?b&cSR`^J=MOD@#VX7PDi81zljx>U~RQmzCgoXVUVV00QPl$*`zx-hrBU*}{Mb*%b*s2OB?q3hK-JiG*`XeEJ= zWU1y?Ci#Eh2u}B~CE+p?HZHvKt!Wo)1>011du&-x_c5?= zLa!jdKbP9KrS17z_^_4+FZ*TZq%Y@&nOl2LX+53q)jD3St(EmWI#|vY0mx$B;`C*A zh}6e$j@NE>=Y6V_y-73Mm|I?&`ovmTb2ikzCaY$7W?1WHb};d0{|I%A4OUNWl^4@a zC&G&=kRm&8_11Y_Ai%5rz;d>QQH11ObE4T2iTlQ0DBHBD>!?-^EE!wNSi7-*fX=!? ztwG2t5%b@a#k5^aZTiH?T8Gkhqsz1=T9Bp%C^!v68>=I_a}J)ozEn%0pgNU=S))@q z60?1YD5=MS2tgxW`QQ+OXc$h@gj6E3W(0L!Lmd}@v(UU;GFbCnS-c?UsL?Vufl^No zl)f>P-yunPRu@&~GUbHA8N)cJ<^$qGNeoWvhwOkb4hX}bd%viOm5i;Vh^*tg+K(7~ z5J-^|=2$Stgrfdy%#5V;J7-A%ZWS>kvX-NO{eF*6AKu{~zux2ac8A^Qz$M{v&Ukn{ z;M=z&rYO7zC-I9O#{nPTU*nhiTU_r4;q@VN7z^W+37?M{|NlR~;mctL(+(NFfjMHZ zX^Q}m8$j^LrSv9PM&M1wgm$RxFmP!}*x2mRyhd=!RjSXiz#XGro}paWzeT`w<;hL@#=^il0gp>*jaf^npd`m0^h^(P#z2x8^z4Qnm??FzS zm$KRuR%TSgyVWey=%kuZX1~JR)#cqC!g`?9zhxaxwsE4(*~$)cE(MMYNHt3it})b( zL~})eWfkpdT}9ifI+gA#A#kR~7vg4w%@DafUtrnVFBM}bjHxJW8Um`H+u;?{fVOuS zq*^4_tm(G5DvQ;a8jDP}7y(FGv(w{Hf&D}Y_(16{R$>fD% zxX#QL))|#Ix?M@r>gS6_umHaeI2ShaNIS#rZ#lyol&=FHKdaEj**~MdXJF%LrfuEZ zGeN^E`?%SLGAw;pUU!bS!AC1k?JwQ`7o^eFtp*zDTsZThVmyND{(J@qZ}#M@#^tvv zZ{7nQ}!iQ^8i0pb_WUih4z+3)A2X>j|$*`h;f2wMXe zSey?PoK%p$4gLtG2>_fIb8wUlssVINgRCG(*ET7i2%rk7dVfl0#yF1HU0q=q22Ci< z$XpQ3fxC8-Dvw(<>XqEp=F}};_1;2nLxo0Yi&V$d@{|~_QP;%o7;g6>89zprq?hZR zKq{;p7>F4M%m8Hs zzk~Aw$V)^6=Lq9�`s-FU2U#9~U56#_|9k28?6C&CNA(X3SHl0?EpFeaP9&@vO455$uybaGPN+L z;_)MOIg=)}wnGS5st!>Bhya+TF{$yqsrBzX!j5bd=cqEn%f8xQ9ROjmE0Duls_C91 zS7U3}+PNs>(w)s!6TS4bpSQKc{8PmwF+J93jNlSPt&7DrUzkvBlqwzR`f7HlmR)pP zx_hbJru{I+t`wOsJLo2jc@u7EqYzruwHm>L>^*2ab{V}$tiJD}Z(0)6OQ+wLKc{r( z?LyY>?iX~Ozp|uZ-#1RN 
z&1Voh4N2dDUKN16mL`~>r<4vjzkw;`s^`|)8`k+>Tb5G(HeWvqxQ%IL=>6fG%2D9o7VgvZATkKZPoPPqz*i3nrxxVgE)hx2LQ#6B)#-?5mD~`zamr!onFRYzA1}L_nwj><+HzDw{+U#D z0Y3UpugaRhzV=?M<5Vtr@%Tnv*4{c~>vN@o5Cah9vLonolB|U$gq9Jwnq<1$&p)9M zQ3D)Asx@kthJA+|nuLD4raG=OtpNJ{Yy0u<$)wN*_^^G~%yhc_eqp{A&vnlIKfBa@ zUz!hAi@eem-lNU6{f-{st-#=4vplo#FL?fpX7`(;N@ZUubq5*%aROvKmiNKG z%0@BJ_vBZ;&m2}jh|0xFsUQSI%Q}C8cCinWYZ@?X%A!x1DdzKIYtP!9WeNlfWK#2z zsWW-iesr{ynKk6EgrRmWoby2d#$iOzfJ1UV97>LeF{0$GijU4=ow-Jps+tVKf|s&6 zaGjO{`fXAT%5&o{8nCIQB3SZJ%eJtXFZa~h$zs|AXG)Vv_VfF)@BOKxh zC5w-=ccT69B+;(k-`(KXkN5bXW>i4|-RXG3TMIc8Iwk zrBgeU43Vrm5}yw)5)wESOmjpDPSKp$|IIN~2PbB6{0=VEJRi4Cnxa$(YT~+PPcDmd&hS6Yux~SHXN({`5J zO=Vn(Lenmk+%k>h;zhG;xHlbp%B13wB&&4mYnl%8eJI6%Td!>~0?2*kufFtp-vm;? zcpkaQO#52IG;i+#3m|KyMSFWWbICe}TGV{cz{+}3Tbos!vQWuC+Zxs1URCUDUwmP? z2U~(SnTz#&o$YxvVL^M(E@Tl@ECKwI;c0dAi})1Nepp{FX*;&w1I%32YFhSbYkceS z8qH?y^TyfV(ig4keU@!n(T2@lU1T2CKbO7x{-W#JiPM})7EJ?U$HRa;(`CUxH7%=x zpngpojkK}7hM6X^>1pDG!mk(Ho%MNxrb=7f{oa4}e4n@W-1-^&Q|__lvs?MVzPBve zRRLhVQ=q%2+w`=4W?i2F80)JupWdZ^T;1bD%k!#qMpB_=^#dy?7H+<7^|esc=)T?Y zE4ofj6Ir+CB(r)Sb+L_2@_k5@Dzgd!(H^%bDdc zNQrN~b0AhVDChf0_E@(b9q(fU7h$Ysb6-pq*El^r{SdInnB$DXkhB$QRWa^%7zPC( zC1Z{e$790b6cID4Z;L}p1#^^fd-db-a2NyO?q-iq?{D$x-3@N8MzwS)h%w>eaKdq# zB|V1DQy3&l*2i~uxWBu>ZgAj~5tBsRniAvTIOF#(2mJo^h~r#P$cwKO159N}`M8a> z6h@2*)06~&EGPD95Yn3<6`3{FFfhC_#n8oI7PG3uG=WHL>s)Xcf<&0|jxh$0JO;#= zL6kNBf%=B%!tQn>LU6dc9x*3b0G`#Pr!F{9h(PdSj+HVFhZ90@`XvI$BWGz7Y)rjm zGe8l<8Ukr_1ZdbdR=)}d${Q|&7Z{35>i1G45`p&~3?Szgt;*(1=35RdJFbkeUpT>P z$T|fxc|{OSBH%i;r+2>QaIyB*{aq5)b7s2%4_DuD3Kpy;;n8ayaSkq8mkKoc7qt7-zEjPzTvL~uSzmKnZB%@zS$ok;bpK+pUuwOZdx+~tJu_u( zpEFn8-b7=*>pH%$^uV@_vyFe5@wWkPbN;+EB}++n$LNx3IxF~Cfs4MGpIjPKDh;2_ z^K%uf7{%}H!`XIN1Cn!u@`8%HGqL$lFAQL%1uHJ*+^c#8Ql*|(6^2?JG=|%e*j5V& zv9oJ`Z?6M0tAo7+8d&E6F=10bbuR^y%kMWKNu%4Z8}eD-x5iTYwp^*Ld*QkMZ-I}+ z{oVJWV^Y6I)A3}#y6a=XOJE&Rpks8gWN^3m4o6gz(A7H@P4lp5Tbbt!4SQR1t@j*o zx3UDR=YpyJ9Mt9(xgktyjz#F)9z^xp19A@II3lRuhkYKSX+O+Lqunnx&loJglJKeo 
zY63U(U>f4Xu(8a>U)9-ANr1iQwO%>D-5z2sig-Ltm}3E)M=2hus6%?uoFZP%@)$hsZg=?Q{uZC^Z*g}$A~=$WwNu37;|Yh; zgegUENeWz80L%Ry-rrv1dMA$WDJD#(ggG-F=Pcmk%K;CM5g7r+gz5`Ax1^0FvnU6q zgn5$0mJ1WCEfCUw))!e+&Et@TKzmY@%MLYdU6zdJ{wPf0?afYdQb3|Hjl+O6FgP=2 z2ooGr1cPgGzTF^+4R3DtC}!JEA0FGaJ)=arQH?n%xgb$eP(jsS24EnYe}$SBJ#&#Y z=8>x7`hws9qGVh#Zci0viyG1Ii6{Ets0D7RC-D4%va0h^+cvJV%?;GKc^Z7IK}-W3 z7PhaSkIjB8eqTz7Us8IYCysr+t$oufon>Et34`~>MF6g;k#$}>ACZ23w~7$^ALKqR z)N9f#i^PT%Gu2FrI@3Qhk?ZRCQ6{1sSRzB*8s|hD)?AVg;)vI#--~X<2%(ZGe$=eZ?fj)iss-180d=pXg}?hlI8_ zSo`;rw()nB7THb8_pZ%b(+|^5ZGju!_``%e{a0VoA7lL28Or;9aFM82Rh!~eL)hV! z?YevZ0#zv0Yd$XbOPSPCG$)znE+g}gDF6W>c!UtJ8^@ZI4*+tCm}6Yz{Wz0VDC1u= z;#kuL_28Sk*UJhsFZho#gqpd%cn$Ew8e9d!!v*gVQ$j8oAn|7zh5R#aEp)kcNiSR$2t|{RFER$;S}-vw@3W;`2k-aCd5o&)xxycjcX~H z2=!yknC6IiiilAJ{AWHOuLU72>IXSM&f+)|N(R6Vg~d5$ni2w?up1l(Kk9c0y%JR_A1&IBK4_~;OJ!u*W80XIyNSKsg-X%zpbWG8f)U@`(Zs_?8F+7nvj87v6jKhH4Zp1hauu$^Ml7PAg9~U|X0;w;u2&HHtv9GhR zdhaE`jw*jv!hktX-FSS{-spF|_n1vv!z`hAiK=CU05IVZr+LO4GfvZtQV2c_@Z^yh znC6Tav!=+fKouGLUBIW0xA^tbJG_6l!#Ie>Wu6lr9u9bTJYb5mo=@@l*zG*--(BJE z?h3n|@XRGAq*yRd1*e(u_1h7@eg1~8j}zty6mlB1RV|-{sT2i*-nn9SB|aY&e5eIe z*dn^;AjB47!^X6WW1UkGsi}8xtP~Ai9Md!7G$jm&8G|2C6V5n6UAahH;gmCwWHM8c zPLI(e1X-YQ9B_TLM~VrEMIF-BA3F4<8kCrK^^smqmcbO3 z)!Y~uOL`-cEQzyi@x84ywP(R1m0@dyQKLIqSlD)qHV&alNmwO1I^X^IZTo6ly82Z8 zn{BFu#lVM|$1H$I({0NS4R}yZ$_mc1jQ7RJk*$ke+7?FH%mJ5AQunk;ch2qU``_hI z>ig=%zs|Q{AZXFQ)!)xusJ(QGD%ZQ3Q`=9lDlWA|wo4C4qmhsVbAoK0u!PxH{K7P2yKYqEzyLWpGL4Zw)1&_xgzCAu-nq$3>f^*pK2Hd~9#xEc5 zupbAYWMnQl%`>J+oZi146Mp}8#OH?za{`JbBG)XdY~i~_@=)g|<%DUL5WfaKjCO@S z0>E0^u{>ll?dx+y+E@XDRDw?~Ao!s@#lo!3DdXWVBWPIOaZulc;IP+@Gc(Fjv=xOj zrZ{7mJs2L|3%SeiNK8MmkiH(1W*Pes;)hLr9 zz6nAY9MM8D9bDVoFdIWKJVC+?dx>nY4Daq5(I{MVj}){%8>3x{fluw@g^bRo&MYXc z?p0}*p8>e_o;oLP=S*3Dkee#G2Op~%x;ucXgIzP_(Tn23C25RpF+fnvgh4NJXOi{y zd?R=^?ha%Wz%n(~JRe!=EpVD4zFkHA1Z}DQpJT!ns`2i^sZuJtkic-xBRGkRbrNY%LL!$M?nLFLYD|!j60#azn0pkuKxb+ z-jWm$m|-eOcQ_qE^u zZ7(jL;VniEsCMIak=KFXeBNzLV9On`Z5q`2BQIfEl(BOy^rqxzTACJ{smfNr?d-R( 
zrXmZi`gB_Te4?DK{}~7q+DUrvF3xlJOkdWP2kWURDmYwKvhG+X;Mh7~ZC-7e+8tLk z$j#G^>oSk(#0%6jW?C+6%mu9g+7|c_{={j}0*UTac?D=hQf?vNOA%Q*Ilpl}El3M3 zr%8PnY_2S)l!HpaOKvFvnBcg95C;+gU{AYoQFh+8is(~Zon`E-ZWx+SwEDjUq?|ELGh&P&@)&3U5fJB$<6)9~9~N!P1?&glAHUq; zfBugTxWC^ccnOEcJ zgj6!3wrSA^?04rph{dTr#t3F0hXUp(S^;lSp@jXe#Ta>wkL{cXm5kAQWHiFE7C|g+ z38ieAKQ3ThjUcg{QzHl=NM`twTk=GSAR%$4y%WbFM@|S8Hz#oCokqZNXP}|C7w!@L zvJYIP3Z~hS`P!CYc)4wLdlsv;xd1%= zs|%NACMSQteq(NdTB>5amvq7btNb{B#nLmWXe_{4v2$bl^inRRzh5yh(*JyY*%B1I zwigWp8e7%B+kqxp0tRdUg*|OFhy;4Pz+`{fR}sI8>F}B0^1omC8RL^Gmx3mVn(1ps zfbTUGv-^=v3UBu~xuv$)f}!+xP8B*fKS}rGnP~TcXwf?$MWzjryU&{dPldy*5w4)o z6y<&u)2k3Xh9PKjJrUT%dB!xI5a+q`*WH}h);njQO~Ljv+2zTqVB?v%3W0|~gzTj( z&DU-1EMQO7#Mib(u(k^W@*qO-uVK;cZySu{s_cz$@0>|7B%H}CXwxZ| zsFEG;JS6(nERm#qkg#RU8W~E%@j8!1j4PD}IpI`$F;xQJsf^4SeVqfyey*Q?X@0EQ z-by&LNYLuU@`P185|c*Tg6--4{*&NF1*BoeFG zSA}5!m@v;7)2zv~!C;pG#~4PB5AW{qk6-U`|LzvMVSpz>DjBEggs%?=JRVMnQ3TtL z97gYOeLdjA`y1R|??5CPuu~H7ahx&^r-I-AIO2~lk2uYYTnGfGJdSG8*8=pylA|v( zPN!Md$TVlmFTV)@sfS15q8yxp#04$QUyGGI#{}SnVF(gZjR3Fy43chWqIL{f|KAaM5;`-gV-24@7C-6 z@)fP~aQ^(sPyIo4YUzjE&+bo+#8|Q5X-%UXk8ZIqY{+oRPh__8F=ZQ zVMs!uoFuW2n1rc>fP4cuZ^-yY`-*}d`)gfy6QDHk(YMp@@wM3+t@`s^v1>e;=>TI-|eIxa5bNfv4M})_RBPIPWg#^fjc1`2(L6#&WRt2%|}XU5na@Z@sP~w z+v0>166+D+ibL=iF(NaNa%_B?bBHn4R1iK4D4OeIib*YgAQACUFpLuM>fe67$Gdk| zi}VvQN=k}{Zzr5$0<(nn9RlHM7jS=jgPWTjhTxD(#HkdVCeg4Q=7KNZPWbvT;czTS z{D0W{vi8VrB-;}JnTt|YcF#P`|NnRQ-sx_;O3EY&MBImn0Kg>|sU*9)+wdn#l}UmC zh$T)eCjt;+kw@#h*1xL=txU8Uygn5|syecDyfzaNwl8xvs5id%&pd-B5h35X8ss!8NFPp;(} z?p9dt`)L23kD34EoFSYMLI)SDW9&d35ZSCZmfk7{_=L*QS%GP@? 
zn&O%Izy+?F;d7HYOX`NF0rjo{pyIzT!Zps}#@^}U)>=OFbJx)Oc=KAj(G;iKb#@QW zo8o{3z+E>IAEvGCN<<1{Mf+KOzEcg`T4S>QlK~38dVL0CZlrIh3}_(glPT?OF?P zYk{uKt>Xe%urnZ&v0nuPKVn=h%kCDTRc6ohxjBEQZef|fm*srfKKJK)owND0pmcff zj{*iFbLeT5j5NPs^^BZvyFC&Bx_8YzF9KOBL1t@4lwjvArJTXRbFO6s#F_i^HQDW& z+!kUiS?|5ft^Dgb*?zvPx1P_ngfO3HWY%JL;i~NqNLdPVRVZha#~hjKQE#qSW-$N= z!!Y1-xu}9#8!}VYkv1G)S&h|toHr6oqrP)-w^`HPhY&0)xTp&h2__bBKso;cfN0W~ zuo5f9V6iu5D^vw-K#Uzur#*&o&}&n|%geba>tl#u2FA+;)06>Gua^)scs!+q=hGhl z^FRNM|NgHNc0Hkssyw_56Mp~w72jUYxJ(lObFT*Dbm;Nlzkb2EMulHmqUro{i`Hji@^@LjBEkkfF4Fn3_<{|2o$abDBRmy zn&f)5F=I7>wuO&dCu-M4MZ}N-GF-SUTI={&hT+*k#a!Yy3IHP%A#~oS#tkY&<*_c6 z&g;G2*ZZY?P6%`14saJWOK`ctXTH(l)h2r_>0rIZtsj*+X={_d1@8PXmlAXiEZw-; zaw{3)M-|sv6s=ezJO`rk-hEE%@6Xq&e^e=g(86N1HKZRk&P$=}Gs{-+`Oj9i=J(HH zAzGHYXW7_p$eXUwWWZQ^$~OQ$M(Mv4hKg%kV{PMRUnJUas=oV-7Ep$5MxzJXxxi8n zJr}sWSN^Q?<7X9(+5TlY6P7Oa)KIsZVy3mWK;8X9lY^{{ zH}N&58ec&dHTENRmRF7$Icp~EoK?xatw60HdZsYi7^`I;^s}ZSr?+lON)v!e8@yFE zE#fqnYZ!zqtcn*>7AckYGJ0dhN;zQ|E}HuTs-&l8oSsex{Oy+`e)+n`@j&QfL?0tA z!+_s^f5C6Rf5+=(M9u(_AV$ISafko<w|NV^Le!t-R zIbq5H9F(;yEGe9==(|xb4h0{{33Rhn0;RL7ZPSTG0fqhRQ8Q#Yc+s38u6ZF^X&2kYqx8R)1gOcFx>@JD3(cRo zse!q#Nfy-2!u?$5KnVaXtRT(!6?)ez_4|BUYr&TPu9_j{{HAz>_XwQV4|E5`?#t>d zOt6?DB{rZ?0mw(Z%|D~aR;<=1m0Q-2k1kt4_|fv{{_rEEXmH!l7IWyDmFch2Xtx0< zAxcmUvUnzE`;4<`A)#N~<2ewYJ7i$1T4oK133P(4p@&4i_Lx%aDxdNIeLc zEcSw`!p{kUS_`eU5blxU4c=0+mbQoke~*f*IlEjT5Cd?>fL4?U?f7ZB2i+qRXK+dy zcEGG|SY~CJa>8XCAkv|W!N@2zJYc`?@$1(E{_Ahgcs_Orl+lHVX&mtF+Zn(A{({%b zfGGpYImy`XI{f;}3BPul0$@}>R0FD^4HtidxKQiCUuF? 
zF{{FD2s#-f5#X^6kr23g7+2rfwUFF0Ps~l%HX5N7vZKBrQh|`f42_~7ql*!ea}B}< zjIAgZ|G{moz*@sJGrgkGg4lGuD)3uyGJ|IzWNH4d`nk6qaIGM>{Amxd5L`3=xb>rT zv8!ck6RKu(&lBQSkgc{pQhYx8Z*^^|2LjpryLKqQS4+6O&&RHC!iU&sHUvPcSq-LX z{nsh2^}{V6I;PvM2_I7Ky_XLf!@2e2$J@Ui0~HZO@>Pr9V-?>Y-=*u%&smx_ZfV;7 zfTt_KP#BI~S$4TRqU#%HWPu01$$%8wW)K0j9CKtltwwh#(X}#;oe+(>RVr zHt4#YS$ZbT{{heV?$T677lG5U$JZ}Me0e&cBLX;M7%n)UGhSW>yj~JgRzaH}O1FDD z;Op~IbA5=i)@Bffl=1C#!vFpIcl`6)fXk3EOboAVsMF=(pG)6&IgtXDqy1(FyXGlU~32J|~k%IFim#C$+Z za6lPf{nPx2TXU!T(7T&T*|gNRPzS&Pnz!ERh`I zEXT<=MUhZ69QC~j+KqNuPb)mJC}}{pjfIz%9RL9V*{;)#M-xP9$_}n39a%0UfBt+|-Gy<`z7X8JlpSL-O+t`CrTmg{)EuBG7EYk|D_aZ6f(swtEV z=2!r|USP9o#vbzi&Rm-xZV47q@{lU5v>H)k$7Qbh*|}XD<_-#$pyOkT9|>MR&!dLF)L~b znhTK2wRaC-i7cK%SXurx?y2S$n(t%!c9v_QjsH-=dYSKNIS<>lRvH381GSk55gHTV zbA0WJWuV?VM5KyIVF9=*!1nt+0N^rQ3W-l~!}f|$>j2PraIE9wJgK23_tP^wSPX~D zMcEQo2UCl?6)pt#AklMCgwb~aJIi)XA?kX}Bc{=`>|*U<^IEYNS(5@ASlEckbveRI z=Za`QlbAsebf#z}L}RGTmM<%U+L%ho2z^9O>h0r6kwe#)y^8`Nb{(jXU?L>jLy`bf z%1BvLMF4~lJ0up2L&7j6qzs5ed$$Q)2lPGg<+Q`!etp81(*b?#AXyN)4#PO%^<~1# znK4a-oC!e2t`GS7bimi=C+vFlGGf-C_K^j@y^i?bf4<_kZzI0FP8gXWv8(y2Z2uAn znCkw)3`|qPFpL;3BgQdlF04j?W>J|?=ZKDn1DWm-0q4=OQVkc)>_r%9Wv)~IJys@CaBSl?REjvzC%hH$gQqeWd48&ec!7a z82rqTX47TMt}Kma*sKsi4?>OiP&g8ZfEY1ywg5b3#A*WxtH4*|K-%5T1-1HtfsUDQ zW1BOZzocNM6$qXjt-ON`Ar|#8sGlwV^TlHS6F;O}s4Rl^d?A-0^A>zGGGcw^+C^!r zy=yB?{UOUSvJo zdupv8Z5_+M-#4bs`dvxF+N-x(sBX%oQs*Quan}cyCBUmcelW8MHa8NCenPu`oImgV zPgd4O`|&xpd7V6>d-%YXEd^C+-peD6kWvFnMqq%EL2&jxM!=HM>rRcbu&}lR;sB&w z7b`(wOLoBB?O}Pz!$Yu9Px}vER6=N(^t`>&cJWq?o1ffY%asn>pGU0gaGTtt0U>r^ z7XdsN)MP>IRCrcly}Df$Ky+oyZ|!U43<=Ag&d*YU(zv2LhKzZxzWWfB300M<>Y$U2 zXecEH+aAoQw7WA)_qugg$I9DI_K~-d07N_Igv4?Z+L*=wkh!@-5QRZgF7*-xbsaNd zJ*NppkqQD+Kn8&rm{@awcxXTjn$(Z8U`p9syE;u#5d@JkVqhGPJN)v?317ZE;c(a~ z$jlkzkn!!?1uw4&8P?bo4lJi;B}nv&zB4S@%@Z%FA38G z2!bjqW3W7VH6BgDgIcks%$DVR!eFsEeT=Kvvepx*ol74IK-PsS@)kjcROTWV{H2-z znFoL}Vh0>{I`I0^30+Uv@eVBNYNd<;45ADnowWU~$8O4)#tdcxQZ{!RLCTD-?-8Q0 z>ac2~VGEXX1{v*}|I}Ov<~s-|XtEc;mVYCQ2Cuso!-YB543GmE0=CST^Wvs<4}d34 
zG#EC7MQV+M%Y$j-ZCnv>Z@s>`);AQu!OS`l+pNf9sxNEHdegq$wg;P3UDkXqE$S@- z>Vi?~mOF_oe@ivzwn^;T_TCD9ccHk}u@PYI!Z02>Z{J+Vn9zLjJ9Ai>Lov+ecrYf9 zN^@NM+TyG-=L#sLa-ENh_Up&@mRsP%4;`00JlP{QG?KN#A8yjs@*elNdu5BIBzsS3ux!QbS*~)`) z4Sf8O#Q`Gt3d5k8$Lc;GKx7(x`#N?3B1&!}l1-_kXS-XE+OC!JzkafQtUYrMK-#sV zC6~m;>f$|f<2FTB7bE&^hZq%n2m?8>S_+s!A(ogCG<#!l-+G|T!LWbkDpaa+o6D&I z>h9?0TeQ>+0u9cFy2l4nYTXz+8|RFil}Neu>2n?FN6?P z8At#jBL>28x5L-d6TY0DaM&ps6KBCNW{l?xUcQeQE{w!L&I!9d;`#Z2Uw>7)-562G z2F5IS9TI+fx!|{N7reYq7)ORO#a4L%Xx_i=4aJZ1^W~O;u zhs8~}miXQ{+x0O94YV(3f;Kn=n#F*)W#l6ZnYz2xoRg|~A*^m!SjC$(nuu0Q?M=V? z%tz*5Mbe%OM<@OKfGE_v9(>;ThF&YP@0&hA1g#UnZFjo*`~p5a3pzKTXafv=v@*aC z&h|hmi7yG|y()153J@W;fYjZ2ZIzSyjMfFL@VOH~J|Oe=P4MCISPRX)mbHNWW7dz` z>{FMF+wIIg=j2NSp{~-)>OsJdtM34LN2Jfi#D1w-!xTXXJAeVEP_7t zdV!`F2Lb3}K;Q3_0g9qpqXfuVmCcz|iA+Y#&RCGi+<7k*NM5swSnfBo82LFF01LoY zprY~sZ2d4uG6;egHP2kvX|hD_BBm)A{l6-`$$*lxM&+ErSp)7fYmO8BH$gdX?HtY- zyBP87*Dv_x>oX4fosm$e3)JO2;pO`U~>>C>6x%t!UxFGFsNr)(frQvb91mkrSeC|bJ_OQzp;n`k=quZxocVla*q`8el+dZx@utJ?D|gtSZ^+kKm%aD z&puqo`uJOxyM0}><5nHMR`2DnnLl@(=bud9xT<(dKyPeDFGDF?>gkHtgO?d zZM8CGd_6tk*I%CTblM^I z1SYkfr3_q#gx|is;{W~g8@^u>E~5bthFy*+pcV66UJe}G_g^9H8DaPV}4B* z=pGRgKB|alKo#X|o2O+}j0BAyQop$|Wqkks3J7pI1?>8$G_w=|0=g+8$EbD-Q3^27 z1c1=@8oUHJYr=?$)OS)%VrJ6vpxZ`pXa%q$hxvhz_Q){mW_Tc+25$381QBB%a|=r? zu4UH0?b+qOrO<7tG-^vY8Iu<&;1Xiwr13E>(5|-oX7X<(XKItk5s@+;G%c<|bmPv1 zHqV7Wv=tOrGXPbfy*KZC1FQ*e+5|YYK9qOl;K-8=I@1PoeJ`_vt+cP2Z|8!ZG?&%3 z-?o0t?=86~8X#1u58s$1?hT~~E?XN3mfOVj6S5VAwuJJl2bF7CZL z5!4`4*}7u+#GCE8T^4=yzO@4Gjp|)fTP#Q}NVa2m{pL;ZR6+Ri;-9269ioPc^SyO! zmOCD5RFU#Sh55dvbXnYb{?@W|g}Zqc+ngh+gb)OCw=p3~s4B_sH(H>^LszymU{>mU z)n}KEZnpiAvUOz}<0CQ$EQX-=e)B`4E1QJ9~i$H|F>#*;4Y5}oO01;IHPf4v%L=hoIj42_Bj!|ZHw{m%E z7Xp5HKH_h`e8KUsN7t$1IdjG^PIx&F`1X1RSP(>Asds&kUw(PU^XY)TCxFyLCo^y! 
z5`O!B!9Tv8@%wqgm@GYv0oP6NVI^lSc|OK5BPES}o=eD&mk*-heY~juA@s$P0Hm4H z$?XOPQigE?(HS8|1gKf7(#>}0JB`UmIU#e#G`WBn(OdS+{eFj(cHo>aO$tyn|AP7} zQXqs9r0yD|DjY~T`KcLR2NQxorp+c&YuRi|ZGcKEMEl|hmS>|Z7#FZz!yjhZCWF!2 z1qwxb!JU*$x#|me`!s%JdU!jw_S~94&sm%NovI+%YW{1$HvtBpO1gJv;ij%)$sN*vn0*5iY?%{KeK&~zNah&v8o_2Vc?T=I6s3S<3F{yrOkgt zd>?x+-1vk-c7}042#r!jXV3m)-^!n5X{-v1 zxzaE;F@Da%MGN*VJsXQUu5n}5{)C`!zxw~9dMSF|;X8VA1s zf+?bdG5&}EDP>HP=9Li9v)>hoCDimsz{k9`-e3hn#r-|#S`Y#S2gT}Y6^(TiVsW~W zKoJcEF$1J7`hC|SWR17+co3hj%o%B9j8oQi7=ooh0E9F8F5=7S3BUgO1^dcg zm{P)J7;wG}czL~OzC8;7KOPVG<;w|ArvqXaAyBt2W?&dHe)~S)pWn{-{XAl1Whe6- z9|3b2tIiblJWA74T(44cWd~tf+PJ?y{y+etB8qN!49T|98GAx3LO)8vr7K3)!|H*a*(j-deBoydCZewA?7_Gu@or05@ZBY6Ek9 zVQdDRTfm!xrR$p#K`f>PrBBlI3XP__3f(KMPYb}fx?0tLH|}cw9HPzl^%3P-e;u&a zLu3gbHs((Q{))A3@!4k|$g}#(d%Abf{J2n3*Ts#WW&1w76@PG9D?QdF)!i$8Nr%$jCa5T>{AhIyZiqFc-|3jqgXf^3)9h85(I_I>)yxA)`w&mRRsehN27 zD@3lCt+5z>#>|F8<^lFCknyvq-OrcL0Q`;4y%PX`R-IF=7*feW?3{qTGf@wbuItdn zuEZSV%$O$4Cm~MyBe3~tRInlli@46WgAW@N|4o|UIfNR2%K`=Xq`~`v2%Y6S5fMn_ zIf;u>Of^)=8r$M6Qsr?fPtTGywM4R!4Hkn8pbv!S;{jj4KI8OsMBi(UIL;a4G~(su zjF(>+dz5^^5UuMBLW_Uk?# zjXD4hbwT_4_Dcd8Nwd8yAt;4ed2C&(c3%rX77D}iXo0&NS+%H+UAL?W?yBO&sbftQ z(e%swS{p&XXv%^1X$2&@Ri*{Su2E^n%L%>|yan5Mn=xog3o7&#)OIlwI0KSvdlwjO zQC9=vHV0_AP24Ls_&y>O3eff#Cm$z&{Z+V{x67?jbL|@nK&!4r>$bTDsr2Cv_Nrj5 z{tcjP?rnYyKbYFzdd>Xa*_hNGton-Ow?)}d0hnX@N9{i}`>{Hf&Da>T+nVQj-`cgO z|6aFkRa-m9qSg8=Eh8XkGVy;H0Jj5=$YpY}P8TWb00v3IZg z-+Iq@w2k!|J6BNWUIEB#GjX*&Y_-qc7Xe^WLKxo*9BAQ+3_u<~>B4He&$iEQ=gD%z zqggw*`hRa!+cQx_R{1G_jeUBn9=ZMf*48nLcU)ZLM{hZQXx)yjb-S!%)<&Rgw7wMG zQdO)UZjETpEj!^B3d73OIji4f3qj3$B(u($3XZBOu8z30mz9yxcwJ?B();wxQ6;mW zi=DBPcF?>MjFgilv$J>VRg+p43KM6^*XHx^t>DArfjthy5`D%;<3zAO30%0y&mh;1;X>;fM33z@cgvLuGcy^3#QCC zUlPu*6NWJ%XU#IR-}N}1jtW-x9VjU9m?UGIGR`jvzkeI>?fZ!HkdYZ0{(yR(0dDEk z_35uaW%WuLhe_KnWj0{7df)I03IN%1`FIZixmJg}KLqEa6@g^sbIzphOd+ZZRp>fE zly;4UaTx~7j2Y0!4#)i-BN4`tAx!AISO!buJXCn@`W{^$v6mhC!yYs-m=%SRsQZ;i z{TpAf%c(`6_t{Uu6LC=!0A>`u07_01s76WCeo7IX#TG|^T5y6<);WK+qv#ldPGo}R 
z9&s&(O$w`1M9i%WIP4GF2Lm*LNzIc2IK84etTC7Xik(^tnxtdedfRY{q*qz4gq?RA zMaprvEB(F(wE4Sk-I08}TETR!dDrjUDy1OAtz?a9usOLz#C)I4Z?pt^>+{qiWE=Nv zp6dTt0O5?*lst!BM9%zH^WZ?{u|`hW?4!D(4qfa)!ro>Dajnqj z2cy@>;@|^N$3&aABt~A|KISb+;{4j0w6w_vr}r5%%Dvr^NXI@PnzpI_7vKkN(|@VL z%{7-ick`?No(r~G&%9--6sVR$7b=$8MBf$sK+=ip)XvuT`j8or`3UgA54gE? zvTW2*vT2H)BQlptVe8dUsFeBVvzS&7S)YSj?`1yTYpt{Izw`Yw5Fz_Mx~P+=t^z=6 z64mB8rLpC2JqPar@#R=l;I^^nG;0;8`&rYH`_G}}7_Of&!&)12F5j7lBCZAf~c6m5+;n?`NTpT?^Qo;UNRGEJ!&cWlfu)9t3y8#E3796q`I7PdWy1NA zzZ#G5!!@|fu9)|~sdKCT_Svl|*puhkMVt3|@Humyv4E+7 zwb2)m;uvDiAS+n*cU{n8i8zHV1G~V!H{oDIsMJmN$A!1s#RV>)y2N=u z^?=$M=lkV-hrq3}9HVmh%sz7zywvoq0p16;Z1MZwrL;one`@h}ssWZ7piM=qjnA4= zKSlYVj>+{i=HI2v!Qr3L093CH_?cr0#VY~i)$&mjY?jL4F$@o${p4kWv7a$miU@b5au5cxF%tw{rl{2M z>|zIw3aGl+AxWxjwe#Fl0kBAk14(H@W>rW<3n>Uh*mV)7(-F_lPdMy*bfjaNa>C^@ z;pKI}d6+OJv4Hc4r{e*?e*J={r@ewIXzsf)Wn9LD%aHJTNjMJ)Q&uG%08@Msq?p5W zOqo=1l{4c!jCj2a7^h@9`j6ZE{u}_(6lVttOb8ND*)Vhi(?A`;G-aG=Kp-_w^<6~Q zbzosk%m}PLhzU}dwuBkfNAz8fr&9pvV#!|wl2ubq(K47X+9k|LoI%t9o)9pEf@akZ zaiK}YHMvc!FC9VLGw>0b&)?Xr>VEYYczsK$@iS|IW)@R}^}PE$ z0P^o$maqSwrQzSIn08K#%Dvt5)#Ccsocd2Me>(WkMwETm1zokWUKbHV$=#(4SS7BT&He(*a*k zPdFSb(V(am>1^QRav3e=S%KVPzr)wB&v-f=3-|y7Qv%MHgqQPx%VokaW@IjAt=W0) zjsh+CNStvQM!a4w7^bZ8Fdw|{{SW}znpkhWN43}PE9ev1O@sjqAO_w1$CN>r5iv#p zI}H@-_vqa9Mi>$LVqG8=QP-}XBKG?pIV-1hE~Y#+U)A>LZ!VKZ9Yuf%jqo zh}CMmu@P-lR9rdOwS{#cSK1fc7Eb+p|EY=deEjMLR2DnXylV?Ozh^;s2l&{iq6o#x z?8b;5g9D|=)8@47P1WhB)sB3AX?#34ho8(E;{6SnB@x`Z$EW@$mnA6ngAzb-wQGUL zU*#Re=69Q2es+kKEZOzD|A_K|-F(kk`EUc?Q0-wDf8cDE}7?d1MN&DCAkq3=~*QMpf$ zvN9~?oDqXrXtKanlbkhQz%F}AGd?b^$ki^l;V2vHCzU>7@l{ql^%QRQ19L1y4}obdhijLR^Vd-r{Z{jS6F zmjj-j4mB`d00v<+0P^}e;(VDfC9`}GuF^Sc=yE{fglSB8y=a2Mwe9EqrJY^{fV>w~ zzIC#$eT|)Vjlp@+RQAVx4Gxk@&8u+>6ZX3ub~~U89s20LOTaLW5YBe5W@-e0uI~X- zGuM8_gP#IS5Eekz(vSwL<&- zMX(1tqth+P1cRDPgSmSF1c(LKO8|vkGlGmRR7%=lf#W%41PbVPJClxelZjn~Oc~i1 zL!(XR#+^k-e;4XKs>~wp;>5yD4vQ*Cv#5Wku$zHpwwbop3@=-jofxA5#@J8_-ZHU< z0yv~u@VAOvB+?=m3O``V-Litzwub^HoN1(h>PDM_#kedV`=fxQ0Jz3-)O9wNF(^aa 
zMzH3r4Z?`Q*wr@KpTgBdSncn2bK7z?bGNkivlbZoh8==yi|Xc8HIP)m?((P9@xZuU zD}LZD$8~nJJ8r+Te}=_4F3SvT!N)^pRzK#&yya!W%=fuZdHp#rf%x`(oX2)ytr=_* zLoT0DTKWN6%q<$w#Y69&QL3mhHYn6REfnw-Hs)8kTkl=edChWtyQ%B%mx>CvR0aWr zi<5{7NW_p30k-|#(fPIK2xfU4I5U!UMr_Wz<^Nt;w|$`{$Gk1ATMYmRZOqzLoy*N~ z`EJQ#I+{!FO|jX#v^uEL*b2+4j^(`FO8k58T`%L#Rxu$g$ihV{Pox3bLK-K!w7(Qa zL(4+}_ZHx4orobI=T^b1`-!_D0p^Aj7h}jZznIol=c139_l4Q>eYlWI&3*NcF80uP zvY@gbtHs3W@Kqi&B9~FKwg(bZJ_9Ldbd~_nm1QNqcs9n{r7ANuQ+W^oU5$6s^`!Cz z2Ucb)FaTC}tpH?B09akHGH*PO(7Gb}C*NO(On&KJfovBpuFd`9qjFqcmQ zBC-h5m~p<0NK>-4TV>=H=j$1bMp+zIXH+}hvw47@zy%9{ownq%1g0@VdeN+yF(OY1 z)Esy(mr;Y9KsX%s==&X93J?PwE@&1IveWz^fC}>$9?GWcjhMwK1X6mqE}-i=#BS}= zN@#_O03euurSWSM)vl3RMWSo=qk{;PjYDyi9SG_Npq7ZmCM!%pM7fTj%wCj|wmZb4 z(T*X2Lyc4LpUgFjIbajSy66Ya9AwPF$MyzWq(eRYh|1)o2M#d+ZvY?KE)|%Xz_c3z zJQonVk!qcp(!kFq$3xEtGI{Oz~8Abpeu_^U-U>iIqI3oleVqnaFnR+%YVIy?<9wfX<)DKQYQRM4ivb=srQNl?Ud$DG z)pxh9G)$k-Xm?e)nK4bHF=d&e55Ro?7a&$T-Yv$nx0Fy6bgHb@c1V%QmPO?lC-Z5) zKvC`$InWg5+Vv(c8EVe9EEyCm`Jyn6`yOAuobY@+V!!VYVnEIb!#Ln_zToA2!7wEd zNJ9|%9#5wO4#y6=T~y^9RC#n6Grqr$=B|~Ia)3xIO1la^mViDWi{LU$YWWxvk^mw~ zqGa9u(DHKt5CD!1nFUjtAm~&J8={%xI*2Gj9;Zp(BmfeXqBzc0ip9Q638Alvf`UZNGzx;4Su~s01DI>v8Qg%7 z#G$9*~EgScnpqX8*QS@X`KzTQj?LFF>V^Z7sCS zu3rfP%=fkAR|*j}tsgoEK2O3DUk8ACs~xkz2-!>I%2<5@F6T1h6yjPXIzF0hG_(|(iZo-4yV%r&(BBfdO`>m;BJWqUtbem zzF+WqQ35A5ga&{VKq_l!oNwo%;SPf(Hynp#dFG7q&MkK0e0c)TH}fOqCjua~a290B zNGYp;jsS`Pa|duj$_$zC?y0}us$7nBKVuZw0J1qr$sg#~FEaoF$B_dCScfrOA# zLe8n7vYfk&jET_$2vN1zT^E$$NC<*lg_5FiRv^J9zBJ|>$!@CmKTGoTh%@A z;bj>Yvi{zgv#7XmEsMuN-Ud6$eC4jz4nA_u9;FDg;{ejSi`$tI9~+s?X9FspKs2vM zh$edl#2mmd&xNIh2x#OtG;C3AzvdnXLID&afNXMs6t^S?DI`i|+mU+aJU<=O@(~Cc zK9F#^oH1Soq%lFV%5nQ$kJHl;$HNYtv3BV=W=sNne;x7tWyEF37$(j2QH)xJBERW_ zXwW%~^4l#RMdsAEvB77zt+afrb-a6vAMPWQd3I=S2$E{*bN7adtk@=;yX<69PUw&V z`mTe!HAxn*WQ-z^X@p#Wt|uIJhvI+Mg#hgKz&I5y>M;)(x(i3&1E$(Jv6vB!ix8?Q6DV$XZaOR(Bm^b#V9vV8_=<&L zet!j2Ew{V7(#@LL&R>Bs-?$4(8<1-*ah5MZ;~J#oxL~*su?yZmdN{8I50Ry1#w-W^ zCHiKJw~Xu+>@LTERMAn~1ARjxADBXg=pecF)yLS|AtIJbK|c?+?pq7hvQ0jYs~<7T 
zvLTH9->td&p)HPNW0o4jPw@MONlvz$em<>`{gY$m0E^!iU*lZ>0N|u2t9ea%XFu>X^(XS-a@0DNCXBo&un;lWV(HvlO6Z0*)fgf=HC* zlFVH_1arHBu}W=$iS6e*2qDlcjeyG~`kDy=O`a#Ki?b?=b5`~$^0UYL5dwP4_o2!h z-Pfj+O58_`oo}W{t`HUgwkJoMi<*PRp1U>YL=P;Ld33xyuZMR%lAtrFkv&+RHDMx$ zfIjX}vY`i!5z(P!NLJZFZBD_~SkT;RfW?<87-OKZ9H^2*m_Z^qbUnHl@O0SWbUI?c z+nJ(R!v@B2!gv`mUM8?;oXLLI%W%C1K17GBElG^bt7hl>@o&d%%JzIWr^( za!yFoglQTJQr++ONNGe$6NW)wbdZ1=oMW2E3~(T@u?)CXLAVCrR6)0b1%bYBn7jeg zB*d{EXe<2HAZMB1Yk!u2mnz`)+}89{6tN zRha*!x`r7AFY3IWyAw6@{LSS`KWF`z1CeD}e=@%=$oilv-oX{AZq(oYdwu|lToeE2 zYk&2f$o!}2>s!jTv8DoIqymZ37c4%j0EfJ7tFx97_%&{uuF8W2-?CAWX z<%?Q>dvmN@k>Jm72DhwtvUv~8e`f3dW-~siY|roI74NCGx(ZeW#@j0UQAI=BmUeuv zfRFht)`SNTI+{z0Ql;77X}uSI#H-!v#gC8Jf=Nmx!ufMod;d~qnYZu$412WP8MRwq z`@SbAiDOHWWVn(;Te*z&e2aov6`D>y34N1QmKkmUM&h)7Hh`UO~)6F-!Jw2f*^>h3xg*MwmsUxc-e<-oCyZ#7?9v+ zkQBf~1jMehyeAoH%7unl-P*`JRyR;wPO`I5KEPzil7~;h-ZW*5Q^Gh-X2ojG zBpsNV(3RT#1)DPb-5J-swx7-gOE*B(NHGLcN@~%7S`FN_hed;jrj+n{zF-&CzizkN zqu=i#V=~j13#f|Fr(rbzzYcvLa6BGt@sP4&*`#){Au}TcMqma~E}YJ4t*J@!*f)W! 
znkZNXS`o3urGY}B(~O%+wTs7gx7MZRfg3y-Bf(=UU~wuH_-)nWZXfPmrApdma>4aI z`o{zP*zDTe-KF_vE*#ot5HyK>lquxuquaw2b#s1iS%MEWYDqcc3MldCxG-Ao|G4Aq ztz|X7Eh7D(HWh)o720P1es8G<@cWy34gTB^(H-$_fjC03+I>X%>Eb=G5EB2KY5S*_ zKl6>uT~K~Je(ljR6JYlfoV#ZQZD2;EK=kzkB#YKkKo~uvtvZIsjUF2GnPS00eUpgjrxn$bcoSo^_>X z^S{-_qTIt`?#T_wjo5z7SzWU{rZcm;*tdmvtNQ9s{@w5RM3>xiQovCYYuPB7$AN!>;ckJ3&5Z7SKx` zz>+<(0xH1PWOF&|f`UlnQ?DMaB$|c1@1pZXsOwmx;otSW*Qzhr`dJsCEixZZ@`X{+ z&05pE_3qG2Mt2<}Gu`QnsBQ-H8_U6$Vf}305!JT+2pebB$jeMS@0t*^{F&@SWsEAj z6beuKy6-aYNAt642Wo7@tunj6fDsrF(SgW~`P(~l4ZLmr{oX>&J0T+FayN^d=bM>d zzlz$I>BvcDN(WEQ^O zDgf7?pI1P8OR4XpzRULt+RHIsZjIG}t+{92qE(&MA`hD+%^o9lznq;P54Ls7&6Jso z#{o9l{Ag*P5$uF)=X_DnG-W+z^Ahe|ur=53+_C~BFiSb+ZM{|JwSE3xncuU1Z}az4 zU8=}@m4mr1`+IXGhSzsZp|@y-n>h5=;%TAZQ8j$jGs0ywR@N(bQz~QD)DuEW6)a6o z5rPLrLsW+I=7|3RvqF?%^3U{x8c$r)1}w+Ss;?FS2Wq4-Bm^Z6q8Lm86s(W+*`6pn zn_^bIUP6tN5UKIM6wn3I%p}%dy^0ZtvD^3f`sIW#rzh-s4J(+Yq*jO_Ax#-MvAU(l zh{OJX)9HxgVUMm;YDp-3&=fx(V6fJ^~G!N5otA(b)#k0gfPsOEt~>~7=j3~z@PJF zK#WS?d)V)kCwt$6a|8hx0QJ*LNr6pD=%QLxc72c3Gt!g_!%fu)0G@ou-D`+q5$sf0 zS2JW6BKp_?WPl{L3_60o>&!f-fvlhapg>>0>bEMahd%_xwW|^NP&cVsr?+uF$U1hO z1dl?^k{V;D3o8q<DS1f-)j~#Q@ODo`AbD$C_-6#jGZg^7tG?})O zKsBybb>mwerc{1C3O-uPMgc=y*_ya)uJfb26%J-?egFBsDs7OR&z(YW(DY9MS1S-T z{;@vAWKnM`KwlH6w_8*iK$|dQjb_tu7@K3|7FS? 
zfS>@{brHd8A1F6ry&tXxQrUP7i-Y-A9j2^lbRY$MR}ix9-MTmRlU87x3)^kt#UBG^ zfLr@%{){_gu~pV!;vH?83mEIUd}ZC;YR7w4!&|HfT2x6=T&jsIa9h1GICIh31NYCg z)sxB71Q=uD3iVpJOv=50F_H>Fj1{YrP^g3i&U9S+BXF<$M_MFJVYKuUsP%y_*dT*ib;${2G- z%8c6)7-W3{h9Ti{88KO$3B(lov!zjMtLJ|m(?`n+!~`l5>`&{0RW~f7qa@WhGY1sL zmNF>nUpVEA*YjCpJP6qDJM`U7ed3tGS%ZR7O5m&%a7qi;q2KLsU>%lWxD-U|{)?^F zSVYPs1Tdpr4YB&q(UwHF>}%8KTgCO$wa!}usD~3ajftSww@u;2Iy8W_%rJ*aC%mKzpOB7H65Q^?DopA3)8C z;{4m@E@6=a{=;{X3PV<>i)j@MkMs><}n4XRlPE zop-t;xU3aIKs4bw%mjZbYof|yRo?N>i)%|XwsFQ$lfx9S#rxB^myak&g8Q_fQs050nQyS~HI@rdKo5q%c{bCaK@ zjA0lm0AV)E#~!DrBMyfHx;|*-Elrvs0XLKE*>rO_y8MHWP+&7R`E|vTY2^Wdqnc zL>>{~W^P2cvTd7oTj0)iD4HB+;%WuQp84fn^2^fB|qPnQL!1>3NADbWq6nAca*)8K$Wv#sToOOQ*mJcIDL_)EC z=+%T^8b<~|6d^K|tmriWyC(7rF|6V-m}|E2GUo3Y61j1s1&z;%3q7@i4>9)4^$a*oEVo&#^sW59wtmVW8!Q*x<&=A?an!47&PX?X_Jd0_{K5N$`S2) zxh}WOD{8$nbWAD$`Tqa_0RR8&z3Fx&IdV044RH4$v#MHZ^{1nw_kX)@M$Qx z12#WwfdTHZaLLT-1$DAWM)-o&%&x5;0w8opxR#3^dD4NQnCQ&=VqG`a8@}@q3Vz3c^O%eE#o44Z>MmdpG!RS5U=)p(qoc;QC=EoeYH{DP6-^pI2K;B4@uBK}FIecnA z*dN%Q-CG8F%ihhNqSJz}eo4=7 zpC+2FY&wcB==$X6s}qLw&qxnO^YtDXb~iM-(5KCfb3r&-zi}`0kC$_xJNAeU1SLv? 
zvGMD-@AkXG)Wf-ckDn9ojzjX8n9lktuS<6JVM{kU(>>UDSQn<|2k*Te_KRia;V{E2 z*Cn=|+?1&>#NAFxQqK|0tv!+mOC$8_?aQcTmmIzXq&y<7MAAdlYHW{v@87+%Y`Sc5ASwP=0mA~#>6%WC2nE{OO2u=_nWMh zw4Mm;{A!VCy(;Y2idN@KGMG+z0SF#DQPkt1lq%LIwLqDo(t4Ljz}S~BC;a-`FF2iM zpaNUn#C>foxLht0lU&6WY*`j8rwPwbC(P5N;GLgjzGgkF!_Sej^WWM;I?P<5mti29uuW54?^$#T94aNaLL_( zlN|#cIvoX{-B`452_73k%vj3?CP7bAmV#y?+*cP0w@oH{S!OIbBTos1GjiQ9O&QxJ z!el80udgov2+MLpN`&P!qtp|mqG&UVwJ{n2(2Y-0(c3haoz5asv~FA zQc=q4Lat2*fe;=oMc|0_pT%gj&K$YUbS`56N+$%9xC<#^kt$TJI=L0&swT+TxP}ck z;_dxj5TepF5Fr&X#4I{tQt+c=?xZj4zkm-TaxLz|1pK2t7#WkkZ+BkA&-0dX!pR&p zx2|gGceM}9B=I)8KJ@2-o7D2bra(Z`4l-BGjv@pzDM?LWDO^eVJ3%Y9wtdw6V(YPB zd94R@Ob_Iznc9sMCFM0oZUVf!->gTzx3A8b>93S+x&%Mob9}!6&^8arB}+5e+d0;9 zY>h!j{Goeb+(Zqi5t?>NNwTN1Z1h*?TkQKDAm8&`G-ZJc$;0T`ia=Qoe7g`L*j8B3 zX#;{4>goYw+5k9pLS}7MZ(P0@1?+hJBd6d2^}yX{@Xh_h_M(nK7OtRLwTPuz1Z{3a z*cWYZ(oe6)vUOvLBp7427<(h^{%i;sA`mhdETzy`_Lr2ByHod~+YkbbX`r8zE4*}X zdhx}_+c+PhG^18iYDC(Pv~ewmzuR-ed+*ZQ{VsdN9lED3kL~8ZSmkY6C|k1iE9FoC z5KZ}M^O$rU`$Cg^7o<-)d!iCjUTwFqzAuv&C$5^v2h_RMFl|zdPqe~FIU}X)DH=YI?}NM~zp#JFNqK_O@0=%}KSl|B&S-v5t%N_y?>S2x zWvz1BKtNe9sI_3*Byka#n+!@O*ozzE)6;_A{_q9Q&nH;5dH`52BA;F^7jP9-@jPdo zo=$jve!`pue4t7ly30oRwh~@9;B_lFmrc%?@_S}Rp`!h^u}Zjt#F$8?t5P;i4LtIz zK8x#hQQ*4!yce@n#eMV|>J^9Nvi;O<-oNo4xcE^`4w@l3*+AR2LAIG~ct{0Ct^hH% z!g#%Gm|o6c22M{4mPMXNAe>*#=T2&kr1gSnnvtiBi6<<}qQOUYUFjxM00^l@T7_$9 z!NhKXWOIu}B)NGTe0x5lmj3?y?4Pp(f&-JOp?bAlABASA=nWuhXWnQ5vA#F(p=pSN zRjCs?`w^&sq}HlhG|NX5oOUMNH}>sK!(nK;X7vxP)u)EJqHuGau}auK>b#Hxi;)Nd zJId}KexJTQqR9BX)(F{K$I}PZ{r$8N#Eo?hTE?n)K89zQ1=s}uQAoH8foA-xs@asZ zl6F>;KO?TQ^rM%s$Eh9n#aQTiY-82#9K3!bIZxuf3NYGhv4J5jjmBi zV_cIKR?Vsy1YDeVd8cd|3lWs97R+tWiLtQ)s`dU|$HZz=^_n;HVpyD#tuHL z>U=YGGzHjaHgmC$-Y%otb5a?(bHTcvN4d412SQ9BC1IeDYyiZJi4thaUQLsfxt`Bb zj1P%0%^6dkuq-+$OaLwlE~EnGwp=jH&zPpkH(SmbDd}cm?u2G5MhM8HPSb=bFUVXUy|7%vJ&pD7=Wf(nM9$(T*5#WcXd$GaGg3dWwo6vyL!2J-voLst}G8ue-k9 z87s4EL2g}j^V>(3SLCe<10kf^<;sNQw8xZSYWQ_nwGYq2h7DXZYxm{BZJ_pkPS zT7Ooq1Kr2mVX+egn{qI90K_tHnyP`BQA&&XFeMkOAYM!3H91SN8x!1hAi`URF1kxr 
zHTOO|G>*5lM-&8HxpU=6IW_*6)IF>i-*4%Tg&ycVDZ#R&ifo_p^t9mDU%%jVnk7}T ze#up8(QWGr)PgA|jrn+z0QF@7B?4~>Rw{5=H@u$DczwO#vNrIuA6x2vHp|EA^-0vM z70BkOt}sj6F&1#}cCaVSBZxpqS^hpE6^RH*JJ4A`?k?7Wp!cH$Em`o~7;3GkNLW#j z%ce+IGZZAPl3ogMUXc=E&NJvqKmuj}E|^`ka%I7KS+OjOr1H%f%d#{Vj4)OYM$)(s z(Lex1(}1{bqU47O4{klVht^5b`q&X<8UeM*1LJNmJ|>-Z^;?}Gi6J1fEg z0I8<6^F2B|Rrjp}YVLoU1+-aG4171BYL>s&kKqg+ymL3YJ8R~a#t*eSe2oNnq@)6N zQuou!pu(sm7NAs<3SK}Z1}75t2&=PmOT%q(6+c04zt5wO_n5@IaQC}EldtoxAKIAT zY?c3eYRv3K2aosgWAybZ_z3*iXrjBN>b)CRY+H4nY%y|)#3hTFozENi0AyE{Y8f`K z?6FRP_$WnueBX_7#^LjLFd2?pD$v2++d^oexvv>bB?6Xt!LPqOLT4nD?=SMP5` z*N>g-(~6ulECSK>xHa2%z@0*bM;4J$>qS2K*M;fvnX8S_(ilQwIky4QMq2rW%Ao&mzW*G|5=( zGa`~gYFE~hft|!~E0yjM*f0QL8nkgQH0`ShH?D24x*ujb^c>!@7ml{nEvD;2ayh=H zb2VYr^tcIDg8G@IfRF(axwfCborZYOL|s*x_Th;I^BN> z5TDI~R8Z@ezt8H@_?ixD7v=$gGN{|aqiD_bceJ^4L0IFb#(Vr;7Jqnzdu{yJ=P!BR zqY|KvlfqoJocrz%fAt-sHYjy#70*Jgqo=;!>@BA89M2UMk0U=dv?VcBZX3bQUvt);IeqzcTRjie5!@6D| zAUz`ixNK6wpw{XZl&m#BWUSD#2L;UH+C-VUy&D0Ic!u<`qFwxvS?yJ>Dz$(pJMFG) zwaScEH#w`6iZnGPuFVUxq%YoJ>3L48C8JDvpM_Sia6dyU`%P-IP<(FT?iwBIl$-J(GaXxSO>$eyD_3Jmh zoHxmml?a?BwaVIFv~@Pi2T(Riq$J)NO;*zFxogSY?vD3qQV&(x8+J{vw6(RHa{}tI zi>fE|A&IT~a2Bli`&Gg9yEam@RuNN|RS6{#u((=%kTa$%u2J(WmWo-ck^}Gqb-7&p zV5c-;o@OuTSIX9nS+r&*R4}%(A#Dj0O`Xr?D2U1pxT?TVX|?|J==hi)H0&Kn{MUg!#wrRh}VN3A6D zKqBCQEDK-bdF9~DCl=QXN+fj4rMusEGjJnyKn&PIt^7Z`cmG_?U8D}J_Qj8f=#{Q7 zP*j=-_^{Xy^HR`kn^m48?mzXa5=n~5N#CYv67WIrnrQ151JBtNc*hDQR*be_ur{a9U~L;~0#}bLTnZ`FX+VX>ssju`}yh za9LLcPcr9EPYb?$IpNFGNeFH!*tlS;qOkt<`igHaXPnn^mka{t`H)Zxu&z~tIIl?mr2lttUNQ=~TQ$6K}I)}pp4=A{W>x$>Hf9Ncy=4XYSNs0WgL$v;=29B% zl~u7S+{aDae)1lCm^TIsk1zp!Ed^WIuJ>5C}0`0UI}&xF3ay!HpO@iIZf-CI1d=^*-3=+Py`59tgD-%=7F4=hHNS2&i?_RJ$A2 zwW#+;l5AJ=gkPSY@cjG*^E`oHUa<*E!?vw>c|GIX%PTIIRSQi5w#xlFh*efv^V(R~ z3yK0L&l1^DE(1>byqJa+dpX|RZ>LN;U(xyjpSwhaDUkw194e5^%Ln-6LWKAIO+WgA z2$f|xOw5+YD&NNS&iAg55 zGCN|f>LXM%#jXdM-*64`c^VeljAcep9#QaJdq*siXP;ZX>Iz9CoKlNfF%xK2Ey#OX zGlAmaj*pt!=o)5jSa5;rt=Tf(*_hqOwFREFdGso#9tg$THBhMnp|fRSP=*M*T@U*Re;>WKbAzduD}Z 
z7W*L4k$oeIJhKi(1<*gy(*L}s8s+3PBnkrZyIB>WoJTa!w;sw~kWc>gya7;|1FRLm zH!oHvia&;ANz;8zq!~zsc9g1hF^;s}0esv~_;O8ku z@ar#6IGs+Irx_^KWA4_q;Br~<@^Z%IBGwQliA(cj#eU{ZnIW-hKy~-fb=?3@>Hfh3 z8>(felrZI9D2*O8nSewrwODS*1RevDsBOHlpQEmaNKGe^DaCu({}=DN`f}j5;Nn`t z(V^G~Hv!Nb=u`<@7$vh*?)2d23PMGqimhyTIRl^yq6H~0$cefMtaU@Fo0^$4y|4O$ zZ*_*djBwTR8M2wBPD#aOTdi;ha?U6USR|)B0L>{4)g}>#>=R_>Rs|qh_{#x>PTRc$ zJXRV9#PZ_8Ln2ShD~LW!t-nuEYG8EZQdKGdG~d?FK z5!;#8ajAJT#vmNIQPi-v-W9)Gb46Qjo+}(R6KE@2$4RRz$H1OV36z) zn#9Vh47h}vE`=@qG$T{)T(X2F-JnYhAJ?1Oom+@|P$x?r`AcuzI|5D?C~KCgqChya zfQ)I%QVW316)j1v5M}Vhq&1ZX*Hcx($dS`HB8yqVNXPf{*IZ0(KZN3vDqB>tJ5`&> zsoyiM**Sg(Fj^i7Rcj-V(tk3|t6ERZNjE%UH>X5Nly%(pcy#xT=iG)r8rR0?or*q? zmWDGJ=c;`$#z*e&8QacPa3|A_^Vs+)^qVn>3*B`)(r1#;tDFcanblgloM?hsGdEX; zHm?t(1;o`Am@-`L3yU0N;3Jdz_OsTYVR0qgO2JmDf|@M*j;q8!6nh_}z_X$miyJGd z%<#f>HeXd1m?|ML?Q}~mtUwi@U?dvm*v>H$pHf7et3|9DS>8X-vjkUdThl(cFs6i|6wtOdhGjl~Xf98t zVMuHst1&MP^Mg(CF{ta?A&BGTx7~Jc8H8l$8Zm{LKfbrqSSzyyY32^Igg`Yd0ffXp zM{TVIoV(0CXka@)bl4y56VtGn$2~XtCkBC4&B&(lU=_q_6@ny!SqVb)@w{}*^^@BE z>{wOv=<=h$#0j<%{$z!Qik{iz>$963YUhfqNR>a=s;kRqvIll@^3~SO+rhrvY4Fki z-2`?0H-Nkmd^m`>)jR-J_gD-n^;->~jeE=A7T%?u(|IA=zoTpf*>46kx^W(jXZX9H zGvo8&Joy~<=Z%QO`cZ+cN~*_N#Z{GS3iC{8>t%woJTu{Fu5Y)`J`d8X@u361VEfGa za9ljslrE$|ngArW`X{X(K&^zDG;u>!p&an|2!qm5$-)qgVE0-c=ZrBb+M>YjqZqJp z19sFt7&oZ%f&Cin=R@u{wqrFxEGDqB35LTQ_K$UTaI~kx71*!U!6jGUw5Nync;$U5 z=YAV0|0tm}87W?KuU~EbP08fLYn0!cGG64i$z%luOO`fR?%DvOANrUCc;+^1;#E^oOG|tN7I1sp1#2LP7E32+ z+0b>b^&K-ZjcSdLxM-QIzo$X$T8B~@spyG9#%$?%RWVenl(5g|Gp3v|c{YfiKr^!z zJCx@WCCu|I2Om;Fy_5~M8;GE0Fb%?5Hx~>%$jj=KP|Q?=-C=9O<2zd<7<{CeKDZ?? zI}>D?g@8S}WVR1ob|V3Q5qu;q#>h&g*_noA>@ux*b9;&c!e=m$eZMWU3x}8w10U>u zlmbetYln|o4?aFm;_`J#Km!_Zu-B^0H|<7rt&?kSyRRL`1PyzU8d0?IzIB| zR`bkcrs{PkEVeb}49cS9bYNRIpUEX&khO{zgUFo? 
z#dBq(Oqk~xPfsVz^Nf_`ePt_HFBhE8XUTlZKvvoD`RRn`(}ZbCAho{KZ9^%9udiqP z_2nDBzP{qTZkmlsV?xq5M+-aZ14_zXnH-X1Yc|Q^pvf* zp9!eV1uAO8jmR7co$qajfM*PVM-$u(tn9RacHf74bllvsp{tdW1b#{Vj{qJ+%H2Md zZ2Nih9RY;2=W^QO z^n$Th4AJ~pv1r0$Nh2WmZEHclg)y@GE&cxj7p$w)`bZ>I&Yzx6 zcs?xxK8QiET6K)FRs8MSSA2Ut<6JgzuhTTcQ5j^ODLMiWNv)Mq%ssAj{CjMjVLx2s zswT~Zrr6BWGZQSWI51Bc)0F)_IK6AdB)RseeS=O|9>IMLk{TP5Urg>hnjb0TR>Zpb0!N z@ZF008S!(l3*B#I8ME{qwHI1N3(&Mj_ZtrZyrP}2#^_lIQwL1;4Bdo+&*rC56V^0k z{$&FgL3`HjjrVhJU9(3jut}gHP9dz9KTOT`qe^@?A6JT%8JKeb(uAw{JlDX5zenrG zI7jimxb8a|Gi(bQ$Eg-gb2IaMx^WDQmeKYv_R5#Ns|yat(F5Lndg5CakOqv=<3k$H zD)sk)n{Z=e^Vevy%JAN#@oXIvyQ4Tc8vn=J;6m`uWdYx-EtwXBF5jsYX;3a@ga2e>ys6@4ib5dYUuR zbpPA}^>vQqI+G*X?^QGW0ivKwISB}WWKf0ibo7c(V)ooqN!^fAmK3_rPfwWU3AJwE zS{1-G#kIL;olY~JPYdQ*0U#Ka64tUwcGj;Kv3zVBHm+bI%{nLqB%#$Q;xGrbT5Xcz z@u)`tBhI14Dz&QR@M4Nc<^`j5#WHC^r1tleW!9?F%bbNtTapA;xjC7v1lBc@&Q%gO zy;=&kZN(%T{IDz*c7BWQqe!4kE{t^}>~5jmo{7Tw&E^`%Cg`{ZmfD8wgap0KgtAg9 z*2{(r!aU8ofwBaivy`-dJ!{>MoUtsE6k(dlHS%JT6soM^ zd7#m!CL7dR!NuJFSkLwT{jyK&H~pW}HA$-AZe1WRuqPF>i|;jgXDAq$M3~M=3ULxb zrE@lA4R?B}#+O_9)FubqZ};X5XmwJIFN;AiwYdpffNMpi)Ws#s{+EOyl4{%djHKbU zHxmds5%QEV&tm&90GfCvTFAL zp=zRokR&1Se^F2|7?RzKpfzSlpT~rf6LLwnU9d*CgU%lj8q*M^Vg`7~K*YngsX$-! z=|REbxP!pHf~vhy2<5g_oOuN?Nx>XF*og?0#pL+s^w&Moqo1hU`QFVlV5X*ov?e@LT%7b1m&Jg%6!#a!fDKu(;wqjXrnM)cUZJ7F zwD&DbJs5_@!PauP_ad*4R+%5!TKyE(jIQK-&XlSqBSrU92Ouzy7j<`@5>BTXr_&;? 
z_@wa{+lI@!N-^tFK+Krt8BfazPm5&7nsNd|Twk}c;k*`{*A?GhUh#6ippay{@)&E+ zRGL)jPD);+)1?--xQHxbX@n2RJ85YL24XQ(@l0W)++44q$!a87fmnZ9hAW1+kFlhC zCJQS-;90US830o5=bf60WmZ}IIsnmZMv>YWB5w$~_dq=gRLnf+brh|7`;gD^(S59w ztk{jkj40U%G;ehlEned6=Ew8-tbs*~nwt`)I{94KVmD0%X(e?AOsF0;np$rI061wd zj?#@p`D(F3zh5cp+#Tn=w~KSOE+HU`~VA7f04{*Kmnvyl8AZzL19b{}v)`)}h!w5?53zQ=eG!E4wy_(wau+ra*0(+OeHVHMhCds8 z*z{gAq5iAAqctSo?`fl5>WJi4yaqh3wYrnjg0OGr!RR8#^Ws;3N|Q`i8mBt>8&&74pvJALJE6-v&%$kA^92W6y`;bkb@HY+&madaCbGpL@a{0WPVCmmIbHN z2~SUpr2Q=$E|;?=9a=pGL*&RwfyyL_jz}#=T(Oplm-89lUeEaUaz)lItXrBZ z-)tA2E?C!PTSXp~RLml$TPXobJWhjHD+c;vd@$C75%(*}beEI4X0rm4W~nuIG)k$h zH33}3!Xb)xQOMYSPG-dcFsFnmPndkyvIy=3W5}!mOY|onTol+UE!u#_BSt%+{7A8G zCh92ETIrQu^J?bJcDsXXGVDbIqb2v1lL-fA$^l#@FfrDxf?iiJ1M{*VYn+EXT+Q93 zVABQ5c}C6&%##$TQlTsoYlfoPDhtZ89?jLtvZz?+!~AtF6L-#TKgG>8E;6Qm9juND z#oPQrE>s4MoU)^CbZ&a{AJqS&q<1tecGXN}&D?3#W)#RlQa@T<0%*Cfo_Dvzf7UZ2 zFfJBQ+-u_m2IgG>&%9OH36g32Bqbj;)9lTxZmn0}vpHF!HUrfChahyCszC4UwEsqw z9e(M4iw-Ef`)R!}c+6Eyf&6=JiUu^*?nj8fKW3g?&pC_ z(*rQ45wW}1^j6XqjBIWMw;yxR69>>oH`?yVFs|s;p#p9%$Sj8UJ@_-|4(X z9s|U9T^YLr(ZyyAPrO)Ra=Ufy-_o4__v-K!~X^o&o^{?nd9qS|V7c1Y`#vV)@8P(#dY? 
zif!FIZ9l0%zRVMzP74;TJ3@MP%2vfA=d$7J%Zk5!JL6m`kfq4Ak#XT`1eep4hub-~ ztXDBMR^Fc*xC%|nwg`|2d%-z_DT;&jLw+g%rBJ|9zZaLR=G{_jD@3U!uwb%i%9Fd{ z5lKe2tTOMEOpG7~p|-T1w6E@(pnhPQF{%{Cim(xZo&Yc;>wuq30Aqx_cB-Oj!;=YE zY#LmGRZL?m;HLs}Wesvc2y|v(BmvBlVztyF{(1H}$r}qZE|Rtilv&5w7B^?cRyQzL zlv0s%)xfR^DHl*FD2EzGW!fx1P4i2`Bt(IUA%Rg=OvZ(3XACjVWe(>%34&1?H@S4Bx1t1HfmwsjMQ#(XXR71yIY440eglA^*eiffb1t1>lNvdgx{-R+PKUve$+^0x2 z8SuiDnOfj@j#dpN;<6h1{+K4iv>A59Z2F7{i3ygJ5VbQ*Ab5&&AHIT+&~||0);K9z zLn7#S$9yWVu=%KU>%UG2u|Y<=*l7eqv@`Otk?BX?8`j{;7<)5+53ayrZ(e=H(UX%( ziB4+U3-2=W7R}06EG>MO+F-;3ajh5N7O$ks*f8+j{t;rw88@g{sAzrCMw18KY z!=t6{=6Z)eg(mHwz}?2<+V0WC4v&oVq7E}UDlLxaV|>AAb-8jG!r~mB4|3ty&b`Ks zW|1>;%1FpE@YX=Z*2jfe*6rxr%Mm;N{Xh0IKy}3HxMP>S+?~brshi zFVYK$t2#u(?{B zeY@bYF(@q}YhY73TrFU%vQ%YME&xl>*lnv`9)gl&mZ~NovAT?r?rP-)9$t`|1w%vx zl@U_XY?|#Ja`1uXf|f;QOdw1aq@JX&R*1E*Sb5yDVge{-lp;!8+3(E?H$}6X&`wt2 zZC_2>;v=N9!t5BphASy1sT~L&sYSQ>bRwUlLV-Hjp8o>ForQE3MN$ht}FhCopLCqOYYwnV<1@l zKKCZ}apHf-T)eZ5u~y@#P0nhTpXUDHZik-?EDsye=f8%b8gr{(qwa0)>d0V>XQ4l@ zCSUziHnwv&3QSG1V+Z}x(p`Gk07w7XbzzsZFRaQ1zkc^m=~&<+{l7P4sA2=4)WAm- zutVW|{A0GqElTftI*YK{w8%WjQ0D%2yC1zCGth3x+L(5m)QyTkxLhutttrX{j8|0QA75X@?dq~tFeV@oxNaiW zlf_V2Rd$Pql`~y7vAR@EilkZ)^~7PMlwsDirkY>}A6YFJK1`N9le5VQ>h(lY7cW76 z8W3?y5;9V@xS-t4HwO?Xj}jo5r;N<@ce{Ry1Vl8SQR8|5K(b^5i0R@00U>NjO1}{E;Rgqe`W8+T27|G!v6(_rx!9y@*C#5O4ysq@X*WYgglA zo+mk=l~Ix$=x&VN+ zy)ngV#Or`*y7{h#{b3C6K+xU$$nP~}*BB!!LXUX3F>dzf*J9Qnbu&~G?HefLhnlIao2cU-}Zv#wR?mw*)^Zg**ncFF)gKf zQ^XKMl1SX3v48*V`+^U^1Y@f`M(V=7Pq{7pu^cMYI$EBR79np+C(&MKSfy{Ggmqgz zo>ks4^b4Y#%9F@OIZvQOs9UVS-oIZZ*|tyLF?SJsIPLl1_Of}&8iQe^tZKZ66<a z#NL2X!ZgoVmXm`I=9sQ{^LiJQDyB5y>GXuB(}Fq6S{3z{@r+k5ulV-zBAzzNf4K_n z5IM?(^Msad4*FhsmMM^AE3g90JoFtfi(HczNMk$NbznW?TJuA!C#>mqv&6EbEdCB0 z*WH5BH5|j9Nv+I<%|AdOO#y8l042o!HUL5@U|O*_W6(jHLu#t=_yvZRwikB=@gg`R7|e`U4mpdndv_lt}_wi}0Wv=+E_?G7Ed3qtH+s7hU= zAESovy09-#F`>p&?%KgZ^c^e^lY6U>eXZ>)yAS=jGFb*L48%EsFA~Tl2fSZ{2u$b` zh~oR29^?T)$l)djh6X6y0_25+#=Ue~crZ(VXNfv!aS1-ctUt`>dpxWk4f|mP$&?2U1G~u2?gxw-%$wz2N 
z>np-v@r+mXK$&Uxhya?^YM`#E?n$B61XGCWW2){AjIz%GjUHRf04VVck&ZvM%25^u z5W)pyej@lduBRgTg;O@`hZU)w9DLY6Qv{_X9yL#=1y9R@WuC?MP}R+?R9x1Amv685 z`tpkNT2PUKJSD4bTnF)xb;yaRHWt5;LEZnOU^inVC*9F1dCa6|)PjYD=e6M{@4CHL@y)6V;vBp@nH6?LV zTi4YIz*Y(v&99YIBhYDMcguL2lC>-%M`L}zW~+gs+jAM)Tda2X+}Ij8hr3fXt$kDy z&`o0R)zh&*Cr8$-TSC!2K`W;ujEkGqYVR&`eO)A7qNrQOD$%hYBcOOxAhPQ!8etcu zLPDp}zB8xeGbzP;QwM;K`ex}cj)pj1pHh086@-U5NkG{)w>a8aw3OH$3<(ov(nIIw zeFFDK(I@cZfRSk@O}CXE@9qxFKYkW1PPd!I*pdIe1Cf5Fq9!?}5a&UF zcyQ!V)BRxEy9K|!-J25{b!tTuH-{_Ht8Jp*`o^S-=w!OpTa9~Y0fx3oiHu4xDFA4O9ht#E=6KOrWxnU3v!18P!WD3*`yMdl%xQm{b?AouH*D)SW6rGI$X&rb+7MWwp#mjT4z;R(fce z7&I5xh1W(9@6Bx!1Z(sA?tJ^AMz|3x`FlvU%ThFHbO9Tgq|HUoBmy(tjr%sEz80*; zEPKyPu19Vt4%CcsyXN>l@c4UgQCPM)ru_SIv5>kw@AWT!tO+PP03VOstEkYdNAZ~e zb^z?%eWV1U@9!e=ulBYk>meZIN36~FCP~Z+skET$--`k4AK_fQmqs*2u4=y36^JVd z{i!%C7hw0AFh5O`)4i(7<6?0K2mP!?WsIbn^-o#@3jEwH{%ScmQtmOg>tNOx2ZlwH z-1PTUl~Yq{g7?$Y2~W=_oaPA$P?!6)1%DG^U00+;c>eN)FHcWcmdUM5l^KPle%$MM z#mnmjua|;ShkO(POcD_&6Kaup-8PXcw;~0zxr*}Kk^~VYOd-*Vjo+V@NP(FAo-DQ- zV)1u(Jy&d+dp9s?oOdP*x3GQ+K(IA-F_{crS z`=|M?<7d|II&jWCK*1sS0Gc)CT07EyN01RUh3&iQed@LIwd>E1ZXZ1pMo^+Vqq|s! 
z@BDfr8s=vCI}tdV=3e8;_E+FDq@xBtf;IMylW@=a5!(&~cRjiDjysm8f3+XqqLSwk zh0h1y_Mp||pR0+|=x6uhbJ`<&D3MmHCQHx*w~R}&AW@W7$*6*@mYd)snJf7;fx((3 z%Tm4`02ilS%2Qdb^7Z5NDO%{ACYvx!5+Lp0DKC?nBqEoXMcH^cRIyrJRx)Y5LK9{q z%1EE(&x zU)GyUj8bI}bE#N2O%$+ISeXDTjx0ic?mU4^dD`Zw6;U)9J|#w`#zRRAD(c29bGb|z z6G_pqHcCzj z1+0b;LgFeuZ%`MHkckD!Y^dCiYG}=_@>xxDK|2U3V&)5GLn5(CY}*Q=ifPK2^W*|r z){3BJaxbO0S(PYZo+eZ+P;;$4%BcEl5=OU}16#qH!H2nGF+&dVVBR+LZ2?a)2Xq`y z6zWM$ckz(0rn|MApX`Nnp!Xds_8zf*jC&{okC@ued|x%&0E$3$zp$s^W35k8N|AJO zg?-#|)c!u3Yd8*Ob_8uX-LQ84g>etsnIm^|I=WbWu8VN3)cDNUL=ikfAFLgFA0 zDt&?#WbkGXkfg!djKIs9)sfjUxXN5uR!b4iWtJMvB#5h0b%n5`+7*|4OFhrEplrqE z4^cR8nC1yzo=*Y-b9Onff^gZ27aBZo8;aF6BcU1Oa$~cAq%?uKNZ@*{C<^}CS#R=U z%A^Sfl$StcCs0c{Oqz1nYIg_-k@aR*4)=*ONQ)09jRTVRv_1mp>LJN2N)&Zv$>Nn5 zC~8=6nXKE#>Uu_!+-b6aC5dWpF(3fE413OuoEaW);`JNs1#0d?()b5LMG@lFWtUMf zQCkrwsf?)FR~Wh-PTueg>E3XSb_-B{KJ)aEN5WEpcE#A!cZ(JR(88Y)=H91AkLmVVy}mL-j{YTWwWrtvkN?$%H{ z2w=>Ln&V13kulPKQ*`xT8zp#ug7z7kc0j% z1GjscIWuRoSiloNm<6%|m~>600xE+Op;Cp^FqOI%?X9MT2>l3JfA2*->U%djX9QQL zL77_x0Cn%$U7~X`3R8!3oDygUME8Pt-yZ^ajG40QWiEt>2Fo7kp!V1}AG?w4zKB+( z$dGNU213F&K$mc1)IJ3wcdHTPj8=dXse*6N{2kqpZG**4{eU3c{$1*=!_KGp<}TXbO& zQu~6bdVM4TO)5yZmZ`mRbX$#EjA1_Aaz-iu(z+BDYaoLE0eu+8+keZ_N3L4L6uUTq zvF%)dZeHy-^?wf=wNk%@*i`2?9&zxJaE)ctyfS`u@f7fpbROkCE8tf&o2XfH$nS6U z6}oIj&YX=N3r#JxqH6Y}trV16kSQzXF@ag`&nY3#6DpAwmzFq@)f2PmeAS{nuqp`e zGcW-XXAs2;OTFLCm|AU;GB$C~XWavAW#3#L5Lr|U>4~U-tSS7dUZu; zn4jAEDsmo!GGm!f`0^})oWQHZjF4uUGzIBAE%@^MglV3UGGW_RO{N3Ql2x#- zR%$@k%hS)Q?Cf45Hc_I`E=AJ(g2Ww;P0^G9uH^E3DcLT4Rc&A1+1Xmn6fQ6dKfY!L_1*+&V`@m^`Ll)-?gh zsK}f!#lzR8$d-&1O%tX$$%2}h2vF^I^Y>z6RG2SV0$KILVT&55G97v6_7D%NZ`QqM zeh}ZkA8hTI&F4cTU!K$ z{yW3z8gnZWXG_Y6DdsJ!&7HX>rF~v{{`mT)xClLSLFDL$_RrimMjqJ>_RbXhxJkg5 z@Xw~Gd3Pa!|yi^#=m z+3Vr*;m`Yg6*tkxc2u1x|gjPJ|vs;G)c+^h%+ z#!>?$nM|e?8kJnRnLOog$_ZPoST$ajnQ^(SC}ndCn(Ym;WHuxzo-3P+7$iBB?JELK z%cSpXW4Bo9+b&onzF_m5v79C?!cruYfbN!f^lVUefXqK{d}hBl$v60;?NO$5TMN~H zE`EC(K#=O@nm9V8mXrDQ^{g9q#yrpZ?8%GT03t4zvH{TS0$!L!t7Z=UH!XQp@3!hp 
z$y|l(r&$E5gT8z12z>aO+@P7vj|w_R_jRAZeK7TVw#+-O-R%8E`}JxmK|kKuX%KX7 zcNQZ>`;Aan7c52r@<%jtfwbl>eg;5*@N+m<{Qh#*b_wu(AImBCloh z{Tqil{eCYzvhaM*Cj90d{@B6A_S5?Ez4q3R*dGkl(u?*GxW`+90_QuPsRouCy^bKd zC2siGB*oac7U!aSn1Hu&?|;r_uGuCary%n*K6Km7*j$`!jm_Q2o+%0{tqZ|~QVO=R z$sSOt)Lf}da?3esyo0(ig#vx02Ft&8^gSz0pq(q+E|HG1*Zb$CsRk@1uvr+Q_m0UL z1kL)SpMec{h!{0Z6HccSo}Lz&2O>?OYpHhGV=Ja9YsS-zd70J9EZ}1+8!nd>m-C9t zWy9u{qsVX-P^lu3YKx;5%ZXSScJf!?gQfr{Y6Y|_#FT2D5duEQ_eM)IZ%QQvs|G%j z@ZOjWv1%5s%jKd#Gyr4;aRLDfQpys)!j@DifvjjbNicrSggIv<0_K!3kyu*p0+5sl zlM38b{qt^VX>|q>t9OCag=|Wvh(ZvZ`2ig?sR0e~9VodVHi^Wx_#I zB4$SF{;n1fru*dDbJ*n3Pv~<48a+d;+_WWqO$2>+uRi{6&DPlVmkAKo%pI!{7Wv!) z&0>pZ%m9a28jblJkLPEL5m6&-XX8f*m~t%o0#D!VML!6{8tD8z#fgkDWB1Yt-?c`m zGU4~oqRZf0tRNq4ZGHqCr9e+VYcEH7+1v)*^rId5@N52|o2B+{zqh?z)g8w);)lEt zaQ;muY`bMZNQ)?drQU)CLa&`Rop#G***#mlgsgW7mKajH_3oWS2&r`83cl_VvbfX7 z-(pE|pn^7~RMmXn! ziGZhh60a&SCUO_5MrazUhJPo>YX&o+Y<1^18z(jA>>J!OCm4Zt)18>lSZ!QUbF+W$ zfyhn!KkU{hvyQ&xmYHiW*EZx_NTT}A z^=3*4yV_jojEH$02sfn8TqU@zws+Zq7~4Hf(7k5!3%9hi&iAI|R+PvNv|DF_cCW7v zeZP;7_S3Ibjq|nml4i1(s{yz*BwiE22h6%?jLTi{+IltDu9!OSzUKCku^rU{=ol=K zEz$9F59a%kIlpcV+Ffk=#L&Z|?I_nAt$lQ-pg_mo({_I20Ct}=WbF6EIVB?0f!SFz zC5i8U9L%Tfvn$g~{@q@H7zVR{#2dlTT>xVK5g%-((d)7ks?xFXr+eoip>69Huh;WZ zdSiDRjhE-P-{UnF*dWPT;<<+I%P`i1wd?l4;XCv?`^Ups40O^TbB})51ds3N&e+XI z+;1v4-?cmrAAJU0AFpZrzVD0wJ8-=MLt|(I@CY=!(W^kiN$QD}vSBS7N|l&gS6Epc zp`3797GxDJEf%vWKHGE0`ACV}W5ea9Fp1F$ao7$&76n<`Qb%&&IG;2CFO@Yk09Et= zzH2pvS$|IxrYYl>=VzSe8QI*fY5}XeymfS%GM3YfMYCf`VL~mUyfN0bVA~jlCAPLw z3g<}IZvb6we3FRA);=q+sa1U&NrJ&G(M9ChvAVcTPIPr5sz{(_@gdS!N1<0qgP{$u zfqZCmjfzc{mw(VcRDCUy~ zIsB^^FdP`P)1+f&(6syewR6pu&)Lu~+r+kR##keRB^7GSoW}q-1=C#<^zHQ{5R8@x zS6@8P9B}wkBY_XUhg{$eI!e_(76&73Lgjgn2)`d{POxqO5N@-z1BK(`BBL_m<3e)W zGtFHij{7KM)?t_5*rK4!E`#*@F*0WVJGeQ<>&9nCIB~rXHb0T6>;Ab)ZxkYiK6L;+ zg1NhbbL$9&bJx~b$44U@mi6x}ctMds6p}*(%cJ$zuJ?293Wt82cRH=xbPtxso{sH1 zo}Lf2#*mW>Xih7L@yMnct&o82l^AFWyaj~p_s}(=tkEfOPmg_1JW9Jpf_&HymKZwH zL@Fb>JnZTInem()(bf;|7gLgrf_-<%kJ*)O(S@DG2;}O1dGugLMG?^Ru=pHOTSml 
ztt+M6T$R}@H$hqG<7Yr?VWcE+A5W(lU!I?klN1`PrMUK<^q!m(meYdM>4a&XBvyu! zRzxq@wt~yX*ovf~EetTFmZgK0-zJ0vDwc47wYYR~xw%3;G6+D5F}NDrky^&DRu~#C zXA_Zo?hqwS#w43XVlpgcv+W_}6PiJ5S!PLAkh6d-m0$ZZ6_VAfLW~h#o}N%k#rbl< z%j+30=d)OVG-*{0dmY4hy+|?Yn}rAgGZo6tDy1T63b#jyi&d#L)AKZj6q)?M0WN-*TN=%u@^+m2VRsz&l$32+S17h3@Rjj_2 zV!peYsx;sL%XB5#GYmPmOi%WXcxxjZZy6(C0cV}yYBLapHm;t@L(Ja|eAGI=_XmKF zJ8d-C-)f>LFhJk5^1bFPtZTUUy_)++TO#VtGBLt@vjcaxmU)~G@2x2Z;N$UT#5Zab z;Q{`83MZejz2`un*#VCS`a@8VVS>_d-}haO-+#M%<{sBi+-&9^V*n%yFpr-7BcZBk zkKYeMKD!MV_ub9TB!!2)~tGCH1*2DB+9 z%xO}$tJD;mTp^nGil{pfky$H`FEgH=7d$UBrkq?>E46?uMZdmYfXae-p4Ykrm-7d5O$A5-e)W zE+gcWkrFU3;;AD5BqOJ(SJ)|xT1sk$I`&ilvb_ zl96q@pp<$8fDr41_nKlmw8Dt*On`zA+vv4ct)F3=R!ml1Xw!k9Wu~9&@7i}|Cqj^Y z#_a-Aci3Z_f}S1ozfDysiJHRSfg1V9kmn9Y5`;21)$(0E9fyY+d{e{4jy&7-Y5f z`>(~|J(ml@w)FZk;>!t3h=m$hhmaJ7%6j9Lk6;pl=DpU4e8kr4$# zDeNG`?x7vk2^u^iASaOenWbd;(_58k%TQnp(2!d9r~!a9L3z(G{r_ZHsoGYs+939L z$wXm74?7^StO2DILD*$No)SERE+y2m;j&(Exy<4#XZ~^%uw|>NCq0GJW-#|!Q@a$Imels(-X?`4f1i}~l8_nNk1RrdG=J6i9 zPZ%=dBV%?UHZ`l8?^*rVevKBxc76z=ol1=6^J!?=ELeb+wOJuER8Z4_R)Cyk2tNh>Czk9v?6cO(sLGj}dq*keQQW>DiNajKu zltYmwIk^o}H3p3`0~T{{$NTM2>Oi7DdWbDk)t#$uAHrMv%tehdhV%DOU&1rU;geeF zTc;?@(ak4v4?tWAc=OFiow@V9^OeXN!&%tNeh0lB_WtgkBVJ|n@|c>qC0-R|ha`AJ z54&cB+!VYL5M8utcjT)!!{%0lz!R;|UQ!uODu?PsI9D^4Io;ONHh>D z5wLBJ^qI(u@Qls1HWer+NYFqs-McXGWHUHW)CMwSzF7$qRog_>2}#8562z3mM#WWQ zbi8aEq>?#%Myr%l6J(sBuxKQ>S&%m`1N&-q77}g+e~6{#4UxI}K&!lJ z8b{z*_~q37Sz$^E+mH5rWWK|?ptSKp%)MY^zD*))3JINSFT}}GVt*@pYDOuTrRPIdLe}L_)0%WB8YiZ6VWU0L`R(0C?tOo2 zkG3MHl_7<#?Cg1gy|!Kq%cT#DlfQHC7}{rOnU96C;XU`n`Ueo2^T43|FojF9qPwoa zt;&E^II1Qr?7;#%qmqMTXVp)g(n$+oXYO5VX`Fy7XMmZk$|^uf_#&Gg7Fv$9!q?S? 
z3qF*Psx5}qtR1HKk_wttK;#^-0G8Kt!Rz^g zwQK@DklXmHT0pqL55ZmH7Pnq5xLj5d#3_RW+n}sPu}F*SFkoT439K>(QQ$Wr{w!II z50S}|D#a7^P{>lwXVN+#Q_^u0CKAu3M6CBoAiErJbx|dP7eIws+oZpF5-N9^GBN?v zBw0GYzP#e~yke^iE{s)8B3E3nR9Fq0y}F66O%7~>byQ#=G?rb*pKL;>R5k7ih_VxI zf9uTZ!&K1bX1S2frWGw&$RVlw*fdX=a>ASwo|hS4zI?%)C(O$PBF1mO{fhtdpMSz% z|N0mF`Op80QWQyRHiVRb-~RBc5)I9(y;WS!7p)1RHBagWE(A&wQYI`*2J-}5Bu!f> z1+>X#nsigm^9%qeW%EPYO@#_m-Ry89YCpUMr0shmW{+Y9>~H4q-Cnrw#xYWw54JaOS9CiYm2PDB69v#w z0p$KVw*ZK@g}d|sfiJZ9rj(LGsI{+>OTaWfYQ6@6yEO8&p33Hy@om@P6wr+e=76tM1(-Y=p*7y=p+H9pt zp~3Tp%cdEg$kcbuJ}&N3A#V<~#3IU#?6`K?$com00V05d2vcaF;5| zl46(uK*5y&u>^C=nx_dT4I!9PLe4-Ymij1U>#dGc3aCi;DG`CP7!w45GQ_&2sisMu znHG%|*6#CpMXA67K-SmkL)4^8FN!*0fuvuL* zWm*9-pG!D9E6OK4J)Q8&^B4T~+pqZb%M*V4?#ksQRC@b7>8cl_~B|AD{! zLMSl=@h*9fS8 z58Jz1e?)-?0wdU4RelZ=wKEspusoQY$Pc)?3k?9|v2OSGysHuKnC9*hq9*I&I}6y< z=Hw83+`IXWtyQQup!hz;K%=93p!u^JbJRxut9_(72*Jmf_S$)67V9Q+MJe~!>}SM5 z9Ee>1xXtKLbfe{<%(YzgHy@D#jWPb#-xK+9DMekWMm{D2yHz|Nm2l?CVnOU(bE_YF zyr!q|y*mXINWa+Ntgp8%NY|cc|H!=rwsP_3bUfTs?aJ8ml=1X*!j~^!kP?s+P`Q9B zTrdZKWtwq1o$&npEM*qR;$#?GVXbn0aqw|#M@MBrsRgSR8uS#$Ljs?4pqPloqhO$g70WzfnP+t=VVtHJ|MAB^;!pqaANcS8`cM4(zx@GE^MpVB=}&k)e*-aN znG=5d_dkG$@MX$)%7m|9|0XU?DFGVH^y_cG;LGzD{NdmJh-E(E<>du`{mWPU<*$Fm zX*prtHvIX|f5!j&KmUXO{mWl*UN_CdJjqx}6?d#sP}UX5gh^+86zF|Uewphf1mC%B$}^4`}>14s@D3W)>}cw+uF#ZtLQ&sBMjgpFtr$m zC>PsAH3rjYf3H+Z!=y$ztPCD1-E|5+K;+6ou6LAn22?C)oR8&|He%lW*+57OU^Xkr z+slVWWshZ|YI|MI`2f&J;t-ZO<8)fEEDLH~MH^o%^Ov~BE%PESTF=i|=2?p~%JWJ+ z1|O1vxD^{5!Jjp$AGdb>-Hq;`)}8ZpvXu1#K1jz(ZUWKd0bpcEh4Y*Um-873Eb{0i zzFsM{Pz1^?p;A#xM^BC zbpwDFgl=Z0s8y|{v~;>*q2tecM3e%((z+)iMkR?Y@QJnO6CtPBgO(V{jgDMoq7zq4 zBuF;rgfCAE{_xwc`0dwUkW)gr6#UnJ{uloHzyBNm=a2t}*OymhO8Ec&=YKRtBVkGl z{_?+n1`%QU|Ji%bE=i6oJ@YZ)?x9jw6$+w(Zg86HnIVVdoZbKb7kKu=p2J>l3y21~ zbczt}Zh{}KnY(#JWK~uHg%NDSNmNE;guA)fHM9F(@!rbv=;EC7(-rIWExX+o?*lG4 zV&BuN4QnFy?1IzfjMZ$$)5n*bot>h!;dXt?v*$1Q{qO&OzWMGuc3nz3U(7L8joC>2 zTi*wqYk>x9B`K<^szLw`+kJ<4wv>DCCu|V;JLT5|87o2<72G(PyV9zje)I?)^MNtY 
z+=b&IOWweiB-dmCNjv^TY2@ke)<|2z!Y1407s2Vs4mr>wQ692}XYDwtsF7V5n&C z_R`0ruXAD8yl-b<0ARHdmZns5s`#>ex5yw=N(~I8llv9&qqW96u}0?_TRMg6l(nq# z`R9)W- zfzMul4Wm)c7@fkYEUV>$rmFCvhY;y~FDz8v6TPQ0HI=EbwocJ2qHAWOYUdnn+ta%+ zf)9kgS3Ct{A3{vI6H0PYx~`*b+o9G+_Wm5G7x%0m(G6p&^O2%{j1pp}5V4g;>k1zn zwXqm$F(`cK2ZOv6BTVEPMm%eEQ&U@m>w5H1oYrIN6s}g)1F_OTNCn~<(we^O>H3bo z?`fTkr_lFWObW4cZBNu1Q#Dv^XevvLGlB=#-QoIf^nhd(qZEn4^XHAte|r;TCq0nY z204&}5@i-ZTd@Jx%1|4{$;q6@R~LNt^b;=6FA~BHw3{8yd0xGG#q%HkO7D8!ym^J| zd#p9gW;OG9HN?X`dVIx~U;c`dlM_~}Q@r=w-Q3dmE$f?GVvL+$oKsti3X!I1xV*aL z*|TSa5UA>!Cyy?;Jpb=}_0_NV=7EKI#dZ+o)rwHRI4 zYN#Ik&}W2S!YlFa_Vaic{?{L)SO zq(KMb1H%LdPg2eyn4AsaK;km6)kyxqys20&X3S=FGQb9cbBV53`cT(WWN?cvx$NQaE_J)`{1|Ry0k8wHl)$+Kd2EYO07)Mj?4DT4{{6EanSX zfa@J?+p^tlY1>ZnuZ3PxJZ`Q>S+K@1t1F_9xP=ff+37=$=z1Udet1h(mjhaZgnHG; z5M1^le*~oli(F2yRVuPv&Uo_Zf~)g$7IjSwp6z-~+xBd?Et_2n5!Rb6+ufGkZb!Fk zsVl>YT5;F*wC#r7cEkCLS6tt2_~gkGmdgdMckEiv_01hkBL!$)zqz699G`shl-X>~ zuI<_Ewk+mzmM14PO^pwcYBp!KSn`)2pYzSPe*v{d>lUpos&n+I=7jU&C8=yhG#Ve( zAr1lgjjneWod}Q#dFwA9Uej4ZUCk~2=j7i<;t$i8UGwH`T zf{%d~nQXxm7ObgpFz*fJJ&pqqb?pAfx~)X{(#2l)cG$B2{i$^?dkRyJAAFq;ZN_N4 zm+-fr;syCz`}nQ2?v{J+$5g#J&hf4r5_v4pn z;suegoQK}G1RvQLI>}!NM+{VvYYMn&f>FmN5C6jC0e}%w`Q$C58CJ zQxown&~;+n?}ASi&NH)VzMm#2P&RpMQg=dB^sX1iSbv!Bla2jTo%kI_Va2fDE3zJ} zh8l5MHoW%`GVsXX6AzfJ>f|x=Ltczg3PXxii6KQi2}{|?Ln2;Y3CZ#VJTZ8J?uQ71 z`K+R@3>&jc`AAV}&G?aR%^)mnvssPvf!@0kfT)3OVN`B@Mgdia3=YRXCsu$AP?S=` zJP94F;_P(A*=oUJK4(6k;bLI(=7zi5H8(eR^xolo1Z{^zqiyTiZabP-Q+uHw+q8l8 z*0FQ1*>;W>uitR8T2WV)7^KKdRB(BDIlO-L>NSzq%x5#SF}S{`t}QVvQCeaRKl|({ zr)TFZmn;7C&38P1@e1yCc;~5&#c6S|JLeOZwMHvLQ1QL2Qn?+57#t68(OTT28Gzg~ z{L-IHk(UzKj%}JH_z)o*8L>hTcdJk^zrB?p6Zka!vm4_oAIi^@0`Di{o!r+E3JyoNEGblDk>>vkW zZ1UT24}~iD?F(yg_I`ZGE;kLY$#K!YJP5c?k;&Aif5RpJa&1yq<`;}6$g%y#=wUxB z<$fBfUwT2NeN^|zPK;6L+so&Qqw+PZCsnXd6t~R^UV>vkGZ1JnRMg7-%^w`}2W z8c8^Bno0`YM$u86>!l`JGO!z?68Nw~f__YTOxJa^y%X>OlCL~Sck&M8voz0Unp;;E z8B{RV`A98tV4QpRfH0ghd2L2=%?&-4#qeP$L=17rgAuPs7{EwvE36W*k@cjk&nW^f 
z<&qgRgF|l6mk=uF5b!ETC5XH&<}>=<(bCfUfHfK)B0f5zkEj%))l`=Gtj309#T??( zwI)P`bAhUonADi8=icRN(W9cgzlq3L$6?GB>A};Z4%3hpm@G&$?0B+JB+k0RjBEia zVvWUG!`0;ltL2=ksoD96Z#!PSy5;4Im)xz_biIR^kZ@IFt!3ACT;JYdHx_iod_HH} zMmAfA?KR4@^zjWhn+=QkoXTo;n>&;OTQ@9b4VA6&KCr&MrsPKuGKqD1E@f zvES5$%Qxjc44crXe3wlt3C0+UPUadDI(O)1VHz-1B2b3MNpRylkPBX0{M?& z7%O5~S3uqyWF;wud8wD_$sv0OiFl?|oSCsF#F%#ZBcM*M^44dS9={C%?U%?qY*EFx z-bHb!DF15-?+G!Yyie;U44z17qw!;G*&`=LqGJRPk+`?V9D~mx>He~B8>`Vlyo_EB zmt>Wd>zU-b2;|~oVzJ#{5n*y0g^sB{L81gDFoq5(mBdyRwx6^QtI-ppqMM3$anBgz zLFn3tP^HHr{3WD~Ws>XnGkB!35kvxW@}8dRRD_UJyd2V!*Y&s`s9|%DpmK4-G`oco zZnf05h|Wv(?SWM(8x>W$e@ZmgLc<#42vkDa2Sb$FBr-)g|KxDYrO$6a8;^Vel@!vL z!m*W7iLOgrsM##RR(e3>uo;wyyioIkAr3_d zY~LeCHj@p3>#ZsX>llqGFnY!b1F22yeMO0D^dXyHFaH4RTi77QYvHc0oL1& z{&vId-Hz9s-b4mGx%K|jU|NZpA1 z!5~UHSt8_05*5PW$r!?%k6ceHDJm%JNr|m81SG^YGf*XvpcwSI7>DV3`!gTtBYS&~ zA|*a6f(8{vIZJ3%be%As)%iMu=OY2x!5|sqD7(ekp8I2zp{d2N+%)x2?FDX{@vdjLYuRje+}_^NJI89Z;^O?AKw!IF zW2`2sh!2w36oX@Zd&6eErte!GJ^qB%a>Zitl#BBf=a*;r82I-4A7LF(9-McnHi-e1 z&~#v^Tzjm^rZUE`KZKY2U2U|&mkde?UnT{y1RMANF!@~(QGR*|E6N3y3ZKO|Ja@K$ zWEq=OvK}_c<4MX#$BVtb=fa7@#rQ~?OaVUj|5A89-mh*&o*M{u`uN@ZzVu{kGXtj{|c{CdIMB;OBV$m-wS-?H8&nqWkuo-ZG-1S3o{9d zBcAyL7ze~qJe$^oh4)xEmVyHh!Qa}yr&cPUqrnE`4A#lY9T7W(k|ORz4Z-irN-TsW z4vO9o!ZW$2cq|8Rj2fYXPUrf7fYM)v>eTuBgrsDaPa<-zk`eTyB9Wi5Fh{AJ=;^P(}R^AQb}6Mz;|d2b~~i9A!q7%s9Q=k>}BSduQ0oBPzrG zG_t3OEQyRoQaEs7z#P6eczj?mfTaFU&iu$^b2C;mpU;`in#B0!3Bf_|#1s+*k*cb& z)?zCw%vx3U5=ETrX*(ws&IQS#&`FQU!@Mt)Dk<}k7$i4>bUw>}^4u|$Gdf0kXq~*J z_vi@uS@U>^Ax#hCE0HGYJW9aMD3y%KDVjy4^__M3DEmP6e2naEsgFm1R{oxN#u0}? 
zAOR~Wf+>1?kZR~!W2y=Z7}pPVSA;x3aDvhA03dze57z9Mtk?ru9tIzV8A?JS+4&^C z=jnu5o2|imqW_H#D)l#vt+2IZ=C@tTw%yTq$9YwAbF<<0`i8FSi5Mb=u9ZSB63Z8G z&Jle`6mC7%S}rawI5}CNw89gK+Mp|8!xB0*p)`Yl_dUUDX0s*6R5&k{Fsr6vb$Y_p zMpgo2I_1M~cJJ@rgEBO}BX@~n zHY1kdnPb1oNFymsRw}JY=}!CmN3jQetae~XeZTMNAusR#^;_y;d0*Z8#Aq}eDo^M9 zhazk`jIuGPZbf~X5SfmB5=;c$cMe?jGFVP$++}JxmvV!I@EdaVMo@j4M(_dnem6X>xkQ5{%0U_lQkD*pM z#5fluR3yEpV6js=N8hAtDbX!eDO(v#l{(Atw-HE5s)k2-Pe5{Ulv2XFWi8GpeKw<< zR8zzbZm)EZlsg;eg;u^q<5P&cQ@B; z!Uk(CbzS4Sj&3XULpJMMsz$hwfAz&vwz~~6I)3=^8P4_e&f}dUDp`nDij~mz0;E(D zu3;-zDk=I?PU%sp3n`|s6r5}g89zIQTIJ0kTjR&%<*X#;xOWM&Vx-*-ql-6Re+;Q9 zT=KaSlVeK_alH{yScP70zb402X;-8O^sJbTCb)wZqf|Z*!529n)4!vpV>tQSstlrYy zefb%2a|(TkdD=>unwTF`ymk?Gpu}=G1k4nBvg`{-1}J7?h5`uYpMimFw2=N%Fy@_r z=>wP9zBTsXvQw1zFqs>`av4_j03jGVl^5f^{VYR5KYWiVisU>xk=v5{^0*uv>`4cY zdMK+gPUm)V?W6KGHAJbsIeJ&iYq?y*`3xRG-4XBPuqTguduj2G#Nm4ZQYGrK4p-ir z4h$_;6x2v@Mh|+y-aYa*HW=h`wd)ZeaKzKQ|5|Ga?V9M4S@BJPNX5KIjpL(~Si$oO z$_n(LbA}N>$*kmmN06hnp{Z*Yvl*4ON!EQT0__RDmvDTou*v&TRTU;NZiW!(`VQB7 zcDoiABF0*@v8nF{v`TeHL_qSoF)4A~7()!8jYVrkv=$}}88?QM|L-%I++ zynwO`HY>jjLYBh}tTkw5hB)l3B*+p-wwHvcm8t0ap3T;=**N-s$L(FmcDoZQ%9!KP zo?c5?GB}(o;5V{6Pnox zU;pMSSrEqJ`kptpcl5o(;l({@#VcdH!b5ViF=R5Sy?M$=c>EDXB(oFjD<)Z(M)09W zM63!|oUDYAl_*MIe29q(G{snEg)_h*mFoS9v`V{C*>@B5aHtK>86xCRC6dxrD8A!@ zogPR-xK&Q-pL|vn)BVpM`+a!52ZY{jagM};RWd&NaV4Yl`V{jqy!Y;Vj$kr>Z)#-N zzw5F0hIoSi5woI0+H6}5fTHX)le{QpbOdCizDwCyW!9;K;T}k)R+Vc z|CZyn-roy!Y)$RUNA>^<7*r`4G#g6B{KmKT3<;aPzu)iO^?UA9{+sWY!ydHN2(4rU zEY?_7%caE5&t@0}vsp8SDk@FWG{cDECYan}K$mBBeb5RoV?iKxjO zAj#^fCt1l?Y5#brzQ6ymcMsRz+#VU=eE{^fIXZUvL!(hDv^%kQAL;4j9!Z{3e{-~n ztlRur^qhnGNO9cTjL{e?inWiDlpF@Yaz9WWj7Y?xud2|^Cf~XT(C2+DgN$+y$)qB| z$E?GX6YukhBwmmah?n`Ad5p5sl53q9ZLrodYZ|FuP8`8UkPxg8B+RL%tx~DjXh>{zB=U~}WX1H@89WF;OZgmR;G2#7c}#Lq+&IjuLZXt4(Nx9?ZEq?t zXi|>vV8cxco18~7SEck0e4!b5n?yzthNh~itBS>9&T6^h{NjSE%PY>#HS5#m|X z;GAQ?G4-QZaAmb>n-cehRtqA+x2YPmY5V_(8PhyGaDQ!#_b`- zm3KV(!PkF@^*b6`M{~%br~SS5pChd_h#Xu}B~~OO%4nwB^&uM^vXVctF}+Wl$m)^f 
zBYQ5qmuJs<#nAU;z)?D;B;;dZTm=JW9P5e|PoF~1^$<2>tyJx<>Pi4jkkBh1Jl^*x z!cY;tX=co4Gi+_C69CB!P<364?&%^?X`0!LdbSWxNCJUMYpMCRTw*JWcb?a8-q7w^ zl+tuv$7Zu(+qI&15Ya(lB8f>f;GIXf2vq_#l29IE0AEmIXAm$9dG>I`6k?@QU5@Ge zNZ4D65o3B^V-!@Wn*3luR4}p`7Qi#T3W*6RgJFpPR!BvA(WNW~W2M4+Wh!j61P|Uj z@rZbN-YmB{7c+Zi-T^?er6k!d_BN(5d@2qAGC|G078aW<)y4?_@O)lNu@j@DleQYh zIOhrk3WOdaZRfbY+3@Dgj?J!Pz1by;f+(jFUNJ5Mq3w-SwG>+MK|-2bst~GB%;rmW zyB*H&B-uG+beA{7NEbY<^K7;|ZtphS+^xB~yueyb=Mh z8+_L>s~eiipa{5rMkDu z`n=h4SSVeL^5wlRYLg@{PU(~Q>F3@r+Qc<_zcpVzlM@k19*i=4>Vt30)D|{+#HQo~ zjFuk4U~wn^yydw*rgHpHh41}f^iOPsck7{TA7CGjX<834tGs(4?UXb+ z{HOweynP-Ai{wWGAB^W>I^e6iqG=jdt0gBVD=sfCxH`Y!;{2Set4mg^6;0i+I$5!p zFVIF2oF7ARwW6|??QYB6-5sv)@O_7Cck+7>u79N!x3_Dazj(p3=g;}}`|tVd^Otme z&vw^}7rjtk#etr8;`d+sG2XvUr=e}{H+&Cma-x;sWBm6R(EmB+e6)zmFGC$8B?U^A zPB{nJg8=y9OZz!F4{LJ$eLD0b&!&X+EEME8!>8k@BMhVnd*hLfX~MFab6m1@KItDx zX;X<-R=gTk3f>`w^Fjh2F?y6rq5O5lY(8hPT++;>q=K;;TUpLdPJoED6)|dzt(YyA z%oj^+RS`p=-R*WBFPaC37@*S7>E9)Px&Vua2k zh4=cNu9sRQnJG)E)0^SURFuZ%aYxClR|4MB>tcMe7i|C%O0K7qSHv3mF=?(wSxvH( zz@JKCWjSoG^g89whwQP7B3~wEpr=sUV921gOUj_cbEyRFcn$pjQd zbqdFe^f9FJ3Lat)fJ<>`Ss8Oa;#|P{WSxkDEC~|wBQ8gckAdI<&bM^V;XIt2EP#M_ z@bcvicefjEuGc)exWo`JN-=L5R*M;xR;<_8#ON?h16JchkF5;ryEU`J!d(u z`0C3qxW2ii>m09NU9;^vLhyomQxQP$1Q%1)T8_3d{&ZidIc9Oo<9T33pD6Q;)?kj% z?v`=>S;suIk08{4YIJsucJ!op=E&eIQR!fcg@-I}a_MB^_c zVOEo#56Z$wv0Kc0R?dRNwGu~fo;*u>z{%_K>ZunoPDXaPYkJ82?2S?hP{r6xo?A_> zbnxWa$Rc8K-vPbyv&>@#mAH;8+~{2^YDz46$W*Bt&6kHj~{-B_mHB1IGPL&Y~kp;(1ABJ!-FCk^QqV{XXbrG1XCugI!JtMN>G9 zwm_~2QG`?8`=G_!Ds@8Tm&sd#N-Rlw;zd<3iPDTQ4z-4qDr=z>If9S?t=wy@ogvSY zm(DP^it+p>@6IaKz|d4F*S4xfp$)-<7kx5B3A?K5ip6Xup1#!yXJ_Y}o}O`Xx?-_h zuvpA_^ym>v>r|>iqqL=NBplJ&3hzC;?Ur`8!)VQX(J*Uf!?WGo-0Mt9MzjmaU< zDPAZW1(V#2G8YG2q6VfblXQmS6c`pmo{K}k<4;6lWDjW<$z z&uSwqSX!fkm%1py4?3I}Ma9E4@qi4~6iWq(>484^e3XoNV-1zo)Rm=a46~-9X(}qK z^K*@gtapg}DM&JS=kQKw#9R#A-fejE=7x6H(R)!vl}fR3-` z9(LXBpfr=bBE-ZqxFJC?n;pzqdes=DI!i_|I>dYI{5$kKhVAIQlTgj zF0BnnGtX~g69EZ=7e@y@)10kgnxX7D#_HgJizB6-qoAUE1?@HLp 
z6}qT!E8$j0z^5J=a?&8kS}qQCoeNc_Q)%c|UTu*Pe1r@@R6+O1z%`z~kJ96~#=YSi z)^?oap2f7$nqlKA4sfN$fA_3zWihKv6+W(uqC#_w{VZ5W>S z`OZf`ER*nofi|0QER`CpyZ3DhS!pYA-g|E!`|?BHvf>ce?W06#pP!!b>C>nD?mzvO zUw`!_UwrX77Z(@I>lu|XDfcl0jHx1`648O|Dx#9C=1W$K1u`KizTW4bp1#}Q-G*5e z`1J89tNKg6czVgp7jO9Sug`gTeaFpu%iXr6b%C8R80G1DPZvaqA?>k3Fal-XRT)M! zJ^A+uOZNBe{ZT%KK=#nhk%^JFa`L@^vYeAx=0DsQ<|rZG3T5{8UIffaFG?Lt7P0KB zW6`>yZiI?xlg`sb;UClZIOhu;Qw^WL@A*e(gT&?Mu~y1pv`uVQF(qG1orm%&xiYy1 zV!@u7@11p)agUC~n2SPYR?p&W$U4bjl}Q8#T5=fb$}*od34B0^o)A3Fb$I8om0`Y^ zbAEQl#o0L*7Z+S!U2%4P!Rq9c)#)i07Z)s6E9UdL=zYd8A~4z4^Mjn6=d~M=5HB~J zUzgRKlhezT6Os!)O38}8?*^~OcDLo`_J*6=Yu4*KZm!?(>eWkLy?VuNx1&=E@OTeK z8w?7UbjJ+1tTjnWLYDkO{>DPHIZ?xCS?5xaQVqI%K3F6SOLVV*g(0SSRN8>w3&Glt%X=I6fL!<_Vw7T5*DU4@ zb)DQjTA{L{7(BTWa!86~h(Q)gLhb9@J9fQi)An?oBc@8`T4}uRrc35vlgS8qPiZ<5 zBTxENR)otO0NEq}74eZ&jT&5^>IgWQa_=L%u9qsAQ5H{aJ3wRYDM2@c*zx9OBeZYI z6O|+@dGGo0FMpx$9ZfUm;_{5s)2|^aHoG0WzT@WZP89tZh(UmnO^TEE{@{6r`FhX8 zcWQ~>-xQ}|H91^vcrWB?o}m!VIMgMGF)xqlH6sko%>Q(lh{$J;0@sBK08w(DyX4 zCjB%I$j54fAs{Jq8H{{LCF#GlhqsTL+X2V4V{P;RCX|BZe8K7Jgs;E;n*aO%`e(lU z)fb$umQo)<>!eg>0HTHReXzZxb(+_9-ZhmdfjMeHr!B^LN8fGfyA9oLP2cYDz9Zt9 z&1;scC!DO7Jbn5Jx9bf*{PhLjeE$Q_Uc4l9y%a7q$w(0*eJZS)>xk%KyDydJUtWes zttcBhQse+R>}Av;a`?>qwsE}68dw>8Osi(gAF_cmc_<+5#c99&h~u=+Q89+%PXYGO z)tJJsl*lSd$=O`!GO_qvLZ8!dRN?`sD#NU)sj3P+BwLS6oU_@Ci;Hs}KYqj~PoD7j z(POS2KjP}qBbKXE>Sl(vwd8r!Gqg?MP38MTcE5=MB#)_hc0{4JiP;fMEu|c^vcs8> z`#wgFNWyy8w{)(f>soH!yyly4{=^^u_(y*H@kiQS$7Zu-v)SQ$H`EGN8pc{4sSo+= zOT8TSoK#OubbQIZHaSULA$3i17Q4!12@2=^@RMu(`CM?k^@FiBrg_lm9M=kLhy*QO zQ7Kj$p%2tnqs?fji&_%zr5nP+6`~&jh)Nd5vgwJ{@K6f1j#Ng|)HU;Y!+h3YjUpy` zT943kNob609(}4XNwQe?KC<5RyuQ9=y=!qEdMCvP(TBpE1$z3%{!3Aebj)6joFr8! 
zHiIQ=+&r>HQYjL6g1WA0W;G`#OPb0^rS*18+jnxHCxzLWp2`ZtfYlMJBR97jf_GFF zdcVb3%hlxx&Uv({Xqq{@&U1Qp!Q&@aeD&)uc=hHr+peYS+`u%2F^Q2u4@&p`cuc(@ z4?6_!Q`jcsoDL)J!|45p9Dt9!nZ#IbeTUZ-F6#S1h(AJ;8vvbvhHS2k_YEPT_@ZCQ z(X-e(i_qjo?FMBOgqe z=_4nb<+>G$#H=v@#XL7^PZsk13ahh8A&JFFm#t({NjSOu2|$>F7+rYz9?;%q$sD|R zna=kY1|XB#D)E?_kz_r|`;fb^S3ve}?dNYXh(i7G#`n;SqLd~QFxIk|&-vAtU-H}E z{+9ptPydCdPd}llD-6P-6Qwv8G3sRCk~4O&iUK-jPv@AN8WF>h3*zliLC!trcR1gI z4lGv-!o0@$1;Kl4RnhG_&aOP|u4TJvIXye!;$p?K=NJ6&!*ia$xZ&n*!`geImh;wS zgIA&UC#`Tg5`&_zrtcO1%J}?zbCLJgT(_h25Kh+QhXDPUYoH4VQ%X^Sk9}~Pb3U?1 zF2o>bv=77QK4wl`*;FF&8Q<9ZyHaDF3YA&=_AZjo?2zYraLbShjw)-Y>jrDf%z-d? zW~{M%{`u$p?sxx*r%yj+wOTQo%{e()aejWme074-79Wv9gjGH03fhRgnZbNR+D8+( z@gdR7W@fEKtYj-(l$tq88!@^Lcv(rPQj8d7nAtfqH6sSk$#TKtN0dGc3c6p|-WMfngT zT`FJE_72zkq|=i0z06jnCM7dQ9YpG=G?k%BIVRd<@Efqo4EZNPg`~^KPYxlUct8$Z z^GDtr|0ZEdO{G~aYGzG^)yWevD5xHlmKDgR;zBR4mBI(O+qS&EzUAg_!?x{l0Z>+I zh$O4(eh$>T00_waF$}SW`xb=#XA{oi?1D~7GC`75VnJQk)OAJIwcrCT6}bs9 zvTIx3#6V>f%SFvRHbfN0T9&6v&d)D}PrYl|wJk4SzT(yE806Un5m*R0H}XCOK=X^7~gou7qTTJdm?jkIVJx*gZCo?wD;@Nk+r|S z?)P3srgD%7Gp0(wz9&NOg;2yo_R1m2&zh8do!C3Z3c0y|ssJJhoaQ7(J@zBUeWBX3 zq!us7XPU-wD>W7#EbF)YobBUV&_8CIj%jHS`M2KQXHM?hOHuD$>hHWZht~)pP}en& zA3x?dzxfUS^k4o9zy9h=uCA_F&Kn30*SG98Eq(8)>zY|J17krO2zsDg&G*oFa-9Z< zNr`gcJED_1T0{@gqi|@AFkWhnt{MQzNzjJcE0(JjuIo5GTky$~OKz^8^4Cva@ZI-6 z^6mG3<%ef4xn8%Z-qQw!3gDxXP~?9lO>#+-;;#yC>Ugj`*!yv89wQ7itU8%%a>#hz zld@bHBdlXaON593#E=+2#~MfvS%2?_%jpwuxzw;158sHBtzRikRhfy=az8XK4EuC4 zq7nyMas{cL#4IshAx7>9G3iId$a1;l{QR8r^9wF7FPY6|si5@?W$Tn@obA;0kwZk5stRH6{>T~F6;Y1ssf#nJT4Y|ufuwfSnQ|>U6(M;5&=K2c zX7!A-(>bfga=x1L?D-4+^4TYR_m>~|_S^4y_3AZ!@32;q$5qU1Vu#}&W5lG;C~NE> z8xlz&A^ko?To`pBA7Iz@?7Eh=?df_kj1$L*mcH*rUra{u!TzUG%^>6HdrNICbq#_I zErCc)pt1^WQWS$vM#(hCS}APye8_@2vfz~(EStFlN(nQ@tTrrWHBD0?y1OKB2j^uX z6^VLJz(-Ipo3H47q-{GkyOuY%8(!aT*tIFv}Q$C~5)#H+yI36V`ir9wJKtWrhGdvuw% zGQ@>y#5<&?bE~x;t=Ew;41j`znq>ALGnU!GTU&#Wb06x|168vNQGLHK!>d=D%&ud&m`&4Rz=^%ydk(1SO$>YaQ`1^nOdw%!3-|_j=PkD5CA=IM5 
zp`#}{M{vCua60fP>$VN)d~^HI|sFG#dl6#&Ei9xHz42b#}s|i!&ZwoOAX4GrsxZ8Q(vDg}dFMb%pZ* zr_@kETh?7UM9vjfxqKX7W8-z+En~S5uu56hx=_SWOl#=e)AHKSH8~Edix=y4 z3}c-F`fMQ8Dalu*oI2O{^j(J}q??H`@ZgLd-VYEo0iX1OF*i^%Mp}5U#{DB1x~wt7 zS`fWr>?7pyQNw%2m;umcZ+Hf_#FX~z1E`1|3_$t&P*nmxtTm9Tfuj_z>o6$1b11DP z@9D`WJi54K-Zab?OXiCe&1{aXn^d&g%8yb+ZK#dZs}bff58e~{j_7-c5o;6KKo9`# zyDe?M#XAGhp<=*l(7Gaqw5D~ASOBriIbv`STsqICsH958k(ae*@#u`J(>b3%zTm6R zpYqLTPx#}XzU9x~e8*<9p|&P@ex!nU2w|eb<~_wKBRQdk{m+;RV+`JVx|EaBwmsX< z5h4He{KO#NcIrNFjf+@B*##*v?ZKN7EasN)Z>D3WEzC<9b#|-kG%) zLd3aN&OUA4MZ-I75s3#g#?mx3^La&68S#85z>~m7Acv1c1V{t~jJV_m+P03DuWxwu z`i9M}7Y=EqWGU_~8vnkQkeE@pC=236m|jEgo$h%P^WLM3K`To%0#tmcXqqJ%H99f~%F0bn zm{0xS{j9l@>kh@N@_porWj~LhI0+Fs%v;)@PbvO1TDi{8&bYk1WVJeFHk%1uXwzVf zoho?2(f1u)*A0cycI}q!cFTIbrfplc+l>@S^Dcdd#1u1H+;TrgkIvr1>@!h*K>HAd zI``$HD&rGC^6$fI@$apTA(CLMrmkvMs})~;^%ej4kN?Qm|KV#sfBFf_*_@_nAbNV| zfk0(6x~W8_L!jT@;&yB1^BFNIDz>sHBCT^=zj-YleidmNi?N1~&av7`02_P8 z?esI8cW7s^-!i7|2bU~7*@07%Aa`FzdE1|QJOK^XgR+n;3@ z$%gKxsin$!_Vf@W-ynFr5^s>LDy}XsIXgRJv0QR`cFt_E!qzivRbwiP))gpC)P{6? z2?#$lB35V! zo)|nKbVT10T#pa3CKW=_=|ezAkJW|-&DF`A)$A#cuP(SeKjZv#$@ky?z}?+ilHC3A zgEqz};3J1C7lx2*=<21?JJdPfOuiIFh9|Hz2C}k z3GYK;nJv8@66Y`AM63nDPZj-#D!1k5L+}_q;$Wr4HCj$jdF`0I5b?lz9L8|4a0pWV zHMB!;aR@kcu4f69$9A*f`HN?~di9Ez zFJH30yW`ExHP^Sd++1JN?sh~CDg|fmdm%n%3t$*m3P4J%Yzx$RD0@f;4%LS&=gy>KzQd=twJ?)O|TF$}Qa*ij}r7#+! 
zD;CQIyWNiG&z@6N70pcQfqCa>+ZJ&h7{KuO>VnGFqEi@){^Pe?ueV~Xe4pj1AelOU z)_ItCl(VrVw*w>BFV`~Vd@iy0L{Tm)X{Up`!kkBYwRRTUtmGsM`N02~*5>H-?9XdF= zcFX$q4ejO*ts=f|h*gukK7r^RzTdIiZctmv8EGnmRsmbp656OVMDY>^*WIr015Fk> zpBAA~i7qvT(qzhv^j_aNuCH%-_2!!GwiRVj*qw_7v#0DF3ki6S7DGJX6_3OJ(rd*) z(d&)Irp>$S9o~tu-R{Iq-M1}Owd3^UL_&siM61B8sZq)hLr+sxEao%%w#9kJ_028L zIi7y{30GH7u=WkFUcYAB?$B+^ci(@9^Pba_ON@rqa>0B)rwNEIAVPn(#jUeWi zinY(1beY#sdO9XGD{^w99yj+g6lAQ;@%9aVu$C5qS#poVe2e?q+q_=VgVMM1&^7{F z5HMR;6;B>N;_Kgh%~!wv4WEAY85b9qtWHia+N6+2MO9h$!!Xj50njva0qSSxJbLtq zwiQpv%a<>C`T8~Ae)}zd{_~%C@!}b6*W&T0m}=~VSoCcKem}Q0^LY5j+sCli-^?tRR~P)p-~I>w`Jexp-~Qd-v0BYp&gT-Z z9V5Z@;5|AzyzB9O$8Ni3eS5>*>sPefEk-EtcWt8jl&aFNU%i9~b!~9FHCk(& z?+Kwtqo}op7})K0^v+Sw8tP^)ROl&uMr%#icf5G z8C5gGRx^UGFwLAtr%M*U`z@!-CI9pHf8hJ?e_(xghYL|io?@=QlEFuc#8FZdG7=@k zDh9T#W4-Ixb&j@^`g`b{cCUvq$t@25s-&A2??t3^0dgNn!jA+eo)%P)`LSZ^vr40F zT1zH{rv-=2bXwWck_8JABL>OstD&h4v)a;VMWs?KnwLSyE-jThL~^$%rKq%~53t^K zTwkww^X7)ldQ0yDA({bnq$Po#poPN~vTw9h!{^@C_O7w_Rt(7^uOqSVC9Fy*l(JIw za^0d;4XIemo9kPY)-2{tN_H|BWqA7d6Z)>>#~**7-8kXVM$>klzQ5yt{o&92_V2!? 
znJsAh=e&6Fiuq!OwVJb&Q=U9}!hAO4@y$8Up0BuBZxB`tiMv)oNzt0Ys9ksl3Lvnr zV3+wHT1!3-DIrN>c{7?0^0&m@Dc8(gw!&+hN7`sT=8Ax@qm*Nuia?ZoJ+R2cc;E#g z-#^byO!JduB!qD#gj^6T24YmjA(i4tqoT0;B(YTc5qSf?xqlN?=|4^0>PKqz<7T>J z;G=kQ;t=^;+Brv$N48`rm8kgA9wCLErl3NS(?pRj!A)5#EI+}pUh^2MDOOWJD6R8l zxYV#{BAI?S$GMMVH%yLL=nx}?_a1E|`R?-Kf-isd1z&yjC7*rz8E5AgESD=zPS2=p zldM)_5v-W*XJi*E`lcWzce4g?ySqgNgAN`)P=bq>LE@8D@m!htLk|bq@T_SLVj8OqXZ9uOouF5z4P8;Nouq38 zPf!RYb+Oh%%7rSul(A5V%7fz|R3zDYR9+_OkUwSs+V@}`kWR$+58ujt&tvltL(?yz z(1Fmmw7ZSuaqMYKxiL#lC*Yac|QnJ;L2C*i^vn)#fr>v;C;IZC}`xm@Ccgk<%-Pc=v+ z%HY|HmsEAd<<$ijCujWbx4)vES^mfW{1eY!-OzRp7bSNEB?;-b-X3&0)Mr;SY&m03%AK66?3pq&(em{9;m9Cz2c)-<558 z%r8h=juFYktB5s*x~j3(CgPGY>LH{s!U!mBXl64`&Q5vq^b_i)!pDG107YwyNuH0a z&jb~T(W8}k23@8=!}UVi;CDN^-Hxu^(X~=&8l~vFj;`+suEXXtKY37mRMd+(tFtras})~; z`k2M6;rw*PAO7%1{`%KvC}f_jHW*{EM$@|<@0{e=&k-paW#xie=I4+FFXs9r$yhM4kbPEl_ z5snC(F^ZuX-VdV{qUZR_d#z^QFGS)H<4tynA?Kr@?H)V0w1W~E?I9)F@oNU3qV z9Tbwns*~GLT|6g=4&JFl3;XdwjSsR|g&0wpEMWf<={i{-DEmD2R}CL7^Gk{+KJxnq zC^9j5AO^Jkj%yU)v(Na|=b!QECr>$9tt7W2 zRky{MbO7d}Rl+19a@aEsT{1}|3+`T}pYz#gU+~R0-z1O7 zbK2dOfgUg?izd%omiqxj4gfJ9({T?+}`2)o>|>+x?0gx zmYLPK%Cd=$yW2P1-rnM3z@>t}UDp$wZNnxo-AP#SBZ{)?)b+ zGaW5M@m*D1A{zxK`|V&r%ij;=e85Rx3Qzmw8S-}v1BC2BDGWd6o2dP-G3-yp{&8%t z(~JyA$3{aTBqX+yoDO3Q*#H!L7z!c}26C-wW;M%`C9_%0w%uYk4OKmtvjPnRfl*2o zAn7rQO1j@|!S~>LRPf^AvlUe;dg%I|&1Oy4wu$vg;e9U*u-0IdPQWGLwi_aW-EKqI z?(nWBc!%$LT-Va=I@-1$$bf(~~<(yyt>QkcY zAbMWBc#U^ns-hDhq$K_nuH89L=N!APW7BqQ`$+F4tTEoZhK{u)--E3s9-YNZK^#zY zJ`i^u#7O964zw9-gJ?yrZ>J8WA>Sw!Aks>}M_pG~E7d0zW71+70~v@D-E24zQS_Z- zz1h;Xog|i*)Rx0NA2=o-sg>4Nl{l2elC0Tv-2grm!ldJZqYn=69V&nhn$y!$>e&Jp z0^8dgI@c3}!&ZvEk8HM`umxZV7&iZ#y*JyE&T`l=(LR3;IMD#XnEoBJms`9AlTZKX`)%DfDN=(rz5RiE-qtMxB*f3QXr|Gm zzh2upG`Bp^0j07sJ_`O_OmdELfJ8?|Sl6h)qNca`JnLmNJ|@nC>DWafrC2N%9336- zt6zP^SHF0}FMsid=g-br&Pqxh@%0*qB=Q9UXh~$1qE9+0f;vUmB5}%!r;IRZ4e%^A zSlAg#WmEB`qoWhFww#?lFYo7_U*THoLqWqQrexSjveK4X(YxQba}I6FD!x4-^1 z|MoBc%9pRcpfH-?dm!RNq^URD-P~|{b4jzV=(|SbfbVEjAQTot!eVPMDx#IZUxSmP 
zx_#H8&~(nx1xHo4gA>4LjV&zJND;QWX<2V7nx>`c997j2QP{GiTr8R0Rw!L?cUw`_ z6NEuC8 z&M0Ryj4@mCP#E_d(R-rrCF85F#5g~7CS7O$h@=!tHA|a z+p=ob1SihvrfKNf7VjM1Ih^b1+K#U62HiKtnBH?lAJ7iT(+XZl1=7UK=GWZZ+;M!q z;^g@==F25ty?%uvQWTb}YoY3m-V>rQr3Hjuxa%EF+flbYopU&^2tlG{KV%wr=WFk) zY1xLTaL&`j78Q-;j$1=vEym^YUZ{0qB1nnZq^hV=enmKj^Nph#7cU^y z!kvV~s-~r`I~lMjih@FIE$tuV#P}&%9CI#Iq8*FgGkOPWJg5SW$o zJIaa}JYDaoTk+_2onuos0;yJDZHY1k##+J2w{5DmkSZ*PfF(j~u|{e{wC#-5-3EiE zC}wQd8+-^H%?iGFekO`k)6%w{vOX7_RXV>|~Z&c0no z$CRCN*Z9a6$$6YPf_txPYShgnf8+iW#3b|Nj|73b2O;~kHu%r77~DY*7-OWmxiJH3 zY%o5uK@}rtjnQJD6osXnl^A2_yAG`sWm!Z}OEo?j412Gp$ zmdHgB6v0VRz~1-b{ML%1EVx}YEN@nv-QDs0iZf`i4J6<7H5o#rlx7 znmzf$F5Mo_oI3&!C17U{TdG0BwwEpDNDGn>_w3|^ufBZ4zy8bbc=g3|j+Qetfqs2Q z-8YiJw^`BkJxU7`Rxw);P!dK)WJruUK@o)lJR96Dc$AiEw8}{NE~7Pz#S&W-l(QK{ zQBus7Xj`If0m@*?Ifn-alx4}$(J^Od=bZlW|M2Ip|HQ?+?^&1l);W%*mn=entY6+OlmDoom`tL4#)w4ix@4CkJG~iU%mc< z*Ds$jvx>gifcJEOO2vNHH`m-I>u zPM)PKSuPeFEDu>&O{s)AD@Kj;k(=8Ub=|UDEV#P57H6k6cx5?1Ii~47{=+q!x~KC2 z7kAlvw&ybL*(*<({|vL#$)S;xe8&hmgYmIFx8wD;KAX?UkMv~xB+^cwqMT#%iHVK}>nJ0%5okc~rRCQjteNU?8? 
z(8n5^s?2LOFj@{!vzX39ogyUGQj{ge3OPxx`Ift_v`Wd%$x!q?ZL^`RR@Bvs7nI_)f@W0O-!gRF=;0FS_Id3xW1SCmk{uU#d}BBNuJeewPL+q)3yx)t#Wi-Pv1%O zqi!0SMhbyC?<6U_cl1t31PGD2vEV1Q>y*HitnDzirnp9>yKsr<7WU6-0lYK_*mQ36E{N6R4=N?T6 z(8Y+>9%DVW@K_VE$$4t0#)qgSsnTasaOSg;gXLT*Svr>v!HIV-uj1h%qhgBEI2UkU zDxOzOhw~mVD0J%R&*Ko*(NR}x`o5xVDw?(hrLpAg|kNeZ27Tceh7?(TO&ii?4wC`fp@7?c7kGk*R`utz{Y1*frLEA>0 zc;T}0^>LXuDtqFQl;Xbfok2NF28bq7XvOpM6J9(!VO|(~*ANl*p5Ubrno*Iiu4y+F zo7)w=>sYT>)OEvrx!`8C;`rc*XV0FooX;q%rI;-#7d^HOEDoev6r~=H`ag>W;c<@IiFwzE4I--_bO!&UOazB44(HNuDE!2$!2pw zRoB#QgO8S=3Nb!N4)i#BOLh?p_uB+%!gS?-OumipOF`14pY{>mI*jdwWI!uyX|P+S z(6lM30y&#m3M-V2QayNzZC3ZJW3h@M24TOc66f&E?TU3(;d~gQBR@;yW4pze3K;@1 zy3yl>JgW&=Q4((uQLyo=O+DnwK%`6wdUfkq*Db2xvMHRcQ-Vf4fT3WU2pKw;k@9!U5G?wQngx* zG8WKypV^?sL$L39y1pBduCr$>W3uP-IkWkk<$)L==g(ho{_+JcUVh2p@hQs~(d8U% z3zX8#=QG~CdBbwP;OO9p<>G*^|NJ%YFD_D&pW&V-XtzZ^{{Bzc?$6%@SdsV3-%Ppx zNBd~ICq|QE79#{lVGGVrj#q~C0uerItMk&o|Ra4Gp zcrQhN(G7aFW?j`urf4=BsYTWMK+{#+tu}o7?t=H%ck}_e-le2*heF|^D2guX#Bi6q z=-P5{bV5*;)<>GE#)Uv1(%BUg6=)e5&e+GB zj7Lc0G<^n@y4(<0PUnR8NmF!LB@0D4Mkr>CWd{O!|lDXMx@cRLUmyERqfy zL%??p)#{F$%MaXKT@YL)wL5&rMb**=PhEGctBR^_2a*J1q^i90J;Z>u8f!E$dI*6K z#^~VP?TXd9qVGM{loBCnW$y?p)@Xo)%;uHEveKHq57PmagmR@YDqu8N9i-T9@2Q%O zbtM$oFPfIqlVgmsxGwPS;tKBr#uidvFJ@Q9r$Mf!BnOenx__V!)o6H3^py1Rh{6SE zyMWdmGox8r&CF`-^n%+hXU(Foak9!5bu{2n}_DO5V8@{DZL z*2X9y#!<|V(Foc5(X9xKN+11+jS%?Jk34#q61Zq95;8(DrgX^yyGoIcDU53; zT4J<8X~9;iFlseQWuY)xw~X^Vp#_9ov5Ax?@^j>2<#7F%n1`Qin?|Lt%1{qKH5 z89OfDf6MLF6|0+Dx~itFYU=fdx~}NF!$rZA3y(*V3F-Nzn5^tQ;k*xMqeo9#dM2aw zV1R@Wq)_U7!E(7|KA+Kb4P95`x}Njr4e^Cyd92Wd0VNch^K!<^7cZI3<{Y1#aB_Oe z|Ni&?&4-H%s%njkA18wNoOs$#Y14fyhjR8lrJuh^ul~_KspajefW*e8q9`7R%LT^= z3v~3jwn5E=sB#{qI-06tF&CIt8!Q_a>FPC3zvrawh+52%lEv0_Z`*xhSCCsrfnMDy}zc5k-NJ!ZSSQ%MvzhqC?$CtU5nO|^j#K4_9u|C z{wSfkjzT8UrB}yqPE~g8~EzG9lc;y@R$5iP_1~Ma_EC z(RMla!=#|yG^9B$>uE^v)7p>Uyt`0@trqWpVj7l`;(a-HKroCZDxyhnnW*SoKzAK= z+i_gZn9WOy+S2t6H@COk-rW&=U_PI*SS%@QNr;b{~9)fkT34UY=d zC#krfM0bS4UM2RCzzFGJP@;+j^NMutZyx7~QndvEBqjst9s@b| 
zSJM*@r%69Gq)`9Pb&|p`{=M(s_n&v$=#cA-uO-ir*5fzx>uO?vq`!qtLS8poCH9q( zw_d5~n2a8&-N?t7Bz^xGayWcsXGtxLb5Iz~7cXD%KmX-l_~o0|^y^zLFW+)^bH(a* z#p-rVTeq}zM_o07i|t*Ce5irl$t50fpZUax{*I6#Yc19Yq*1}hnu0(h%fiq$j;`rg zEa#N7g0d(%SRUbz&*;+8ix{f-X_)?B}2@`~CytS0(I-q$}#du;3e zWNpul(!qW2OxQbVPiX`Cdh&M~{Vd}`#N-5A0!3*#J3VGGD^R&uns@j<;9ZC7JDR$o z?;S>$6y<`#8=9u0?IJgKSJZ7o)l~Qpn9XJacR^uNoq(?IsoRd5>swaUhE3Je_h9S{ zVCa=h-n{#bHZ*ZIixW?sW?cY)Lav1rWYtm-PNo%E3R)A zEanBlJHpk9qoamr=g%mXhji-=H>--M3qnkQpH%z6ct6x2*yd?`SkCWmv1BTYGIN-p zDI953%GB0%5=4j$Kf31!Mgc*ot-UkIik)jd)Seo}+>N}bt=^0$(r~U0PR4YWPmZj7 zCQZ+sF)4H01_Nv>=JzwzyB-XQY3H@pslcE(j`9FoL8QKX=8EoF&omi!Wo=@#5($|5 z+xLB9UrYH0XiTmhLLfR%*Ho-;uetj0p6lyNg6p8PbhTqu*IZn$*)$!i^+xDpotHEL zrSUFMH#L3VOA*jA(MBgG*hmzZlrdru1Vzw>;Gy%8J_I@+=zPSvD0J6h%mEYUq3kC$ zt%DYWXfjdCKNux7;GBzezQwkV&IO{L|0R3 zK0_GiV)%C|-xHjDTzA9DVtZA?-~xOd5*Za3(VvoiP_>?Q>zH?LgkWW+7h@F0 zz;afwumwc~A3Z9Dp|3ILh$yLY9s=|}CXc?S=^a%k3D!cZHp3QCl&U~2~bK8MoA2!tQ(aQ?UJW=U2T{zO1Xws#1Lqj zny%}xMZw|05eEl{%w}^Gx#*c9i`d6JCDXi(ykV76TS{E6R6ot>2wC4r#)pq_s4y;3 z!6_zCrNx^1mZk56h2v>$YWd5bF&zeRIE+;yK2}45a;D{rQCK8$F3ykumBPZK)|URG zrZ@XYuXekoA36ZGm3XDMYja3}KsD?yl?n<@h52ZN#FYD?2Kb&%uLGzU2Iy7Ju`}s_ zFe>NVc`_<;fDTL)(rid<;f5y$&FYX!u}L&9SV60#H3i@V|Mn7QgFD{IgaCbsTJ+ID(YUi^9f??*&s%RcTX zP$-#$~NADfR`T=vVwL~U6*U@%u$`SEHeVXyy z(L;3A_u`z((dZ8xOw!)Z#`D{j1DivR2 zkQqA3_kbv|YRu{%+PwmrL3V^rxo)M!|cU(*H^ z{KRJ?>melJJKg#F2xaa$fAad7G{epwipZkMl)x$u&!95gDUFrnx2hxy6KX>V>ytL5 z3x%;nH`r?11CdewN_X=M(y($MV}3$h|z|^CL=S|g_<&(Dbk`f zdGT`2(cv-6#Zp+TT#s*C+RYue*O%N}N#WVPZK!QY*EYPrzGc(H_3a%#MtUEJD$=wa zps4GHrfHI%C$)GM-HfudSZi?JQCLk;23*kipy+)hdU4Wyeka^fK_jk~=(aaLxv|wfZ z= zz@&H#IWRG9SIhoPEeoo9$(`c|whXCURBx*|TO19q{TA&f_lo-(X5Ps9f%hM2lG(_Z zfI14|TqsJI*Yx0F8uLULKRdj(qui*mpk8jf#>dVV?mdI#d}MZ!iSZkSM#3ax=;Y*> zKm6|3y!qle%5_}6f6w>df6K+i1)I8Jy{@@kRWyyGOAw9TM_dr0_| zFl6a58FDr+c~(9{8JQz(41fOfpINO|2vBfGAs7^|zd;*3srm4qzW2B3(m$HCC&OwI z>rRXT9igyc)ZtvE-yZFGLzy6kQ-(FBvj@7zOg~r4) zJ1b|DvqPbC*WhBH>l&)2!3WP~y`l3iK^!AQD@vVHgEAOmE68*0OHj6h>2k1qo3!#AuRJJj;k1w 
zm+B*)jC?;uOvXWWxP%k|PQ(dpukDOrP>kXzJ1d02KS7193ShwFvUzF z?7GLt1{W2sZSl!qAJzubDG^VTj9DDs*HI!oR;6U}-gC3s(0f1Zhx6le7Rx2&Y|i1~ zA!Y8Y|vcJ%L|EiqbG& zl;{wXcUgXCWhgl_2$7V)=BVn9s_E#ufKTpgtu;P`^b-kz|59z2t>Q67G$NH?l{WN! zho;2)NN^q(dM++5_~tL)@WuIamdho}G)&;_DOA@0kUg+d_uzV5AVh( z58EtQ+v_MR)hdT2NCGIcYw7$sC5h)+Cyw&_>uw6A z$;QXNoR5rI-ab(F8y_)}a-D$aIXIm0o3CE+>tDRaw>9s-f6x23-*I_)#rvyk>ZYZx zTkh61ZRhB{2x=b!F2p4G6Xjxx5axb_i2;(U$4ZjAL8WmrT4S|Nw0;tC>?0KOIVb05 z{Nk5i^3|I+oShy)>{;JkWBZ1(ttpoWl=B6}Y(c>SYs3gib!9O{!P)sazx%`Q2|(X> zeEsLIY3lmkA^tbZ0fEWB-Bt+3NNE1-&wG#jS8Jbw=^vt{F<`Y}UK);%4_VGjmWz_Z z!#O&7*6W(hX3g#Gn(r^J`15~$$2V_3@cqR#T^l9ITpJ2oV2gr?C8$7jokT0Vr*0Zn zn>B6M(zU%1!;qE4z}>nU){;^R@1*7iNk#;6R8*9~G3qgGSrJcrdFSvCnQAqN^T61G z*{r}=Q8?SSN_l4rpNyOJre?9JIM$Y2fK(}uQen8ensj9f>wWqhOlZv0#X^b@Ok2M4 z;3#717`h)h+l^{`tPjIICdNmIQWNH*EkExx2r`5WKK|9|a}5#QJrVI~G~?cLme7TYC55fZd=xy4L*R6L2?pfz&TG{H+UbI z6*H+VBgBp7wqdfL{BfQ^yc+Z7k@FR1DbhsS3WR?7zd zq#Tg_WB1sYZYwgP%=~!c;|D|r*cNh`)_L;gj$y=(+iTiJ&)AP``>ym9&>reZpBg`| zC7Ls3zsm8EtpbB;41G_}2%#;O_K@-MSc_4d227U?ThlgyrW0z-&ZT7bWH|QD;etn-{0y0+Jd)v^F^-=fLX_Th^O3@7{e!-*-$`ME3lBT6@C&_)Ma3>aqIRJs(n#KGk?h z2#+7trXKUZxAsF4ij6h`vjk>^;q>H)Sz%DoVO2=7!L#0M`0nC8|M`~-{_@=i)>Tg* zESs*yQxFhux9bCU)eU{$)3gn3+v0shYlHKi-u2?)Pn%2p+!4XXjt2 zhX`9Z@9pQ`ezslNG5Q9RsfK*xcQ6_2Gi6tM}a8-mqG& z*;Ew~DGgCK9d#>pYnrx~;+w(a(mL&XVX=~;fx^ldko;z?8;sR>=O_w8S6{QeQ99bPlW4XvG^1^P*r;&LpuzWy@cQlAtAV z&4)#@ii zIjS%i7E@@B2wd=id&bATYwuZwqXCncB$x`WXwue1gDB6DxF$FF(&?OQG{uerWjb9+}&x1PEU zxW31EO&@~5uAJP9?m&YUDQ}F zDX+Ku+edXz4h(pH1`hyZx#~R+qJq?~n!dCXp^Y)%oXF-7DqQbqnvU75B*s9~RFpGI z-#eU-I3HNd7u0RX)y*9~?@whW)^6T>`DfbSJ^3HI{YzcU&YxVPK`3IUeIEX0EAC8<%ogg_vy*~;Pkb_q{MQ`H`7|!5lwbV6Nq?$5&2Osv4tHBCu^mOdCu`1 zp}^q!o}d!Ea5m%h>(~5`Km034M+cNz(KQv_W<$NYqiY+gOTBy|kgQQdPa}JzeiG$w^Pr`#Kl!(O`X)oM+=vs!2wFOVhNpjTAV{ zYeudQxypMC5p6V9XVz4NtzptcS4?ZfCs#ovAqYHu-+*oi`i3Sb<_CvZTdOgqAcnAmf(Aq%Q?r#$85STX=a}f!!{TnF&7|=qY#xd`O`5tRDN5_Y}dGngz{qwJQ{l#-;rJ*qL{O{hs<8Zm;)$5mZt>fly zMcp)nHZq^jsqe1%;2YX%Mc1r3K7Gb)c_?rYlVNWKZ{ED&fBirIi+Atd@%_d3c<&w` 
z+n+(pUaOJG=kI4P{C8?9=Ye=p;QOu-Z#ka2-mqG2_{+C%`G5bvf9L;v{T;U(N3=`q z?0~B2dH>;xz8#CVDWz~B;!v{pNVE|X`JUSpz87}M5Z z88_RXd;a`6uV205@NkJynr5?RvtF^jxn^~BNxfRJ*+`Z0x^Cz?M^us8IlA8AqryiC zMg$TGPcpzHfn4@Y2w@;22$1UANHV!n3Lk5g&arYqiLyHN83kyV5q{Zl!6I`XkMIQ zzylg~z2N^b-6QHLE5Cdh_#eJz~thsV5l{(|GignjewJknqL_e2&il8ORHE}_6eTMHo)9`>XmP%! z>uR2zzu^4zn8m!n`Idu&1FE*7sXK0OZn(Q$5kg=wpV1vI=$a*jmRSKtN4#q}I66kp z7eI;ChS_Y+>G?UQ=g+vhzD|cOe0s9$-=h8OJNVzec^@z_JBI?U>*<=BdaWo+OV{<( zjic?LsvUQ$nrI{K?V7sls9HztrLF*s&`cjBxzx8iO@^T6`|oomJ=XGksw_x%oj38`#NF=)iDht4 z_4EAPx%lg-+k~BSTOhQ?=me=VDPJ)qP{&lVK&fF_NZyAXB0>WyVv5d4q%>L=6h+C) z7cY4A>JP_ElPvcwCTL>*nMMeeq7M0Fyc zx0J#vg^v-fCWaoPaip~{-gVy5`oO{Afe<(#24(P{-vr5|FrU(7?@Ml%%?gHAV2#46 zLCgruAwgL}l;TBA>!=&y4)6Mi51Ke6$nLj*2AEHRDf}E1!oOC_O0x<&tLc%)F@{*J(uzAOeW8!jqh7%A?8pDOmB zZycW7%{>N4Ov!1}Lu>12U)Iy#L)+>0lV()qFusVLY@qFHPckY((ow+tz&l?mJZMg}Ssf7<-8Kfx2pG+lCJxu6Xy|1*_XNg)LYvXSD4G=MI?_ zmU=OV5U4jBg6mnHoM6i(rkt}}E;u?m=GCiLe0YDs)#VglI~vB*ahT)`bg^{-gj)ej;8O$5g05)!{DV#3Zn}l z;oDPw*+e!>^E>ykQvM{zz~rKtnpB&Nn2_@=J6ODrgy;#K$PTSTDoU3gkug^&C%O*T z{sG3vw#>W_O&pDn%ux9GHuf~a|LI@OzmlP`69pL&6b~zmVq|PP#BwxErQEm>rc~+E z$m-YxdX?A-&D6dP>{9@DjgM&#Vs?yX`^jicX-eL_5$3B9B6ZiW*=(pb8|ux5&1S`B zvu0DR*=*L-O+%j;*XpLF^`58;NjMKdfMKa7)#&DudwM2SE>Z+}YHWGuQ97jlbdCRU1&yqyD!=H&iUV;6Rf z-A`Z3`-loY)^gm(tB6%fIEUwR!?LvK;2}Cm3JSSsfkqpH>wD_nQFRjea8dLH!B6VF z+3z6CPr-*grDF#lu*mN}kpJlWp95mED|i#yZ2Jz6$b^_bIQb4rlBi{Y3u9QJHQI=$ zwa}VbIma3+30Te%+m<3mP7e-vc5*s;Gy*6D9H4Ic8mNy6nV&OBKI-vyIV@(b{MUq)+1#z*$RrnmrmBTrMha;zPGNo ze`4-i{*SSd#1NCRuTq{0+`|K@Q84f3d~D}_NXvx-bMY}9Qf}2yS0|U~OkOpWya76M zsK&Fdnj&ZD2;z-ZN$oAS*k5C0>ZhnQLm|2dG<{TcB%u9ndRy;s1 zaJ#CA>I2_@xMn#oF^D2|adE}P#TA>i7$7I^h!_tjbiry?QP>r(kEo)eX)0dOb8z~M zs5QEt#0Y-j=t;CxK50xJxgoXs~NTq0vYIgvTf@+Vt8)R2|i%e zxw_X5*+c;`3xJ+GWCRh z%J$suYnGKc@m8TxA%KRWDA3kY&gY;UH+LJ}y}#z&H`Fc=O+nj9?nDS855?vk zqn^&WK|vh)ofUUAI>WMTlXa5ED@@)WdMz?x=lRsGY}{Q9aa%see5JGpqM9bEr_ZID zBfu~&!HW|_p@mU0F#$%c254DOi9rtrkNty9krM1w@&Kh8R51<&7aF_c7|BvP<+~0u 
zJJtw_iMU6OSB*xG5mUx9GPhiab*iB5u5F)b$g?pp9jjqP6J$Qh0WozXsT7%jiD!P2 zWN(a?F~&=NNRazjE5)Oom+JOXlOqmQy|Yp-=UBwrY2-s{%N5p=YjzQH4HPJ}0b|nm zPLl73pHW`AV_@o8Gtd zSX_!e_=HT5t(5AGg~^72w8j{mX?X^+f?$4EpcEc(O3H!DO_ zi!t?%!AyU&-BY2S@SPvmc7^Odp?k0G>JEdGH{?w`l!87=8*+i5#calMHbciq zbdKPpf{a#b*c6Ja5X<<>fwEJRfV^vzcesCi z(ss>C(9q9(5RRD7tTc_6*{yi~aRc;+C|Fq$A20jFu~6Iou)!F~|Ie=v3u=20O#LtE za>EOIliH_(Fd0?5_unfBc3zu$5x139>4WckwAGxSoN;=zB)Fct+dFPv`vlDhRy{(T;FoDS_3dI zOJ=5^cRhEvx7^&^P*)XNDeA-wWKBtVyFn?b>0`@+w%K6knsPZu7ZT=Q&S$*-;tO8A zdd1D{HBGabJl~}Aad$uLvlX5o|DGx$Kk`M{~xX&c_|u@EM+M zckI&Mjp;mo>K%4JH|*5`Mwy{6qWFXmS*@1TCn=bs~nO- z8bTO6=j@paVHesT2@#phP#kvJuP_)#k3G}wbB+64>9N4!DDOw3YgfY<43Gy_)ab+6 zH(=u&MS>t#;&y(@RMt)*%p}D-N5ZmGC=yXoi}9S!XYV5>I}9g=eKx|i(bz)hc8wJW zo6r=?HOF~KYcdhG$lNA4xEJFila=H?Yprp;Lo3DU=_yBthZM%pHZ@o8Kj2-%a$Yi< z&zQ|;=(R~_h3u!k@9|OSabqeC;iAH2?M$mN|26k8uc?^#M%qI|PDei1r%L%`iV;Yc zJbTh>V9Ws-=37sT%x~|nIPio_)R8!eu#b)c6{PNp4=r_FQpTx{2WL3$npUXEzpl^SRV^nj^F=b>+i8B$Pc;RRH2x>K~5gm)Aycq zZcmaOjrl>|+ubJ7r3Y^)M(9+bR$vRmaxvrCvon^<1=lyXtkx^mn+;vpO9X|atHd$- zcJJOX4$9lq;CSjROy|rPOGGfeu5Z|EDp+}{x<+ex-)K!?EXRk393CFg`^ctg*=%aO zbKGqzybqMNps)ofh4-HMa*pX+>~)DL7d+ET7OG@c%y{wq8LwZz;y?fLA6#F{`pahX zaBBUd{Y_f-m=1q`#{K))t(ep0JoNh!NK7 zHE-X3hl-9DFU~0n%kjwx-g{ghs5U)?t%$K7qBcG_Vbux~eJObqb|X;|t(h8!_uPc+ z&Zi7l(!$bo4*4u0qIT}FWs>jMzQgzPCDUgDBm7ssG)CQGh^(th3^8lRHIbg-2SNNE zXuE_0kLxJop0!%%s*^(DsfO%+AmxDg0Cm&Rc226SD)J5+vE2V?_nLxKM24~O^@L+2 zd!!->`)d{DVnH;9+x3RG-+j-;hY!PnBHve!E6&-{PR2);so4N|(D>Nt*N=YwU8s+O z$LKrvJj&K)`MEQqZACloML*Q=ncYamW7iZO^^y7N$FxY?nuqOqQjAz(fg==!;raP9 ze*fEF^Xlab>bm0M{RdX7HEq*Kai=Vthjk`a#{IRL1oGtbqwxK>FEt{?qEs@bySC%z z=9acCAbP60!uy`8ZkWvqtkDGLQA%@rx904;#b`~$5S5|xJxyO@u5X#o7Z_U*k@_UF zrec0Dr(Dj^#TB+ZL@CR1p_$KS{OYS;^6uSttT%UDe(1O8xbuneb6>0f72B@E;_qe- zRQ8^W0n+!5^`^$T$foYuR2@~_(Y8{h*;u0FX(1}D;btTmdluvjB6_>dNt)(Cb@)Ax?1 z^YlT~=cpuWe-EbMuhO3KfS>z5pL&OEaA+k-LMnh!5fi~A?6Vf_niya;95!pYeg zrku07TXVD8u&J8uKEZ%#9t76bVosZx(F>i$pFqH5(|O_+HhQSTF z7T&2NJAEEUcdfcRkKY 
zp{9YEjEO*%jt=)3aOe@@q!3pQcjs`(?o{rMt8mhTkFcq0df&6DEXE+TxG~~gPuF{t zk_16wWVNYSRSnC71AO1pd2nca(5$PL57)O8MTsQ6&gs0N^&N}j1B&^E+v`iTvRDk| zY{~J_5zEB_Ys|R5ws7$GbAZ$iu=jHw13#wiJs>{ukox5HpFF6KePpX2yMKSKzI=u@ z}Ak`>HFAdTyuaTGyn;aF$3e|pR$e6n^ ziMph@&wlXjB<`dvPOmY@>|ObgJlxr!ny%yYdY-Qs(&s;Qt*5gi#5nZh-W9fR{z@Ft zd8|Vq1|@8qQJy6m9HeL0N)Lqxv*9pAt+hn)Gu`gg&=AVq?WFUu4?v8}%96w70cWRY z6xOoYtoi=?cf9@f8+_k^@@Vx1)*7yFZn?a?X0@(xULaS!3-lqR%IA}%lF!IJ+ja_9 zi!n}!dLG_G^4+X;jmFndlWTJRDJJLha4_zfyWM-l-SaYniYZ?+642C5OHq`Rvy^)| z9=7C&&sN*BoJJie8{^}LWSQ=l9}nh&Ok|}k3rlHDO6(FPRH-O=zEMd<$IhoJ#+qE3 z@NZ>&NZTjef7<^15M@D;*-d1#TcubS!_o1QF0^!ggZBesH)3Z~G*#Z|U ze5$~S>Vc3J?Wju1h8c~IF~qh5fyq|$V1@e;|Ca=9m=vrVHveM}{gfe}>JNEC=6eJs zxXWqZGY&M4-h$od+ZMV%&AdrHq)u$c=DjD$kyVIY+bCeIZJ@?-3#AA2c z5v=#**mfv5zXp+m#e!dc^$ULco8PcJSa5rNNz+txoiIRU10)}2S|!4R35IVvpYnn{ z*L}t3iAyROvB3p=@U&frL~>>9oCoFl;kiQy6n(_`nx;zy4t+=ohH~oPZCc8k6(-?O z5A~d&1FO}Faz00^f>M{992|o!rP}U#O1q@;OPN>SK?!-E4(PL7!sC2y~9c>CR3 zF0Zc8G0=6-Fvb!>V7;!nTW@H(9v32=_w+vCk|I6?JEpqD&OqjaY3+VkN1&yNSs#Qd64 z*dkS_9G$93Nlb~+y%Xjp*N0?)B#v-36?zX!zyFpk4NuypBhyJ~Eayju{Q6hFU|rvF zdHsRa`i{f0;Pmi-)8j+tizOaS-8x)|SX-b}3&K_nM3geJ_k+||U@OW0ev3&7n=0{{ z78#h#{a?R5#rXJpX}gBRy&oomeR5c7H5xcN6>1uVI_WbHSAzd3S|l=`l^h)(@Z$M1 zmWw%UU9;J&X}eacn>#mD>&=E!)Uqz4%8V})11JBTjFTZc!qkQE_%lvdh4*rRX)Vc; zT1N>%C(d~Jf)+<;=jlU-(Pl8P00@e@^W3g0j8P~Rh&YbSlGW;t`C>tIk%RdX-**J> z(4+&!kIx^I{M_~Y3?88eJm8VwsIzaG9CNhsC^F9!an?_G0t+NWx{BsRif>!L_I?C)$AIRptZqT%fZ0`i{%nq6kK0l^WEF;s5X@(pqF!uEokeCw(Dqn zN839(=Y?@I$?2GwG;`E{7nYRukugzwJL(a4KD#yQnZa|iAV-}`p3m&e$7A~2{y%m; zr*b~FjGGUUzL&avMd2w5wQamk+fTA&N5gS?pEy~qb{an0?MMibu6J}@PwyhR?1>mo z0ohS0EFNR$h$6+w}xQe)zG|5%G#5e{Jx5wFB>>A&j4jaKy2jgN6-{alS) zJ%LY&aiIsrdjvevZPdsW`n@M%uobxe>hQL%)|7n|w93CC=?AaN|$A^UA=-P(5 zs&LK;wV$!62vHbh+5oX*dWd|!HhugI--;=5FxR=s@B{{WW1;hGoks;vkqVnB!~k?2 zn$BZ%E0s%JPuDj@JW5CEbw%tw_41BpvtqeCToai5GeLUJgQx(T~5*zMCqP`COUuxIQ() z6_SCF?YL>AWN=z0d83Uj>h0#ZnuF1m;hV6ec#;~C*T(E6sFgdqe5@5x!YT`x@2 
zSd=6#aCVL*XJU+`l+amGRm{HgrGfV?xG0!w3qIb{%Rm3A-PvaI^gnm87LrTZ<^TT)d|BDIm6auzY~l1&6$%C98Hc-@neKzyT8^QV7WShQG^9jh*Eu-?&Y|x*bnOo89kfz7Uv8lo*xapgbA5&9FWz9; zEOEA6p{Z&RDKIR?aBRtG%u$|%hD}RK@aV5u+{O`{d4#c!WxvVO#?N4$dGDUjd<;f0 zBY)0?ZydQS8LI$(4RLH_tK>twUf85P{Ou+gQ7JfO4u~T!lY!N&v0?undyB;5Z`~jVJxY^zTeJ|HI zfSi)+Sk`^J-KFM3#>B>OLrYhwxef_M3E6$Rca;jwJs-6Dt=axHrr{6gcv3DJDn;cZ z9@IE5aK1-m_QU5Qe|OK*#}glq^-weQ{rMlI$KyDT4wQ~!4ABy=X{dlGVrf#wp;8Aa z5XKD{(+u-mM&6@wnY=xtLsq05p1pjAckkZd<;&*)d9=G0*O!<0`R6Nq_;8Jzn=Kd> zj9G$0j*`S>BQ$FwFfknG&_7{48Rp_i{bZeI82(5OjaD}wpAVCmV|f;J+#}BjK`+6o zCp67=9<3w5c^ClKR~wwIHh6Kd!_w5~+Ya0H8t@L4Hdrl|SS}Y(SCoW{5(kgc9H!AO z&Is6#3`A7mM{b-p_zq(8(qR>v#~;k_<*XoEj&D|CeBux!M#=|E`Cdu5OM{d=D30qn zU8^z9CX0ENjA67wZ+leD0)P0!clh7_^MA%4e)|sAZc)`0ZtvFk|Ni!OMC_f1qU?FJ z+79N!n4%e{Up%wMrwAZp&Z0&a9mk)MvJ{Wy;2u$+F8NvPDG;Q+Vqcgg*;favkDgOr zInbeSt?Y^L>_9z=#R|l2MC+wA59MBl7<#s(vQwmd61v1J$=N;#*IAj^ul!F9OY%B~ z>lWICjEkc;fe;I&xIo*YktSN?#p|vhT5UoT#2#bOKo-|Oo6eCMEywUI0HLB303SUh zx)^gdG72eDU>0MKA_d0^N*icx;GBiE9lYJx5nZ_#cyxV-#?yF2O8)HDlF&+xqudn6&v-rF!=iWPGQrGE#fL`uJ6?lcUHyIkgr zMw6i!>nTXy%nZk;oR=&cMPw~yoiU2g#KjIc%jlJbwSMsZ@p()$9{2Q1o~hFz%cSL< z^GqWBApl0Htm!GGLzBA@Ki1h$!pwtN4-Z};7asFsN=Hl$qW)JO5s(A05Po|oFe*}L zNP(<{gOvL0wp(Z@m`b6pqz?f>s`+?t;hlqHNV7uDAyd4E$pPhd7fdC~8$(xPd0nY| zM9aWq0|U8pmjX~$yDAi@(giZ^pgS}EQlNiq7vkp%UQ@MYNJyg4FL^>S?}I}J-Dw_) zz+)ex$e@G<=*r;Y`~o1xuHB(A3byOKA{rB(j_kVkj4?o{vyYpz<4t?LJxL_&q zD2h_9julkjM?$Hx`CAt3A{~p}bfZClhL*zd%mXh~g@Vig91~pcvANsg=IRd5R_EyY z9=7WM29(iaSW&q*%Qr*Xq5)xs0sI4=1_bA#0`joLZs$F$wNOgoY;}fb&t71?-J-4t z=jZ3Bsv5CWsWfW?06lxJargHK$3~?XIHf=Rzc(0mA0Gi_S~CES7XGiXqVr?bLD3N9 zA`7vU`05mI())hgbUMf(0p~q?z-58k`>2!*D_zcwFB%p}Nf&jcD`=%rRnm{ndjpWi z^A~4Woz*y7HsHR;vuEdc^X3&42%Fn$w7V^C+Z}FiZn10{{NWG(gyrfPKKy)%|M9o~ zg?_VxBAxqiz$1q64)OxOGK|MERX9!?r)SBN?0XC6JgUl^eC;W)H@9e{P!FyFg29eq zh@yK?V*3Csb&?%p*GrjA_6!#^=pZ&C+#`EJA|lTW$Y_Jg80?6?mHmAZP(&fzBhQoA zLldD<8nrQ4H4BhZa0u4zAVD!PSV^0DrGq706Uc?o5%}C#iDK9CZ25I%=~@mHF$nLm zag)R`VkqDT7$_fK0dl2@VFK+OK8+K`RPnvXR6_16M2z!4#tKC7O++Cjx~{cSK}qMt 
z%#_yo_{RG@(f(+yQPmZyO1uX*S68St`Q>)w|H>@4BXB@WRNqAG*PtR{%2sj+2QK)8mmPORT*47yG7S` zXq-b+H#lFNNj!3IL!I>}cVZ;`PB}2;%#!7@Q2hP@Uj(uR^kG=gwJoaJptn8#{`bGb zJBw9Q;k)nNVzq3f&rR$Q?WHN-oC9c|^6_hfIdA_{8KI8xf=O;jp?o<1?Gy`$d4T~~Pd>KWd> zdyU1ihNd(80>=9=^vnp1LygCf`n`oPEbsQyg}uu432+EEj-5pl zzr@LEBZKa}L+>Q6+f&qQ`M3=v3o}0X$<-CuK?|#F0G#EcN$3B0nFdST_zYM7R+5SM0)F@IHTu4XZ>l7qrHVnA?&LA$<4Y*WPX$9>FcTI#qI2y1gXTKEX6pSI zu3`$!E5oi>6vb%3-H!_6Fq?oSS*)#cJU17=Sz6+ptQo<*RRlRcOU>Ce|nFf zKm3HLS5Ie ziv_A`gRZmRI8!+tqOg(6rm6{nm%hsiMjIF?G?fXJo*cS%2j{JLO*F%Li>j{h_T5|j z_P3XK|KTHU*Pq}Vr+dZcI{4&{ch)BB1Ax?>K5D9`ez zB)b5S@E@C>)(W=ogC|Cz@&-*+&(fjX zb7<1!86X9gc9#Q-{c!jrt02!1UK$e3X)exjyzcvcu)YKU&cm^E^Hs_~!kYuG3W0;O zjE+6nw^%e9yPYK4w_S(6ZSnHO3%q&r7T>@BLC!EUh^PH6Vt$Zyog}JiJ#SS#u_BM; zaqQ0m7=JhUsrL}xsUR0B9eT=!ji(^mW23MiQYj%FW5I6-Pcf``miLF<1Vw|g$9Xmc z=g>2X&`TBQhf!JtIIqxk9d2%}v8*+^_BH%!0Z=&K!LqJYokCkY=FiyJOK@t@r*9D= z-;i>wlB*i~r;L@4RAOGu;C3>W%qWS`D?RdDm)2uBzu|ACa@4~J6e*CFQu#beGE_>z z6H5tw7a}2wy_qvQjRU!2C-Ep`?n{6lUZdfiOY#D#O6W8?m*syotF@IXmFs-*0|33dlYnF@S~ma)xFP z&C*EF_C1!18q2zp5_U?MhxHDfvmmAL?BY4zzI%&{i)VQM;p6D;AyMq$%1e@0tS*tl zGLT)2lY{aL-W2u>sO$-OOxPF4cz=jV3E65uq3mH|Byb?g-D1BKFf;nTPiKDF)Hpv| z;`Pgy@Xq1t@*1Bmudu$`q3gu!;X$e5l~{fY_rf;_u@M%4K_jRXwiwe#J%hX?6npRC z6~QskSyA+zwdmS5Dbsg%Yuw)6iD4x*^BO<KVJYcfdV!)+EsZmy;&>QVs}(NJFQBx- z_4PIG?rvd>LbEV%u0_{v0e1NQ-@eB`{pk|_>8~E-Si1TiC7@qhE}?2;QI12|@Z+VAlBH88oTB@R$!h?BO^DCR$)MNnL#B z7}k39okedQ92qFBXAekB++<*H&T{k+7^6|wHI&+&Se57710OZz1Fo!8c?W>vP}vAd zLa{v17=uOAVBtMht0lbkFe`(_vO!%NFgqwkc=_T5o?t6@??9*OBZp7H^)tQB4mrIh6O5CzLctVS|rX!r4clyE4tZ4Ati(#)P= z6yeRQ-{E3)hTUd^4?lmv)zvlbwmT8TL*lDvFZp7eYtgXER-RuBQiV+PXxZ{k=%8W- zlZRJ~w%y_O_7>~i7NitB7#}}=#P>h`fKOLf=(;Z5F1qxAA#r{Tv?Qg#WeM#E5`R)E zmD&6UJU+xcr2DLIQX~|C94A_FadCmFuF!RzRI5}Ven>S_T?jO}&<*IPKxxW2r>W+U|27=zo}TU=jXqu=e|oQF~x zi)JC79mlx7zQlIDg=YnJ9y*z0Jpdi%F!h%G;vp@)FJC;W;puRQ^>Hvn05QQcL5C)T z&MR045j0pKrL8|OskHQh{JI%l*oZ_0y?8?6$K=7zVz*w4Mh^)e0)?93-T~my141n1 zaf4GJ1qEeq&z_6L(4}ek$;S*fZ$KX+w54mHwFG9sA 
zg3%f=QJaC#)GjL)rzAf<%E*F8#(N|MXt7v;m~ne^i}yeMgzKAI?Ajfy3pGwS7A*)a z>pfvs-wMR)T!8d6QbZ}7ltp7eEV=1P63`u_Tq1drT#)NqY&JVARttD``1|+&iXVUc z9_#g8NHiV7G;a3`Oq4mqV($+LVDAT^ec$Sy{qiA%^qm3}i-&|OiNQ+Yx4-{AUcY+_ zc2uUR4lUtghXJYMCH@D80D?9}I#*sWBf#@lcXm)}R-Xy7B zq&&mK@EVfj@xJd+*9&L^Ksog74#w3`M6k9;*S6TTJ6v5|;^yW$jszkeg-+%nl*)yW z3ooh5!SagW7AxvLv-}tHF@*4_)D#FKw`;tJpt40C(-oe)o)8?dgrp?-!rdH`W{2f> z88KdLzx$fgd`Kb0+!yX|A7kJ4s9Zo{ZS%{iqa7l;8 zN(Td98^=L1&3X6D4dM9umh9fBekNKA&k%=<7^xu2TNv zu)hbdc@R9k0wzs1lNkv_8Rt0Qo!I#lKpx5{yngovfBldDf)}q|Vtads^W_SS)>6R4 zS%3`?W3m&e94W%%=6q=!`#~MCGnQqr9}4k&WynNP6bR0}5lBKlO=SX|_OgB;=sC*BZpxx9EmmDEK?R815EdUSGfbu!6)P$9IP zh+vcg!>1?&>te+y0!XJlihb6IrUr!La#1|5f)K(clST_b8cr)u!FxutG&stHlG|D& z8B<}r30@!XL(vqdNIJzR1(8j}SW0EoYq8TA+Fc889Ttlv{^c)!#fuj&pp?eN#TlMI zJBPD9)_1o!KR?4C|M&;Idifj|7ZMwD9eU01eTQRJ7#`V=Tws#vW zn-wVJOhr9k#vRUMh-LI4bS~?k6#7~X4QzA8bM_f@$^?G}1*WW|3g6^O^X0ss@g8G- z!5CCCjbj~rL(U4bN^+IRBPtXhm{Xsp_b4AbF>3J?FOMqUN&X-hIk7=NW`=c));d@Y z<(dXohkS$Z?DYH4(TD?5TBE8ov{A6V!<^XrFgQg4d0Zeyu2uwuPN+dZ482uE2I4H& zghi^1LJcL&1v`0DlQbIi-lOj=w%aY*b|lQ8g%$OoP~rx`s0j7w=wX@6du>~bDmQ@$c$ zDk)h#+(U@c{XHE=SPhEKMbQ~iA`uMU;W0vzS%6p zHSTUVSg+SuuWwPWmhiBM4Zflg=CQpAgLrK&&Nha7oL?6ut;g$Gu2HG*&oH9Yzh!R{ z`sXVk_l5`1pl=(v_&+vy%b2@pYP@*#?1{Z>4Y^m5dkh7W`K?PxJ(D-!LD@dcihvIQEZbhLrdRbyPt$Zm}y^7Z0^xY!# z7J(N>4KAJC5n5*j$On`^bq}W!8OPP!Bcx5MQ&FIid7k;gOzQ_`l_=bXIkERe?r>C) zhx~p#6z?6nP87q!QbNuN~f5h9@FHtuJH#b-K>8GD?bA1ix9Qxj( z-F4`?4g@Jw({)y&K8j~2F(_JL5|xa&kv&pTR4Qrv=g6(m5LQCecZToHD>0*Q4gZ`~ zD6-jOG>{p7HS^8!g^wU=Q(kQtE*9B)hIKY{?VKLQ=^SYAcS833B%qk2G16{7zB=2RUSp%l(Jz8gBZ4Y>htE(&g{qO&V^Ji!HX_UmjF3CQTPLF+oIoWfnA5j11oKCURPLE1{D~s zmwZ7_$BL5t5+Msi(MTg@bsaQoRA5v9bTZ0~ENM~BOU99`wBLUmqj!RLAol*?8RbLz zZ77uG)b9(?fOkJ74Oe+JetKj|18WhE`55nwz zZRZHcj3DgGHz98eo|(Y`V^Kh?cc)n})1Bmpc-ZG7YSTavp@~9wOM||5@ZI5x3|}6O zO*Y1j$Ia9rFf_!Kr@%0zpcDt0l2f8*xFRGUE7ZzhQ8h56rA%)s=LcBVqw72LwwJJr zs5J*d6l3mr5JNh&FqF!Oe5de6gt`0!J_BMH)IAtXR6r1DfG96f1UzwHPI%TK??-yZ 
ztYt%R#F!e++TitwB^!|}X;Jy`GnV*Z5g*R8eKIbT3?KQ-pL~xa-#wH=@(*|#5Tm`|B7w18-{Jk= zzsCzQpuUH@xkJ@EGz?VCP|g9$AbR#5F{SRC0|f>-4@Dk^2^AP7ctQmC<&U@qu_#}n zPk1~bzAPpJ43o~o7cy-LO2|Xx+8oyYD8rtGNtoA(hOps15R@*E@8bC?G<^ z8SF*o)Y^bZ!G}<^NI!iH$BX0mW@ORNha4mx4#zh*H50uQV8D8hw(CMi-%)5o+8Ekv ztR0la3s?Ifut-=wK^qNYDiC$Kg2ikYz}JA8lTHY(NJ+911w`V=2|mnAK@-pzh0188 zV!}p5iqq z@mu~JRiXlly4jh*l(ECCJW8Q9dOB3{d=vrelpMS`g)+#HXm&2cJ?4p9eGKxw(z_`Y zcYNso^Wnn>G{o?I5APV#!-e6kAK=-Xg+1dFC8I;E1wShQJh6P6yd1$(8HERx_CT6O-$6HPRK3OZr;niB4ht|E25JVZXIRB>$%{99CIxzS$f-3HK)*gv zfujN&r_TMZ zit!h;Kk^GM@FnF1Kmp@TZ3o*q^qqz6eOf<)^o>!Z3nMH6q(EHq_^9la@<-NU2Xwgfq7WVB zw_`~18Zr*d?b~N7%wbQX>=C_oktRIoc_rO@1kKn;*&R7dy?5xXD;7Txwb3R~{&Vjj z2}MIL5}{owjk<2ocOFV@LpfFTn8o!f_wYDmT%Y^?P~jLR zEGm&`LkZyUS!~EuT&E(Tr%V8tk6R`p;~R!;#ZNfR`Joo;h#$<1mOFg>`4gTsHJYjl zu5mfb87K34B~m?6F!HI6?|&_8dTjI-D~wSnc^vmjsZt@4-!A02cn5$;%4KT0;O0P!U3&7etPsKtbX40|;F@-+Mk!)cFKF7cw^NUOTmdj*LlPY2VUd;-6!e zJOR`0?++R45qeTEGGY#q0i5XAoX&U5Gn#~)!dPKQ0X}5;G0EiQ4Z>aC}k3ERS~c>8mro1X$+j!k_gSYi{*Wa z+75Z5m`%J=P>Ih7&P7Ee=Vuh>a(q6*!-x43`J8z!1o9~Ok5)eN(fDc(N?xdc{2R9d0Z3bj_K4FRmA zx`HFH7xE&>yB^+J5P(-NUSijG_*+%sZs(=OwRLdL!@)`DO9+1vf~v9wB_i?2>roPw z^79DkK>Y(|;XdJ1>6-9gPE0I*%J<{}g%w_%a@GgqDish44^9@UdqBP>D+bQ_P$M2t z-ebL9L!Y0Q^fy(%845DLbbOi{e>^{sA1A^ud!TJQ+}_+s;-j^4`Ak{siRTW{d-b@j zG7~chxaH~o%xkJV#t@TzN%h|#z5azmT+!Ie$3uZgEc(uS;Wc0wXR!dV(i%6^L$inF zPz2*w6obV;s@6sXD3&5Gl_E4+VPP~HqtPjaJ_^W@KN3!A%9y?U_Z(YcP>~$W=?${? 
z1Bo~LXuz4j)dF=UPA%!*0*GrgDjy>JX+@ztAWH=fRxpa>M=SYJAy9)EtQDLKE>HGw&SSgX z;rjXpuiw3aCXGsIkmsa;c<UBGhkr+*E_y{KuGg9KpT1joAwPeZJj9bi5%J!mXOFw}28(8qylw+@xrayTYXB8L zhrW}4#|esEjyZ%m#zvkjUF?~GzU^>xb&cDrYw>(wk5#LDR#>mOCX3g@BsOpUiJt{u)wA-%aPKW<1W&t!_eLSk1L(bn*9w(VXh`9_uJCNx;A+>R zWiD`e=ym(WuWu|{4uFVZm{1d;Q3^{_p{^=;(&DXf_vdL6fpM(R_sp=kq?fU};hexN z?>X-zXxnoLeI8PuFA1gc5u0V+7g@dM5I7UO#9q80-m^3V_KdRrSmt#nLNKCq-453F zXzCj0=Vz#EgWYa}_wRqek3ar^ySrPoT@UX7=b~XP<4i0tqLhRXTNgs~I4h`ovE~V6 z@oXr5`bOL$?*-F!=sBj9M&fuQ@5k2?2aPaRQ5t$XWJJ?rl6NI-Trqaf4Ci`uU5~ci zNfImP*k~dRA{{KQE_h1__a^~kIKgc+d8e2QP2JNmZY~N8n#9{7)|eQlE1eug`N-iL z1gV7Ol>(Fkv4i&xz3Z{w?r{0>3hj3-DrHb>1EVyQ(vW({qO5rDQka!zv4HvL{`3$< z4&irHJ_-ki^!O-tPE1N+<}<7D5Kq*2+m7&nB&3z&EW;c2Sa0u*9buVbB}J@ z^m5?s3vZ?0l>UYAo>0{~DItixL*fW!T__}}XN4vFAisBTvPXe2r8gmFIk?q51M6*v zv(}0?lt>0Ivnj?N+!l=mRqNsoctyC;|69a{ap zmov-kz$DE=i5afz(Qdcs+7<*(R?cJdBBj#)pEjSldmg(u3r1OUJO!rWL3FJv=AxwL z{l{|ry(70Q^>Z5L*v!fZlxfSM+a4TIG>j+orG#7ZF%6ZpbWwrtMN`3jW5|kfz*i0? ziQ>!bS-PK_3Px907=!c00?(Ef%exKkdKZMfC|vAW<0No#`hOpsf;C@KbdD7!Dku;#V*W@`Z8+e0nWK;fmgdBR(#Vl7QZR-x$aC`2%QfXZW~W{3 zp?o7&zKZ5Mh6Y=)EZA{Ins z)SBRvrdDcXqv4V{Z9)e?jTTr0ZV~uma7p=NUSE}Ns00JS=k7JUeu}+ zf&k#4(4Sy&q9fTVT0M_J^#Fq0G)omP}4-_3a*d9RZ)IvS6k6P8E&J?URIqr{XA zlunD4ALD+;-557ZF{gkaRxre}%pt!3B6vn3pj9r}T#lXKgYuDuR8fG1juLVV@^GHM z*z3dd8H5c0;G|IG&q!&I(qPY6w+^?P4oU&0s#6?oBFq%`uVgTk5Hpf^q4_#B(K|-K z?=Ki+@h>VFnu0h+1Wf=3V@G8wu#*r-1Nn}osHjj98a#V+&Z2W27#^gB{&??Udkf!- z;$d{ITsId#D-1y8a15m}-_$S9Wr(_Iu^|eUT$|1c(db+i`<%{Uk~}3@)Xf_j2Ja;U zlr%UU*P4*;9NDKCMmM1sM{Y5Ql}DtjP`9mz~G4sx|MECiD&2;E3x$d@bK2bEHzA3!RrdEn(=I5ab8!rX2wnqG*~I{ zOeyFn^yHH=ob!aP98;zllT-YdkZ~Bn?!i#((I|zBy1`jpV__;NZGg_gS__c$-4buz zkRy~Kuw}soF81{h#Oq`Dr1w5qv9onQdJJ=(VD^xx)Bum80s~+eCq@I|Zq6V$P#ZZK zTqudq6CN8r_nf_=sSZlygRyy8#C6I-s%HoCl#2z0by7*OVB}`fLSG0XZ+PNDmY|Wx z9eV}WvPq!i%(n2Zht?92)3#e&UtOZNEv&Uc*#j!0VLJF(MV*}uohZm-Pp)k zDFq#x;H40DjId!wUl{I-2;qmkjfv+Lg8n3Acl?w>d7B>yfhcFvl#QpA&h$rXg{rDh zRR&s9;M<(;S>nlT7;%&@%DM0eZ%1$v4-eBZ>;Y#PwikCAhlwdV?w5!HoeP~mce_@S 
zg4$N9X;}+Wd4kf^aLT9;sGCm)Lt+sU0i%EwlT?e42@Qf)vCFDZU}6@bQWpYdAV=c- z8W9m@SZi@}x54|LeuilbdTY`59h_$nNnt?=Ve|0e$FTyjFB&dIC4}JuLG|9J%2q&R zd*!M|P8*5Ih|9%2b*V8XcOAoh#l(3O@)*dd>B!Yd+;owYDp#;&g zS}7xo%EB?hZbmsCo}F;Uya%f+3_W_NO+k*oiA40hFvm;o2b9Uwqrxd88yG|a-X>v* zDRkNvCCjBVCwRRu24{-~E9bDYfMwwmzeXSyV}W7^1E5wKi@Ju_1|XXi!GXRT*Fr+t zJ_l4$z@p?|Nx@8XJ{B7hIcHED1`O#c{kS;*iMBk|`P`!n4ZjiP& zJ^?-bN}!ZRRaK~KQ9!aH`R#!5?4{(Zvl7F{97+NzN!mF8j8HGX>#*H!uwHM`?pj#u zAbB1|_ee4258QHJ6DG<>6abkaVJ0ExBUm^TMFqr5H5O7q0nso@huja!ma>tRh!M)u z)irj`;osi>2r`75+ndlJM`~0{wJdSlI?r$(!Gb}dyQ>3}0yCv5S->6vG+4$SgHfLX zbHue&I$|hU>B9RpLG&8NDp`txl_d)ADqB#auuO!EdjTh-@*y`ppnQ}T@3jB(%r@;y zw3g0Tj>mh~b{$wt(FmobVJ6s#aPFsNCH)8ozvS*l57GEX5rHYMO)d@)41bD7lFM){ z;(a`XPIkS;Znu-4c89L((7PVads(0Pft@x7Osz-5-BgGbyij~M|BiP##z1FqgtBLb zf;B%19={fb{2(ZF>7m_o`rhz54&IpgwTp&{7{(Pr5y2}7eN$S4nBfVq@NfipC3Irh z)OfL4;;QShwmmwQ`a@+mnqU0Bkq5C7zUvv4(Woj7Q)!r5qc;RkVk9V0KCY(18U|vJQnNuf$@lF!V=;X!-yXMDSVa)%heLE zU%$r1vkQQMu5HnFE$X_$YPo_{0IfUpt$4x2`^os?5|T=Z-Sv`vv3rnvpW zk4yw2f-!`;uF*6Ns;Wwr4`Y7F#3QY;rz-RFryZwZ$6zNO40ayy3Q7lI$sZ3PDFpAyYp6P}1-L43rPfF>vXcIKRim zo#T5qwt#c6mirh^R=6NfhA;0N`z#m;z^Y^tC5J4m^+{~XmkhuZa zTM}{xWS0kN!+Q(p3Q%0^ zPZkmrcC+MwfQ2ZN%9c-Od-S#kGo$M|v~3FswF-+-o_-qseH8b@xD(9CEffqNdMZ*UJld6>C z1-Z|T#jx>5H4r5EWf-*)Wt)8pk9c@OXfaPKVC{Wef$x)iN+oXRzVG3zgV7pB8N7P& z5`X&BA5kfTpMSoAvp2Y9i>_;n$cpFz(GsS|0F&e$cg{&pn^u9oDybgmo;Wf5E1XB8 zE7Wz3s;UB>WLp>qz8$@HUki)~yA(nw0HYN=RM3QmNT-yA zh%~f{p&nk!S5is^Ehhg!ruc~9oe$v^AqT{XfT2_@l^VY}v^-(-Rs+p&5oNugj`EC_H5O;VO8-RV+9XLvFNuNh5|(M!cL8 zu%b#6$LgVCsA_mvqz!^I8cu1n0F0Q1Vc<}a#FYrK2+4rgZ5by*?4#8OdIF@%6@mwM<5Ii6TP1B%m8W^KxO?XJV!W{dS^i@xg;vQ0&ysV$-mDfybp2s4A7FnzCr z2ZRAu!h{v)Mm9NICJj9^$6^Y}9f?ASRFb^xm|!749Xu2(Ylztpuhv_qC z@O6gSmmpq>X;SjwwF1i4Xk}YSilb@Ua0RYz7qslT0MJ8L~Lw;tWyVLznbomJwHZ#NVP$G+D zHHO(^#0yS2<}weTY-<#}K^}s0rwH58YZ76gj1XgWzQnuVyv1*S`&&GJ_8hfopkT3S zTeRC9Zf>q|bMpypw+6_E^H_GN_vm{E=K$*&owXsSmf=W@Gu9Ui@+$xp3f_nTMWL#z 
zQ2k0JBikt(oN)4<9o`=T#)kxQ>ue!#MMhaxcmceVBHQL9nsTgg#a_SN0~6aU-=k^w-ujE@Nm{klJMhU%duQ8(KHRz-S)t@Qm2}SJtW@c z37Yjqua}C}Ri%TKI9o)S<+U^cl{3r1+i0zvxw@`kt;OZ1Pv{7#@Ph}VEV(3}yFeF7 z^1mWXyc&-(KdCCnZYV?$iSmokuuH}^6{Zvw0-TCTw81-87*+C(j|pVi$OJr=ZDMN6 zCmMNk%E`xa%d;Mj(d%M91Psq7ki)Km5rHXsgo7|97h&KPrhG@10;q9dfgVU=vJcg4 zbifxed`0WEC=|Qi!DuOn;H?8~9W)e{%HX_i&?^rQg`Mir_dZ#=DTeMIEeH1Xp`?QV z1LUC)lFl_?6k*ZSXqGiLPI9rlb72PrzoB099phcDBOV*$Q=C zCneuIi`{OG&E^hWzXOpE6-c82P=Nyt@A?j&3B9%Gx?b|ASY$jG`?35=6p)l6kdzP# z)vqcE>z-bb6vwX##)8p#2kVX>0BhyYuh$!_*K71nD!&j*K8W`gTF+O4`PRXMg-Iu2 zQ3y5%B_xI`4+r>%AQiISvH`m^r!dXW^LO@Bo+>sD=W_FAb()RUxi>kVsA-#X;c6p;;7! zkzpn3NuseVz{>34}49>OM>e!P=ZG&L>_=xuoTysZmuXHK5w>YJ2#;&xH&; z6gUnAa+YHm)a0d73YfUk#X{y?5xdQzWVE{-db11qeGkv#3CqtH zdU{C5c!(mxMVxcFi!sJ`ZXpyXb%!RBkhC#m<(8#t8)%rEIT_-SG1CYSczVTSP@-{; z)Znf(k5f|xga?9%9N*$RLvWsgyh1VQIne;I)MuJO#q^_gJ5ru#^m_!%9?&|_bPwx2 z91%Q|*h z-(%gk=$(Z_*{3ET*`t;<^XXiaW1aox0p(*hEQfP`#Z#1GoA*FaGLDts^BfH>ry)%A zmWKNF>E6q+#54tp)R%_oNg-ZQ%5 zbHMCukJidYAy#stXw{Z4#pZ#1-(tJnVzb$bbsj*pJ;@Ea3?CfaT!kW*^~Pzv0^N-?r#Pl@PgO`}C6h1X5J zVjTOJgz8@7FpP-&*+PTk?&)KPg&@^A!kh?FCw!nVaAqt%Sba++Br6TkdI^vU`=l7T zw5~@e=!{rkDK@)H)>4d=5LMiBEF8vq^1*u%=Md!?#&D~H_*1-+ve6vo1;fvOF3t)MMngV!*}#}@b#!*<}OiH5_CLy2E;O^06@<5=v25hYq* zl+O-0Cp9w>YObLQN~4L^Ar;OM7^LHw6-5yoyX6u{8)+h1c#oph$QT1%8E7q~4r9TN zeZq-+HWq{juf+Z_R*GitR^CJCcJzLhJFj@ECQ4+GO0+<5DEAbIV~w%hGT*5^d2;qA zpO40Q!dzzOHn``MF^(E0%f+U7 zE^Y&I>fE^o13ZQ&m_kmpD5+gAT=w1{OI)x<17ZiQb?@z$ae389e;N6BSvffFVO=McdPu@46j8EDFr*|QFe7gl$hFt;k#K6r zXo(cG&cb#nkROZVoGcrcvCjFud%=vw<``BTeL}%_%WI=_JRjo`A*8)p zRHzp<+O9=s<)fM49GtRer^B4mlD6&ObCReR7zyI)F{eH~vPY2#$}2_)nIaL8GBNfk z&&pYg<@p(w%O%wH4ZIKeTIn205+D(!W`)N>GV<(-*X7<8=P@e@f%zAVbtvqI^XoY` zB&GDx?D?E14>W!6OTm5V#~AaX072!XXK`wls0Jn-u3;pj4~1S2nFF$}g64js`uF2B9D`tfqf(;zY_>bBU60-|ybf*n25Y{51W~|E;mWh) z58+4iN zjkIVeJ7gt)UTGRf94OMzx`sA2lnFVN!MfjfE&9Hbvb!z_?Id^*tH4i*ZHiod5GEX^TDkvU)&n$WLJHFbde4GxESIru-Cbyh^kG+F34VKF#&d)ET_Xm}p z@sdN#-ZXz24X^yr*_;N(d9e_Ui_zoq(&4`jsJ9=$bCM#4ax3zdb~gMH|s63Tjd??54} 
zjg!DR6fqt)N7-973Hyms_5JsDtgzfidHLpHWF;G}k<3vl)$0WtehlFb7lkT~&qgyd?E&{ro2 z5X_LCI|my3IUDNDGo$Z&yl8~jgQQxPCY?eUh>#LAqmiCQ8NsKz#An|hzcJ9pW|WGf zPx|)Y%h8X+&xkobDk=M7MUTs^VgkDgwq8H1v?TY4+aoj~s4;?%Dk@0=!3QUyb7_PG z2ZGfEENjrJf@>K}7N)M?bq6MeMXj-DYSh+a=lf7Xu`r+jxDOq8I+zMU(fFlE!LUb7 z9xJ18Q4!8+1#L8{T4C!MtpjZGeq==;g+C`T@L_Xhj-_RD80s8kuQHB!dmj%z=iW*w zQLxHT&N(AT@Pa(_{FQkib7A?BU8LWoLdQVeG&sL_hO@IXR8@^AH*L3DTwPybv)-U< zI|&yh6~bTz*BJl@n(Vw!1$uI2PJtL+Ih+?vmaVMv0=SV4s0Xvt() zFo-+=X&6<5;7JAdEP`dd>?tBvGw&UGYhj!Qqe9cv(uYK;P*q2vGNvf;JJz}%XWgR* zL>kaUmo?JfK867Bt5HBsfLt0@et6(+emK*ZFp9~o=Dh@;3(+&;S?aNCttDT{7!dVH zp)Cx|l^gz6z#5406=g$_nc(%m(^9y`h-{FT( zH|VxNv#3!!2h~|55i1SoTiARC27)~&Lre_&)5JJmRCxD%g|~|Z3>-WW%(BMDdw3th zhz9c+Dp~{9`H)ct#X_s``{BrOtQI6sdq_^#L*R3~AalycPys2=-RI3??5Uj}&-j&K zY*p7dJ3qr}wUUnKMq4RaBIuq_I>;Xc#Mx6f7p#j-FKHU3#Yb@`3$1s)D6>pjb?XHJ&7Q}$o(s#o4foVly z=g3w~<4>n+FnurX&FOd$chCm~qjyf4oDq_z5y;FpK9_(K3C?=qXkrYFT_>qiv@xAynZ~TK=Q-ha6U7@ zVSyMUi+2jg3V9QWZ6t%mR8W5kquZE=?o&eM&RA?Ezz99Mil()ZXOBamCb$y#2h;c& zTxizwV=n1T3IQ#YZ3OS4RtnlEoUP9A```Z_zj^l#XUnC~TIb-cL)Y%m?OOC*hpz9j z-EGnKJvb<%HdGq5zED6o1XLxqlzDy0ONj`vzC?bn@PZ_3K<TfL6O2DghvxX)kmFkMdEcFCRI8UqKrT2k;O4|Id0vd z@yx@%^JWqOjKQXb0KqKfC%3z;#F`dz#W3RdR|2syVIW*e5$ejIscTdfVb#=Ft(I8S z6*Lk0-3Hsu9lEZAZCjA{@ZP~W1?HHDoP`Xf6r96=lSK=v(pt3pR$?1h%LU%OeS_W+ zK3(3T?^{UX=lnr`I0#jtl*oFD@)52Z39Ye`Mh416Ckh}ZpvzcK9}}4=N=ORzaYH@LML4hda$YD=RBWU@5MPJwOi4XHF0tD}G}ElFAqH9wigzJ=g9f zph3WbLQ^lWTr9EOwy;)`FSXL}9&on1_ud>;mhVkoMgj&4*_Wakl5}q&-EfsAXr^4m zh2vf+C{>h*^d8n)I4k9Owb58Cm#FJn>a0uWr^0@h1tVE|*)gTY`?g;w48t8yZSo<9 ziY`%@a89g!-gD8RzF@wE5Ef(B%ku!9Dj)mS`QWFcWWu2;bvm4qX!}$^faG&=&LK*( zC=6EFuyH^E0rG}YR(6L%!B7AUu?hKd0vtT7V${!8c>TM#c>VGm*0oq(t*|^_qFQfY zdO~H4;Ad|KMvUR?Fu>OIoCmGeVVIorG-XbThl)l%t)LaKI$xkUU%;#yDW9zru5a(q zZau65yeACHJ;$W*_>w~x^C$!l4R=wvDuL(TOU9)TTl7k_;smVc<lFA;l8fZ|N-0#8!TI?azWeSw{KxxRNH4-}M4vc4UVGMghyP$_I6|E*L>xTn_a*u0X8>p?)-d=NM;gbyr9_pw4?ch1!)2r7`0~!1K<$jzg(@bST3N94y7oc9+$^f#Z!-Ii3|I`GymzY zk=}oGh+9GP9i|0>XQX)Fvd5-takJgvX1B#TFJbB$)!7QoYK3NDVY(jLDAaX@x~s%c 
zzv7jR$+skhW6dm&K`GBLFr)R) zj9RqEFLo{-nuSm*$EDXJ@#-FE;RAinN|g(qk9h;$;K`pg_c(SiCnaQjOJYijuvhan zJ&!iJoB=-yg<$p|Vp!|2+qS`&9x6X-g3(&4Hx>#>UDv_j7Nd4NNUK6g;xtEsOXOWe$$(%n+u56S)Th}9E5_t1ZnPzZO-6a-}kvm}MHX0+O(+iJ9p zL4_XAUcbciS1-|+x9B^I>#JM*^!_8RuC8Evi@vwPauMs5O4&~;I%8Ysl9Jbjj`B@N zZZ+l(M1v z2!oKW1e}D~`2Ex^z4&=bkfn|m@R6`O4quoIKeCa0k##P31#2`-gRZqHL7qs#XagU- zAgN%&lR+??M8#n+*23i<-esl`vyY&PWSYsuU<&;rC@P%U%LOpBjt#CXx~|350)g;M{?RowH7ok&^R;;4zCYyby@7%Gm`E>h{=6&aF)@H8l?o4;@ z6>K(DX`M2ZD**cdz#Z-pDXPdOi)0_OtgMW10fz%e{?9qeR-zQrtf^7;<|Kr8v)%{B z^VT2N;qW$c-z%1N#VhSgx|<(t_&&N+m*(y|@Z62M#&q`@aTGHekJ;VXWov7Te!p+? zX-!30<>YxrSr!)VDg?@<4=7xTT@1=K8&ujEAmRNvZf`-ENxIB4SQ=v zLBhY!+#79K9k;_`_5izJ4MkDfhFeNmCh5x4hC0i+JsK`V-}4&ce8gyZ95=JN%w_YXKcKC#KG+RkAl zn}l@X>c`teDJvY}I6=4y28+dl*>qYnM?1XLxw=SH>eTN8$EXOGfuAvRiZpd`)_Nnj z-R+g$?iO^~eYf}9`LABk!YHqOiVH5EP*q)bN(htkWa|moGwr{oKBm>ZT>6Xw8!Z#g zYZD3FZ+8y%Z~JrF12yj}#4gwSwEIx?Xa1a(s|CLWKI&-|aVHgA`9&e5El3bdP&gKW zB#sjXy#Yn9L@9;Q_Ps{Z!kVeXXn_|#zHe00*iDWj&4K zoKk4(ZPl*Ng~ZQ-Ed%O8Rzgb9R{0T;Ac`Z5QP#1L7n~d&b9{6}p5-Bifb(NE9u_^rWt>39Xx%scV zkE=u^eWzz2N+juMNIDv#qLhPKL9|rN7MjBJsANScGD?$MMbpa~>jhb<#I}}xm_yDc zQ``FXPh+Dyrm`M&WdwOPLkcLXiYSgrdbSr4v4uRQaZ*?NX`DgLZwoK1+1sf)w=upL z#Txz!w$Dy5Zod|q3y98z*R8R2lMv*TtU0II!PIi53y)rr>n%Ob{x*iX*nv%&_fT#f z+<*Q4YWR}p&)q0|ldoVs@X{F?o=4dzmGI;6PMtN(z(z7`qrn=^Z48EzvRqOWbMkUg zJO8xNDDO^^5gJwZ`cQR`v5OZ*@lgojx(hjOYUeDNn>M`C<#jmd{@PN^p8aS$8ZA5H zG0pq+hx_|9jxFytYfkFUpdY`l&!<(>tItzeh6aF@cTr@aS&@fpB~jfRJ4sWLI3`XK z;>h+rs__KESVWIsm;Tv>q3Pe%(E~e^5_?5TJ0U216E9Pd6(xC5QfUkQS=o4;f^~f( zY75i1p|vt|{ZXfn!g-=>vW|t5q-kQyn)MR;NyN^0z__0<84Vc^Qbzp*tu?dh2~w8~ z`#r8-o^bo-F7w5bN00kFdMY?PIOceoqqIPp7%3w}06Ia%z9g~bj1z>2h$8!22!Tz$hh#NXq)%N^RejmeRf#x0ggD5!yK+q;2M8i%nFZHN$=nRTfl5jlWhfRkm$ z@$n(k*^J|pC9jWj4i0A=og7mZC1s_llyXXmbYt(x?au2y4-8WG06p`dvyZU+$IhND zmS7=5W#t!Tx1UOd%*s~bn$x2FP`*m~4ezQg7$E`k9fC!sTTMmj~dMfw4+pDWIsJtRj?&F(M)D582unvN`NANMq7SpsNy< zXN>wi-oN)A8;CVXYl2Ox?s{oN1jf|i*f>cU^oNv% zV!6y}@0W1%W}Avvwrlb!g1t+pCEK0-Si<(vsh_H?{0XebDO`Ux>Dn0@vPpoArL^7B 
zg|7f25m97~76a(gVyBg6G#s+CvBjcPM8X;?79MkfF$XX|OSaM0MW}&0oIX*F5wgxG zxX@i_t<4oz+p00VYM`tRYUQ(Y$}btO4TZyaV_|gqKJ&DWpL(yJ#u?J#SVdzfD(`R! zpqfIjI_884`G`}jtj%hTPa!}ll(I^497iN^LVqw|GMRAY%4M!zy-L5|V?JN->g7uw zK6t>R2M;+qI<`(KC7BnHR|;KL;Pxc7=UfCo?+6(yCxmtibD*$v_mWoLSg@p&A}?~= zv{yDoJ%+;}+dEsl|Ni?Val(@)PdGR@z(bK*bOF}hO#qkTZ##!?n(J4SpfNuDwVV^H zPG(-|`StzfuwPkWURrJEJ6RUW6Qf;(+4E^5&{|OxIm_jeY`H*JB{rI^F}k2CbBZ!U zRRwvLQI@4^)M@XjT-7I2dj#7KEze6<`DR-q_mxPsR<-WjE)-#}87B#8nh-@X%e=5v zP?d5rI?x}%jnh7F4GsRum1RMUmSQlC`_y9b#NRG#PLA+D)vKq9`+mjT1yN zW-%>DB0(BKKZc!+5#vF^#<<7!=9t~di1DDuXfR+D_2@@EZe730?$#bRuHE3<``_{G z*(6gR&kQjOKsz#vM55X+&GNV3MTdc?nc#C+K_`SfzE#Gj{%f-`vYBy6^{Mke#)qRO3ao z(HCZTp#4;j@t#?NnV>?e9L!=baNL$TBDe`%SQHIC}5*b;?KqKxhGIH@!ufC@tI!Au4G^&g8Bj+ZO#LII9f{J~DaSTfBS~yCv0kso-rgQ} z@7`l?_Y%ux#$W#WHUIi=|HhNYPgpK8PNq|;s-!9_AVP}>ZCsPhAZ$~6wBIeZYw0ca zy+2PY2h|acb3`l6!v_ypE|%QAcbDJ%<~O{4{hCLQ9xmEo%tvT^)O&qRxAPyIYUe_r2Z+EZA;`%KA_*ZSNlcO?q-n}V&oio0qV)*SNMW(4S{w4L;P~*ElcOUnmbJ5}|K?<0{8@L?=PAf0 zfMf-pgr6b`5{4qrBU2mO&S;X-!B<)boqf4Zw7!m}w2Y6{i>w$Q7aH-KkpM&_7!8M9 zxpJMo-95^(;?bigJbL_;*=$B$7KDnL_aH?sSxrVZ>|Ew0d0tTDilVTgT&0vv2sC!i z)>TKdbJC5n&`L(V2pMBm^w_RY2_r{R2yFP@edgdVB?ehZ;z*KIl2R##y_8XZ$av6a zd30cL;=)4Jj1iJF?M^e%QP*(fZuizHd7P zR-FW)!LoXvcBp=x;(sv%(}-cL%&F|1%h5w=#`vKeN)R^|zD~sbwoz&>e?# zczWr&Jcmk_kFteiG4O%#b^mSNfkY__q3HE`Y;A6F>-H_~zV|*+6!YZCQ~vVTzw+0w zzT)A7M;sj;W0OiFJ5M6UXhEeF7`dVZg^<5r3e^exR=up(bfqdzPEII_f;`VzE|%Q9 zd6U6lzwvbsc-HfuIEted z?pSGC`KnSCMXvCecyp2DwVyHBq;R`Ht(#^W)Uk1nyZyT$IMON=KYe*gJL>}(Ht@Zd48UhGqpC72SeGK?+}!eZ1V z0<9EM2x44bhO!;hjj$dwDWerk>uGcmCfZhDe5%pu=XwvzX`UA0!hBk3w9rT+iDcAP zO3%yhFm2&4SdCVk7nqZ@lf}V~$>$H)>-l6VjZwC@g=LBc{Csy6@(%f~^1_9}tw&A} z$)=hF#z8(D1nVNZ>Y{?k&x_}QAp-}uW#DcIcHq%3TB_w45x*|ffD2OmVoNC%X#<<=x60HSE z5;Gi*xpDIrAH4q&Y1-$HfBqBy@BjKE4oH6M4#5zTJRdLoW z{%`e!vy|&qd2ta(OLr_vpOOF3InQWwuJPfI)zm@pdj7QbdPS4a4_3y>NVXhhGHRmw zb>l~EFj2Du4Agn7USF{C*u4j3KxrZYNhC<5&DX2SlEr+%ayCa-3Y11w8WgSkNaOG6 z?iWW98i7_#6i}d7p~Yo|5E7-8?em0TFdVRV`4U&JUZX$gQ{*M{#e!v)b8<4JC~QMo 
zr7C+*cg?1f)heFW%c)34NMoaP-tl3K)A@uziKcp$F$Up`79I5F3$OXZJEYrVk2}m! zx^nYbmhDYMxEp=n^}9pfvO($)MG z8Z87`Rm|seilU&YYVdAAets&lFWKSyq(DSA=`axyX)Nd`l0h2LOC!=)A`^iTn!H%@ zYX2FF?1(I%vAes??c3M*@S|JYym^gBkDl`S^=szyIYt%a*_^V-=?_MvNs5%x8Yskg zh2*E7hxXrRF}toYVzH3AX87h!YtQ{yBpfFD7v?eaH0Tewb@LYY?%o67%P+s+fBw&Z z=F2a?VK!Y*l?s`}Sfk7)nH71}^62NHZ05)-o5H2)*U4``5Ax~1{*4<9m4;h(OWx|UD9mN5pcl&wM&#Z1N% zHaEA3qJ)3{;tT%A|M+LV`Q}>|i=}g3IOh{hw7$9E1(S(>(bP1#^AMST{nvlv^Ur@r zuV+h$=VfX28+>F`qEM$_@(i-?Fg*8SPQkS!7t+c|nzhk|Rh^DFppJZ86n5pEWVUC- z#hqPiZ*L4nTcf{@cZYi*rn#ub1utiDN~kl8f^o$(=sAQ|)-jYr(Lag)neuMj5*hlht89K7b> z@Q~?zfvOr_96mREzq$K)lAtOz`>zj~O{YA0@`Ni_F7d&~_t@B+P?S3W6j|vI0zMQS zI5MRxF{hO49;q~WkyBO`!ny0#HCg+mQ0+}QckO86M3BakK_clV7IHNnrnVy1pvQR7 zV>swD8V(qZh75W=`e{s>#7JSNbk5=HC-jFywzn?xPyg{VX0rv4pFZK$>(|7oq%1SC zJi}m!;`qElUEl$I`2YJU5bNL?p{0Iar`C{h#&PWQ)^{fpq{$JGzf`*eFPJwsx7gg= zg{L8=o8-M=OzwqGx6A%fA7#YVl4+_93x^*w;KsyyqV41)Wu>vOoX%O#--$GxA ze_0p@*!?1~P^-mqK~ZEJ&yM-sCm-{lKL3nSzt6$p0fjM$G$HP#wyIqn(dg_GzqSzP z8X;}tBQQX8v;RUx#9mqlKcSN1qne^^bkxTL1ti)-_`3me4t0%?w`CfBPZ?ve?ALGH z;NIPPj0Xe0{`xEaumAqvx&Pg_Ea!8V-03?P+qFTvVm7i~dl!@0hVvuHKX)ptlKuVH z?CQjk>}25hC3xUET<}NaRCnb7AAd-%Hd*H{Np8j!%JU zDiQ}#5InD;eSKqWS$Bl(+cDYP;O5O6Y;SK9ClR<#;AL4+%$6(N-pC0%?kTa55*OB+o)OjH9jAGN|s&xMI zmgKYpY+(lvtN$@%eK@D<=2r;8!-tRg^2;yz`kVW_etl?-4Ev`hbybw!k1m1rWqdGHXUD*ovY|AE`@zfW&4!;zEGYySAZ|HyaWJz&0A+H$5c#u(ws zLE9HCWgIA57kr#w&AXKK8c%yE&z?VHJhTbU%OXSNMSY})*lN)j`4^p++A-s=#d=uZ zMM;E1^tvA0qH- zhYO}pQ6qW2_y7pg$o1kw%=@~ z{=eT~p%pI9jec~kbzA75Ve%_gC|2;d>lE4W9C2drV;#%Z;n=I=Eg!|phwi!Aw$%QhKS zx5p@Om57AxxZ%K{9OU+7+v3sI%n>d}tH(b)N zoG+Qprfh6XT%Q$Nc*gcxu+JQ>56?4gzD+>q)yy_wKP)>$P0$eXBGYTu7#QFukFp+V^ zYH%fYjdeUaNjvg|Kqj^Lu+=yu};vvG$q|-Zrf8(6%#s(`fUzO2O0g_# z?~kTTU3aIv31jjzzHU-}LOSF2f@VWlB@-}$ei}KLRYETj=wiWgULkcxl}A+Bf-;*k z==F(2M6y^iUu5)qwuwucru6#*;$A|fDv~rsCNcei#7IG;jqNl!n6SMUQKWLZg7*)Vu5Fa4N81$52ryD)(^n|ikum@~;$!BfIk(~`zkpc7J3 zREndM1z&&rkoj^+sWn^Mn~X*ylvYfqQ}Uw37;B(5eapnks`8PHRzh@V&O5$?H5*FU 
zrg^4a1j6Ts__eSCy>i`&b?cb$KHeZceV=w$X4eCZ_MM1Ndl-9Q$UVFOKlPr*xJZ*f z-xF`n_R*7MJw%l_@YTmn|z z^zpKak*3H?7K^xyaaBDw|ku1h>v~anLcGhs{b6mV$W%8;GW$$=b0SYGG=;f$aF1buO3I45Hh@sRcV&|1_+w0Bh4FAw*m}k0%Y6!RDh8w!4I-?S|4`fCOZ%NSc7Cd(-+MWy_^_6oBTb^$L6TL_&0rhk5GVJ$iN zshzCw2gr3K$iw-uyfwpg zUFZGpCKAy^(!4TbPjDVn-SBnW!8^R>8b?@{Is=t73Q}!%g%9j?8(JTAy8bpv!SxX> zUD?;%3pPc=W`=p~2=C=?)~)@f5F+G#5SJ`PBM*%_-kc-v5{+XtD5? z5!Tss&Ujremt@(%c2AFE+tBVEl3(kLkJAhgAH50BJ&(N1s@LNW{<6J-?34_#YTz26 zQi?bNA#CaM&5cd2UBAketCtuK`}L|8!Ws;|Rt?+6Uh16adJU}c;! zXjLHu40?UGwzv53;}5|YzWwf7o;`ic$%)K^#A~^CH&Y*&grMrq8TKVUQ*G8s>pY-}+YPT1PLM3fF0jmC_I0}hXm$nw%gtyFa(Z;Yw` zX_V0}0xR?!S{0_jaGti%&r3Xim#M*@QX2^?UR+0ZJ`)3l*%bfQdm#D!1oXkPWfCNyy@)qi_rJ+dH$rd>LWUnj4|X{MxN)S>6kc< ztx|i+dhf!Q|JJnPEa-m!0}RHu8}N=Yf91aoS-#U4P)!6yYYpBIl7jJg#Et9M*xlWx z*H3{GFvM}>9JWzyz-ye-#XHO#EJ!#5gj#kAHw3#|47S?qV32V2$|Y{yy2YJa*ZKD^ z{vD$Wc6T<}+uop`rfhC*v9q(y_VyN|(Eur-D03F`DJRD#yncPa>w`lU%aWqfXxAfJ zYgI#$UH&~<*l4gd9K3989K2x5^9s>)N*6BT;iDj7PLkk!7X5?0!EQwD0?ffj--U?K zy5eB}h$K#I!`1q^?LV<`mWows6;ebsb}g)a^|nmuT}yED6Y2$obUimDwi|Mq#Prgb zekvIb5=O(6@vz5Wn9%RpPMuLK5K)X2DaJsdVKJ}Bvz%piz+#!RST4zmoGi~MT>~3c zRg}th@YKdB8^UfWAq7z!(@Rr&{T{>Nh~aQdzdvAOJmJ#r4)48po2}^<`v=Dy92`;2 zW|X?D>u}e?j<#WCf@gQEz`-?}%0@=GBrENj7#Y`lr1rx2D>p{TnfWC4Hs`L`({nHC z1I}H0=dCf%*hx>ILs{wp*sX=S18HRacC8#Q+hV zXhrSls1=lMN!iZYr-{sZQn~@7+D8dg^UhXk-I&HM=_cWeU{4z#Q8L!TQX<$_-g)5+ zAt_yd1ku2+!t40=W#JXpXzca+Y;JF{cli=Up?Lb_G5fDyp{jx?GDNr%u|iWx$6I`k zk6W8oBMAh1_^4~1?Uze|VeLN=*5mAFCR=6{S%%!05XUi56a%)LwsAQj-k3HRyLY-W ziu1kC-pN-(gq#Q4+NUy{%FMR?lzzN=40N!ch5SDs=Ivl+mG0oj2QI#4ElXW z!x4ji>iS?PvgMq^qkUez1gaU?4Cblr|D;|25F<2|sfdI`3tN~)IR#AP^$c%7nC68V zyC$TR_FZeQzc~j7e(Kx$o%so(AT>YqB-R;2Ho|UoTS-||oE)EEv|%t9BAq;vLJ(I< zlNT=fp$t;QXaSKBHDmrov8-Btm^u_Gg+#hgU=+v1NlctXq-oEV!WGgg<47uPD077o znzB^nMa5#7Gn+4%&vO=wg83pN%k#Q%kPf_7M&N2ldk>@wt2ytP59RyvcySc7v$e^s z8&|n?<0|9Hm}}Rsv$?gy^B2!~_3Bj}My?fMAsTGCyS|5Jv;|2#g-LiXQ}0u2 z1awRCYN&HPl+r{|0H^RP)_VjUn-RhaHlZ7(CM2IwE`+Qj46Pgx58H5xOKnF4ij-}N z8eB6Ju(&|A{y8m35;}Ieie3Zr 
zXrjgIyuG#8*Vt$9eG;NbG9Hh)aq}iuuU+He!^a%#zam@Ah@?f68>5M10kNd21Z8E7 zVJ&P9kBy?#57L>xU*kSr_agmTHspE6VzHnoN?RBvwmr6VRrv*1%TD3;&YN}z@q1SW zQNU1^SS9@&#~XnWQm?C3=g>vuhgVqF=5L7bKmI$@{OkFVKUbVClZ~6ZM)h#A5;v#* zMAIZL3j;|UGw7$Jy%;GKy2_ELhKCqH2%;zgw{N1@8kIii==pQq?9Llro`)Ds94B14 za*0oW`x}1u`Dfg?ew8SeeD>)_4t+8pjw0)1iX^Hk$QCoQ#gti8qKcS&oRDw!L2n?X zW^}S-IxSe{70bLp80TQdpo~Tts064GdSxt3$wKIxhR;$;gb^r|ZF;P=OK8_ClP-`o zAE}MopMt!h8jwI@w5Sbj11x5lEihmNy|U&0T(!XZbP7 z??&P2Dga)9|00B2=v5GBdk1UJrUdDd#hPTZX6~d@&@8h& z<9e<6u0LHPbUH&8YjSbi-#+KSe`b`zXj3Q2_tG9h*e<47mfHl(+F12tt);}NWB+xL zorfqE2%(80m`n!j?QGLeBg!ggK0V>+;DCeI`z#hqrn4#U-MhzRYXgDcSR0mkh7iuc z_7%%$GjHJ)Wkc=V?!4*7JJ~W!1CM`Te}4gPx66D~HTkpV4T<^=ntz-rC~-`M>{}lcPh@ z2(Dh*VryfA;jm8>Nvg_lbdvFUf6DR6oT4ZxtJPvLzX-nk)^wKffi)D3X0e!aa&pY2 z%U2lmBNhvV679Nf!nxK>cpVe$w>6Hd@X%j2aQ!^820*Q67{}=}E~l{#{}LFCsnEK> z=n9O&7=_Wcw7GDVC5<*zWmV^boGzoBY}K~9Yb+&cl5pwL4xfGcTmJBm|G?e%-eY4j zWHKH$JV;x4DZt*}D5TV+iDWz(BI8{~;~rP8>@lBb937r;d^}?@t(Y$|PUdr_(>W)z z8F^ljmzB*8(T2i+(zb_)aR?Yy+5RV5Ls2QTGfX2Htr#Ij=#E3D(O3SKdww;7de*(X z9O^{6jiIVESym9oOQI-7rV$uJoM#2gWkFdgo0AY+sCmD?ET-lswef`cGIj!O6qSSy^m zC!U8C=75}r)o9O8@Z4u$%Dtz_8X)VVM4`fEj331JgFe~HSuS6}XdKS34%4SeN|L55 zml@OPjH>iqNnurRub+=!C5b?o3WHWTWwB&3zRG)dZn3#JVX>I;?8#GJzIefOI%EI! zOY$=3-UsiqwY|-Dv`I9VoXqC6M{i}XLv4I~@2PjN#kB@3R%vgM!F57*UdeX4LcN}a zl(m6kYS>nL_~CudQ^I`L;wD(q;YaoUXs*BFB2n)W;? 
zry$ghmQ%KehqtDR^QjulH@&^x165bdfR5bLK@wmREJE zLseM_Z=PGHwGbtlOV0L%0{rvZVyvRg>x@wNBN2hFS3|dPf)s$2%q#!Ts1}G}qxTsPU zNl)hev2gI~2h?E9n`U;>jV(?R z;v{DOaG&FoDaMuK6&`YBI{YoT?OH{$bNt_q1^h_7r>oaCS|$RGF_lXOE||}bc=+IJ zgkJFRCm(S4_Dw$h_#Q{c$9(tQeI7l2%p%J;ICw#vz~<&A{r)DpDp@QuYSfIOjR!Z) zo&AtnPv;a~DXf5W+HVaN5dx%W)|Ny#_@`BXy~jRrUD(5<#-@Us{h;-3g*U8iw8RS) z84i8L*aDxyld1k>BtlD~EUW9oC7qa5p(;Oqwtl z^cfEaY;25(W5dD0F^fe;St)142FCAc6B7F7)=4`j_d{pEiqkK&cuadA86P>gAa!?K z*Y0V0;&geA`%E;B(hwQ2I=1!tLt}6I$dSLE!IzkAWSzhmg*Hx!b`_?SDk-ZBX*AkY zXkEET2&8Gka5SLMf;cN0*r|_DI{qnzL_~(P*Jo#EhnqL9^U22_@tfa#%+}ThX_`gj);$@cE3L8?|L>bdu;5-9l zUD&)$p_g?%{S--YJ}KE#6Acg`B*tp}0F=s5DqDdhufoQR(pF>8&aB55(G)E&ioJnX zD^_6$o=N#!41sTEWtEc}kLo_dd1X$HPFPMCl*@vTKlz01?QM3q_t1rAKASqP-Wv85 z++J(lG)JmEX>R=5GnHn6`(!xpz3V1~u2WRIkGldNI@k066ReLl@2MLN@#CtK{e@MA ze6bs2aIh77j1V%A^}k(7^Eqa(TLHo&&FiTfBDboD zt-Yr@iSONY>~ss|N~}gAs`U@+ac}q1>Oyee6m1aR-WopjKNtzJ?r0p2+uz^v^rnZX zmqpg0B^_*C%Jv$wo_OQ*3E|EYwb7!RtDTz`gMUj(bOWW*FfHoCMXOz{F zB41LLMeQ8128(m_30wI}N|4g(VM(0O@Auf*-Q~`EcX{ z2|dzY0l(toeOW zvig)ex3U;_8QC%K?(Q(&*x<#>7tE$Js;aVZL+v^N%Qc>ozf(=ZNcw{TlX0Kz%`v-M zV=nDZ*x4GBCMkKoWdGnL!{LzOaKz^3b#C3fN|xokeD#v)$(*vNh|`o?*Dv$(PhuL!h0=T~-=VRn`cRE=NQ9M0LGZf$>>lwK5zb`KN@?4ds>rFF+)CmERh15wRZ*#mq9|D`m*lx?mR*%~Zlle)gq}=5#EgbRcK3F< zdHWWZuUtXMi2Dy8FgrP6v6xeqIm;}kC@oT?Qa05)iUor~pMILs@1>+k%y>LvV=`ef z8Zwy-84MDlNMKA!Bq5Q4Dlb?p7fh#9=JO>7hbJ7LEIB#OI67H!vMiV_D`v}_WnQwZ z6tiVUQ7Otwp$r%;>Y_#A+||{tyuBVcqrmDQs`jm%QfS~EA)}4$VqX*#ML`_L#C6k? 
zUzIv}G18{#E^M5#7M76TpcQxT-D78ao2nd97CBYWcVNZPT~T-)1FUmA)(hibDtSdBRB&3=vLyJ3 zgEkEGf#0Vlx^ePJ$+8?eNC-s*{50MQ(^z` z)$6U63+^5uFe0MKlQ7U}^w#zGukyTZKaw^{pEnakP6NB6j*uFx(J~&5xpngzpMClX zpZ@kYT)A|avMTwPKmH4U{o+gh^yfeG?D12|B4cM~gAYFXknQa)p1pX*$9N>vt4?9m18 z3fkCWLB15PEOlL`R|-kmA21$mu(P+r?%po_{(xsspY!zjGoC(u%HjS2i^US9D$1gy z)D=;bSQLvfM6sloBqT{nn#Lq)!q(O%mv(pA+S+7$dxOo*5nCH$2E&x$Xh1)b#JWUR zl4S|Atm5Eg&g)l4wq3&EoY}IXs04XwC@M{+6nUwrjCC*>{5^>f?gUi9`xAS9MSKk2(4Hy7nFHPu`H;RVkRn^5AymYFMA22e#&UjW6@8TIVP&S za6e@jKI);2wsrMQySDzi<2mE|ca<(Y+EWe`SZ~CIgu@Ef*ZuCC_hug5i&0rtBuRvf z4A*a5=H{*I>>nI)e0<^a zOBfDPCgTB<$%vh;341$ROpmANvgGQOOI*3M!}V+bh3C&+P-)4=ctny!RH|T}l_&&S z2#jf}m|-kLPU!~fXxE0UBO8EgQ0IlYs*RYlibaG=uI-3$4i+1lBP_|hvTnQUyY;h%U8TU_=dwnTVY2CNh~F$Iv;lRgHsbM`x%jRJkOvsaSGaxqJhVnG~B27?|~ zuUz8BjqBXJagD9`sAG@c3gq+X+& zW^_>$za?|1n>DmQ6WVTCVKL_YSkjLL<6+9RE4$pheuXQ0J50s{Vrj_poLHpfS;@0! z&w2Rx8PA@-WV%@L{OJQKwdC`E_z&E?ah1P*{k83uE@S6n_2FI7l3CsW(Z(g>ub-hB z>)Bk#RcQ#I2qASb+J7six{3|%VpM}>Ob(_}pCfhsZUNiu$c1)WWGpsT)|G(7ic%6q z(s^<`KHRMt?U(_2krGx89?AZPitsblV&2F~hBQ--2I>4g$WJv(}$2rr{P= zhsUfKG0vbuP_N_gEqL~EX39cW(P>#E7U!;2CzmA1@sqUkWQ=hQ1^wQn@l1P?QV_?U zA8LGAT?k4VhtX3Z5XO+kF*k2q;OjsNz4{x_aJe#pW8 zE40?!xP6;{{HOoKM<0E}a+&k({YN}``ij|X>GBQ@N|yxw;WH-bhox@yskO(PV-sCW zf$v1_x;*(J5`!(b&kUYp|;RGuhtrkuD$*BFrCA|N71~atM&XH+2A3x%|*_>H+TcwMkzF|s6=>d z%{xRb7k!j6?!_2w`wz9waZP)h{Hw+w386&wxQ)&E64K_lslMiERL5%jp-T%>iEI3l66b`S#&+2E87=G~(K&O>SJj z%#}-*xp(^xcR%=)E7$Mv?D-QOJb1)|2hZ5wpV{Y)BXm(hWGJ;pD}zS4JsrGjmrG-H zEE-vj(+Pseo{xff!!Mlc);2TVm=3;NaftqKd?bALtubMnLsnHml%&1?qYnGOu~mPO z4R}sc+>hje?T%vIeOu9v)))0jXWpZej<)_7A!s~{+9p56w((&vjo2Lb+1na(Wp9g{ zS1)mOZkm-n`~bL%=EfA~Ik?%ZNyV~CVCsZ&Z}6ORWymJTm$ z2t5_u`uk19NBeHDt>7ok-)k7lN)E&tI!(FldQ#WT*Tv@atiRjiY>(IAD+ocD*Aax< z-T9Gl;a(|`k*pg{c_S=#21pYYZVUw9g}S+G*Ff|0*)^Wtd{1gqAtU(s<9mGg{`*vA z$#?f3@XfamIXs%8DyVlveff9Q{qWSyP!tMV)_g&> zT-YSbszm7=T@?%lJ;vh!{a(Uw&}U;ZVLTb3Dn(f;w=by;jH)Ur%95(8P)b`ow5uTE zog>;uI%w8Yn~yd56Xcn7nF17eSe{mT;P~8$PcxN-X@M0NN|b5ShUs(kgFU>$t6s6$vut37WK0%?2zcR`506G01X 
zQ&ma7pRl{T#m?3SaU>{;j67RXESD5{PLY>PUAfz3ULa)5?(Su7+`NsDG5_m-|B1hT z`4!KeKjrB7h&;@?qQx5ydWkbY zMqJw2V0UwaG>Ry*1-U7R`w2-Tp)3(nllBJ;CMg>m*SUOoi`NHtdG_ixhtoM}KjP}v zC9*O@?;o+uJv_rH#4DaZaUmx7HuQ7wte++jz40Pz<3qUO0J5cw*#EoeNn~?EglH>* zKjyPcHb#8>!3PWmJsv!G$b(0ZI6gUX(LDJsL}K4rvffeq|7KbE(7jDKMnHw_8KD$V zIzeqpjDa}TOlM05{V8c8Kvne87-33AlL?bi%4mFz?VVkcq)+eB3tsJ?a5BrRaEv2N zWol!?N6TbTB~9yYin=#Gg1@1Td}x>Ccsc~Dd4Auqw>hqTpOrfNt>}fTe%R4u+t=_t zHcs0ct}OhyAgm42E*f z^WXiB?d=`k6Pd|q#GP9=*%%MFckc${@tDbEOq}$2{&Jr$zWA0ezPQiJ*GJ@~Mqv~8 zeQrWnXaAQ@7wP@5a!%*(#K!Ak93O!ksA~J-Bi?6#Yfy0u{GN!YXJj<9)eb%#5m*6xWfuKSrD!-3As`3VS7!nTQEJ`_|2wE*kub0s8wdi$QbXqYEZ&4MtKxm1yEoenpT~HP&t)fHJZqw=x z*jVqdzS(DIZl0&>|xUY#Qr7(m5rU2uQi$nIy|4K|lb!1Jb&I%rrr`rQ^Ez5gD!Zf$XPe9E`q ze8<7TOD2;Ur8YDxL;p&zN6rZh0_C92o9`T{HDSYyNRk*!L}>)mwBY=Fj43s(IAJ!O zva!9zV11MRdWY?;Eu`$wYHctWoN#z@#_9Pvlj)RF7YITjuAq1ed7c=ddD zfLyAsF62D+WHH{adWOJPSP$O|5sEifTB~1+QV0h99(%WTc=YHIpMU;{t<5!rEx}p( z-8OD}6Qvxtb~mvmCy5hI&xic*_!QkMRogsmG%7E`^ob8 zvYPjz?lz$${r-UW-q~Y&bB#f-OTUwF|Mnh5o?%}c;U+2C8t?U_IWMcfuh;(z3W&W~ z%*XOMqJnC^QJV9#xi{oZ-oAD5iK8}g zw@(~*IXpS%n{R&L<>4t=n$hXB*xuS=Z*P~qy$9Z8o+y5v_#e?3D3j#&k9#%MA#N)|HpK>Kozr_R1ezdzwPr$0CbHN65u=%AuO!~)_O7P{e(d;CW*o5j65yT!l8u6j7~E_X*AZ9v>a$BaYcbjQ<8R@ zZm-AYT8nOXkI{IBv4XAME~3!nS;otg3oHt4=THaHc=O7&J{O#u@9t_Pt(w%tk%3dJmz5kn4`lp#uHzEV%Zv8bAZN6nrqf}_3XDxzTf_SdQJWkHeLvQFXk|W zN^o(a+8q3t7;7nyP!?->!uc?^?tNFgx@7PCD0_pjzy5=WF}8D7&{@FgCT z>*t`YE&A3~8Ca^6xK*r@pSQlw%{;Gvm9#@hzt*aG?r11X4fsdpjjLj^>Hzp^7+jcV zje9|*{5Ygg2pN>Jx+W7S75RdM1gmi~i5Q{oLRi--Qoi;;lZ1b6l?72Lg;I0;y|q5k zgYO#!0!kOK+CUO3;y7Y`(C7BvE+2mIkk3AS#J#(_Bm%Gntwdq8rPEPp(*;|S%`(nU z&-w1#$Nbly|Hfaxe#~&3Q zCH(z7r@6QJ@C8W{M{KSQc=yg8o9jLLgAToZk5=nLtWFWGV3Ot-2O^5m)?&2wEv%Cq zTviBVFk*>S-oR=tt$5dp_sx}o%SuWWgL(Oyx#7!01Xb8a+=|rkOXKKj{W%yaoX=t- zz?rbVm&e6cGDO7(npxwT$AT(2$U2|x)8b_zqb<`(%4Cv}r-oQb57Vk(U{VmrZJfyI zZ|u_PuJiQy0gr!p#^_>9w-a+`cboh7?(*Qhce!)t4m&&B^!t6HBt{6l2N*+MWMo;+ zY&vB$ny|nBk|)nza6TM!dNyV_nlZ~u(!3-ubq$YlwYQPL=Y02ARGkIS2H7Dk47o0u 
zq!*kIbNYibHa0f7b!(Syf50s%>9nVu4bKtd83;pBXtV}vJWtcQVH^qx6c!Dh%U%$! z@q(-v@7JWp);Pc8b>i5P5l09GQelkvDLf$8z3@NAaOOGWhKyGc z?IfbzQbdseXDGD=r-+mwk`U<#i$Pn95&}v~X?!4-b(U5up~$*)x*fW`9(&sZtP^CJ z#l5r3WH#nvJZ3n}KqMfd6*EUvyZNPlsr7-^=%*jkry>}pYKALbouO#uJyzVa`D@dA zQCk{525dgrGDq%d{6#TL~`?FX*%rCK(JThGz#OvcfRD$T%NOka5J;t!*|pH|TT{ zj*gBvJw3x(N2C-E>8q$W=1V0k3nzU#S>c6xTg8b4i<&1d08}uP%BWz%oQ$FfDIzL- zv1lflaJIJ2`CR^sioC15a8wpTg%b{ILhyVr3P|OPhrwLI5h0DHZ|-86b0KBDPAdh+ zx4jyZpaO=qHDAB7mc}my>Nd~CDk0y+UV-EIT@cs8s`R2PPJlj9ye3VgI z<&~!DRTt=LXxZDPH?<~TQ#N>`tiaC-YaK>g@-$^UykNT4VWS@*oTAef?C#vhl?7}4 zr#wA4=5#b8FAE>L8PAPn&bdq5X;au=gfq_fgIDWvf&V#hE6!Plwm4kl5?Aww<~vh+ z1aBsJ)iQUptt&39)|0E0mSw+ekX-BiUHBBoUTBBWWXAr>LvB6aWNTwU9IX=xjV=mG z?Xw05sfd#{>+8Gx?VHDZ_0=~_Cu45y?(ptA_xSB+pK|Z+ZML>H=ycjdQRLI(I~!yt zgR=&01lGpryiK{g&h1<4y#L-^rfJF1$pz2%k9hv_n8V|9&W9tWX@=Hr4%cjW(B^e6 zC9X=6-~bfHIkLi1lo=P(oRhN&FOSaI-ri*A);7J~02L=7Pl;wJlUa&OJyceNSW?$0 zbWKApfz@6R5ujqLl#qGb|Bi*UVoAO}2U4ELT1Pa${2!wT{Hu|WB4kVQl{-6~Hm#%u z2#TU46M#_^!l8ul3QDBFNxHIyh!sg35ydhrT|*=k5=)UzusS2pW+d%4olcil++t%r z;r`uiMw1Eo^CK@5si?M&g7AuTut?0U?93pF4(s!Rm3Xe8 zG!&4g2dTD1&x3ud%F3?{D5Tn8T)m-$mQn>UieQCN5|UW*-n)1C@^_!JcWa%qi!*+B z{ETnD`GLd36K1o#4ha4+O5MLYRq}&aSP)?xun6gB#Sw#kpN-882K^2~3DV4`e^has zmTEJc<~-Sd#$+}lO-m-zoXM=9(2lsBu(`Ft{rh*g_s(rLcW&W?V*kYf&KaU8f+grv z<)Lt)5TRra&5{s27FCT9hm+nS>YUHO;;c`-U0K)Wzs+kC2`Nxc;jG3vi!p{OTh1zK zD)N>^g+fX1F|c)dVQ(FFwdYaHrL7<$9{29y;;Na+t7Y9dt_uUVC{q+yLJUS)K zeeknG`V`HTl2tvn(M>-aMg3<;%Su60Zx#4hJ{3ufu?#Op9K4j^a?)&oaRve0zw5Ku z?L2&kqe+I*Udf!RZ>~}FJMgdS-NJVcL1~Qtlo?r3GEGxX&WH569eTYUgMOcO zyY2aqo#KqoY$(-S!4-k;3n_!QdM=yHy&!XA%>4I7@54Wu=I!yv5wB#fztWqe3Q{A2 zVj)#<;X8@ZjskCUzZwj7d7Lz#34s~r_&~BwGb-uX`AB?xv?CcUhwi{$aFem zcrju;o|0vnQrnOgTOlLK+4%)$XXlJ2V?O!hLw0s{xwsfJnas%Y44lU$TNA401{k1E z!;4SrXl$4_wh@5t=c9BEB~_>|;)3!K-ppByss385F`>Zb+|yxu%^UA(Y*0RTZ1tMV>MdmXq{0OIaX_=h!9eN1zj2xhCwgl_Rbo%+zqfT0h}=& zXXvWfYh@U`w%aSHyDwN`EQ}{9H&$<4-H@M3+zrNa5CW19_ zo(?W#QW8E*pA1-1V{O2YYmBxaBxRv7))%nR)_@R1Dr9#EmQN9UwL%J##8>l-lozPu 
zIQAGyCm0SdD2sq$mQlzsQRUsQydRBVG%o)If9?WgsbVUPbC-iTS9Z$6p{N=5%Ig}| z&?XF|c`TNWf#7kPz(`=PmH3Mmy*2U4nsKnnClU zNJ%=KHg|6C^4s5h&j0xD|G}q^K4N37kHVnKj69!F<} z$9(hMQ=aV~aXg%ol`i1Oy${RP`Le`{=PxIdxPG4BWF_1v%;HsiOo$ut4{AT1So*v< zTJmgV5bRD32v)tl&g}@bce9@RVzvBK zx1oxKbkzH3)t*Xuo1|bwtoS@XYZ7ij+)veJrMv|C;BtYADEg(}MPMKOn8T~6=0ik20S3c9o?a4$2-{Ot-+ccAfBx$?{MY~fEBgmWOlCR8_}t|++)gYDE?heP zRRLMPL%2uCUgw+!3{foU_S$T2t+BB&AW9;%G2}(bWRh}rG2--m#Km~VG%d*rOQ|i! z`3=g$o*XKIey_#GdYAP=A7*loC`j;UB7kT(TP8$2J)`RJW{t`^uAGH(Sw)Xb_^E7Z2SM$|i}a|BolOT{@I z&e<#1%qr!i`Ta@-Xlc!@RzT)kdi6dPU#t41N@&q(x43uzF8}_A-}CQ({5L-Q@By3a zeS|B?vnhEtMVA@2%+YyDo=%xgrktOh@$AI`Po5ocaCGircus(byi)3*uJcj-U6{tp z0{hKY%RPj?Q@AT$4%c^&&__qp1K&2W6q>$w>jwGE{Ot%$7nPYWM zq#WH&%+A&Zw|2JJ*jOh?5-(T^QSZf36amshWGk-i++28F^~RO*(exxPeOB$CudRSI zt|6@B`iF4sf&wB!EDM54LRhXOD01?&Aj@*5vy?2&DT=a=&(y|JlqGp#Fxq(UvUO-<5kfWku`v$52WYO(>fB1< z7Q)w+@$5YFWrCs`_(_q zPg(6SST%otO~d;WQ&lqp2hMm=zqE5Kwx8St3#Uc`MQMqYptKI{1hEq28fIBew-d46 zYte2gTCpUO5^V~~+<#h0Lc7zW+lz^#n4QgaKK=M1X_oWi2vANOXz4z|(>7$R?-Md9P zOL_5PpRd3EmjC_dZ+NkP!gyL>tY><2X4uRq)UciWG}^>3kXA`BzDJW*!eG#4eWTC% z`ha%3jnt+vmSSzt;+=cD+_|&M+FF-xH=)yM zvANkr3TVfmFcc=#wPHz@^i9I>;r&1auJ;tzlPJ@4GR zO|R1ejE7z60$t`{4Z>+`z@HacN|xp1c}bCLK+$O@*rKEm2CShpJ`IkZnfuu?nWi;EiLd?7q#DPu>OCFoQVOSreU$==>BckbThyQc>{etN+E z{vpGQF-2)83X3s8kgWD|UT+WhrI6#Y0ibcv4hrZec!>vf2wz}kW$FMJIVZ9|b&f(N*uEDG{0B`

ayVzO z&QKHuX_}L!8s`MM^u=JU1*r-U3T?b{)$6v{SRYW99ww&sl+rYnTd}C0UKQ8-05vTa)bvBn4Ri#g1yQiPg{YUOdN6yO`Lsq2)GH%PxK zAU`1q;T4d|qM?oRl{tjZA8nkYG~SJCt@hav9F&nkO2|q>uAvo2OtX?%8nf1KVU45P zj%X*6Rw5D3U~J*5GOLny>d=Lx-4(R@3AcAP`0}@(0$=g?>2WAf7PNN}Cb?)>-fA)D zUx%txqKmH8%EM9pV^PGKTTDaz#wBa+^)B^B3zqH;U`Z0i*5(=yAH2h-k3Qn=o!iW2 zDF-k1`RcD<^X)f}dG`E(@gzkX5K`5G-a^>NuXIDI#uWtWx7JwS7|=>$ zvNUBl9C0z4Fdk36<;)74lORG~Gg*y;7fL%x%$>b8-hXhHM;|}nopcmvBk_T&!Nkn1-Ep( ze7^r`ZcS}wx$?Vj<#D)r!d_B9mW1@><5hIoZ|&PALYT2G&^-K!K57nqT7z8;!IfRI#$kZoR2 znu1w2%A1Er6@}BqVNz@t;vgwLKg_( zFjgZZF+15ka za1pv{$B^?cnm<;*N$FEN}Hy&K0fcz-)(Q8qY8NA7MEHCi40lE576q-@NrH!j{bAxe~QkQ=CYAmv& z)D|HeGPle!N4K32D?y=sMO^26&{8{*bXtli3h*E2ATLOBOO{*uxnT`K-0JY){vBr1 zjIyx2JRUR2bbw?91q5}u>6JWC_s(T&KJ%{sw&f8mS zJo@M%pMUlVcW&>Jr5OhY2mI~pZ}`hs-|+OwKI2hJS>grXrJ{qb@&Bc#zw+=ONB`)J z+FRm%R#$_Pk~mJ-+1X-ibAw*LLljBIqX|bx$DE&E)No1b1VTn2BoZ&!c$kJo;D{A$ zZ}j-+{kwen=mGD)cbnav4K~&X^txSC6yt;%8K-`fP1)veECbxv@oWuujrjLq%<7X~}q! zk4C?yX%U5ySHdM<-_#WfnwC$K?W&jpp1yKDl&8nCeSY{>@SltgGGVrh3!W zM)byBfr~Qz9jBt%OKQoORyd zI?I_%Q_{4cC^cGZ%Cf*>Q3@|#NkLv@jK?$bLXu`B(^*Q9=R{E);$>UJaYA7YX;EUW zC5{x`cALCgQfTcV@=MS-7X;e*nAB1~myEgfVyv#0wSiC6)5` ze+McBL?A=;Vu2Uvsw&h%R0Y|>xw2vkA~1zee63+^2wvvK+zu^ptJigPjt%`LTotF{ zJEN7vY;Oh$TH{x*v&$Zt z<-%F3uX;7Tnf3FJ5oAP@X$pssezWYfTdc3Iv%S4Tx6`F8at`*7I6CsFj(vL-zW>9_ z3x)=jBv_jxoI@(X`k>3b+dJId+houdxO_}GwiL1?b_GeRL(=ZB(NFmDw;yru-Y)l^ z@ALTSJ}>rAs8mRaGUp{(VW;5Qwm?HyUy+RSHK?J|zAwqJlm6tb=8q z^5WSOIxR^O!*o32?BtZKjSZg~JCgLfG2KqWpx36|N=S3fWRf!;XQY|NIDyfIJWDZU z0mf&x^OkslaRP0@*x=#9kHuBUwCncLYY=TWI>(9YgUsa{C>Z!AXw|z^dx6ykAq5%` z6iA`5NsASTU?B%oQ=i^6}=HHVR_9}>(&zXrMIAkI2>5Fkl5v>RG;7EC-Z#f;jcIF z`0yu*Qi?c9xU;*>?>>LT?>_&Gdw2Fw67n>o$TRXhr7SaAEr~UnNcfny$#}}e#f;%- z#xyH~qF;C~MkI-1g%dstWtL~8X^x}C@O;W>G(u~QP+sv#k~S)eFvgM>nnD}OP>`|J zN$8mt<7rA+Xq1d`E+8J93t2Notsr_ER#VKsJWy3&dgUp%-i=+2fIqI|Xo3Mi;_E+! 
zf|bHn!hKT~_Cmj_G}WNI)O*BN(mMW0Ay_|W zoTatP%G9#pw}{#H^Gtecs`WJ=3hgbirGmV$C>%yZS(ap(BvO#HB43P0dBGZOa4x4P zOD5Blq&s1-zQgX$CPF9*qbXhHJSPGj&Q^pSW zsj^6kkcc8_B`7H{T2tl)CXX;hLODyB8bhk4bb38n?G8zjaC^JY*7^s0`2IUQdHRz7 z=l}gLUhJQc=8n9COnc?S7sr#J1kzbwqo6S#>iCMA_P~;o0xRmstTGZTeW5C^s`E2@ z+4%7KeA}UfxA@hM5tlbdxx5yxm9VROg@!k(wM#J%CU#hkj}9sF8OG$CoSZPZm@??~ z=(G~n`W@ERy7apVopwYLM-+L<>FJP*iz%aNL7J6V>%G^y9TORdq$JV?ZJ;o=j_3Kg zcoqIx$PtRm1r$Q0d`0wDEB2Y}q(rC|#zkaKk&SX@Co{f#{DQ|%_IbX4#No*)7vm{; zS)fG3$>5yXtl<7T?;?~0DbThgQVIVme0T-g@)}XWhK!(=IEmTY-QnN9{DMFJ_6v5m z))CH-PNz&pW3t)IL&k+cpeRe9PB+hUio$YsF=qehoU`GC=_JKkM>|PK+A&ExCQc%7 zj-t$&Wd*u0USXMJKI4`YWQ8MXIU?l~ij-Oh0{~bgG7>}~bDEUGTTZO!u(q;jHfRy* z>N(@hVAzc=w80Q28nOv^#qiR-g5{BhF|<}hUSkga1hF9hlnMy^e3g(@Sy-07V1ZFN zul-h5cIO--H?$&!F;JF{+(}#{Fvg*^AdUrAf{HCtwQ$a&vyu#-HdKm?cs!#l9qXIh zY;AP;-ETfdNO*B@%rw^&8p_48gl_;F`17KYRln95-HPhCVs1?&{w;1F(t#(Ye)1}?+603-fZbb6n4jcU*&-c&x{>cG9Jlp4D zoFX;knvmMtdEH2Wqq~sCBYA6t+xmtyCePm;y9+)8F2UZE+2pLF?n9_x39lq|HUD*Nyec) zrrm0hB#M5&#rj&0wLyn|w~Z2#QfpKavmVDpX+~QoNEx#>Xfew&rqh%(%`qtQ(lE;k zvZACkHe@Sw*FvCvrK`zXdV>ImLkNqKf;f`INkr0W6UPaqf%8d5X-1^8LkbaG5#EW^iNsDAWP5o&@ESctLE3eeR=PnQUURqEtWPjd9|hb4L+Icn9528WZ|ny-1j6J=LzR6 zaK<>wvP8*zHOW>d0RN}lG7E~e}soN|0R zzGQIv_|xLeUdn#*Xt9@guA=j^m}Uz*7|I240!tDkQaxij3z0C zaRFN5an;VrTIjNsP!Q(u)rxXMkXU*tvBGfjDhH{;8a5!MU@++O`KO=ohkyH$om)G6^|x>M&;QIAk0z9b z!<3e@;g~p9blVAsty4DE`|NCO((SZ~lbAS;&{pH5quq(=_t%J{9Yz-u&d)D6AC1XN zjl)ApBs$EiUyTcIg1jN1G8p(|4=R#GQRE?0QHyDAne3l39Dc|7nK$yBonJ7`3Xrk) ze@C%LDF{KQ(_wpOlYXzyWIDu@8B$muYAxh^4Y`nsVhPUjOEKJFq zjLSiU%g?whrv5Yv$8tNk+>UD&YBM_lurkN`Jd2G_+bbn#t#R$x2e~SVlVGsqWdVhu zZ5+l$lvwVVN`gA%iI_(IHBP}xY*s!y)OB{R8SCZxwI@Ea4x-L9v9lmlr zZ$=ROSEGgA3KnGueJW#wYLn)cli`fAC^Zt4hTm+Ej$n}Rd!<~Gv4l_yZJHwr=2C#QE+7gVN=CF3#nwijFTePd&p!Qx zhY#MNEgkuEOrFgs@{}?!m`VCoFz*$CTT%h7*2;1&c``v33=&j zY)EM+NGYYFYaDT`yiB7V#jFT*nS3f zL{LJ4SB+)IYr{*6=bu%~2zD`R=ITMYVvIt6M^S{aW`W-=@U<)cU6E3O-`m$$gje&} zS6)Gt#r>bs1M<&K&Urj}S(cP>L=^jgW1|gu?kj+%xkej-G0;w8l1L$(^6Bb~Lt2dy 
zDdQ}oEDB~>Nw?K}6mb?X+X(WJ*Ed7Am_7L>fInzjs9!k-^`<`=Nr1noaqs^-km zTSt|W;B37xmkS8ql&<%HET88YhY}JMD@>Wui52(n?()HhUip~KGXD0}*L?HsW1c3EDOeNvnvFBo2oIXxd@9Yk?V%SPm7 zj&zXcIg_kpmRm-%oJm$NEi^@GvBs}sGc7P=SP_%N!KkV=MmwA}80Q1wQ4$$Nh$zD1 zlZRO6unwOv!V!=p4i{jpQp{n`Qp&o{(sksoBLtKCl8moZLaw`Ctq@wQl{mKuO#yTCK|fpTtpF#y9lZXy%gEk^kAP;+ zd)Z#p|CV7+MasiGBIS#Qg7ZZLt;HILF&1qcrLkyjy*IMdlyDP{P-NXeAN%?(Ff$ zfB%yIjppA!lXk(d9XACb! zq*)F~q=*nALa7#6q5bdUDQRAi8;fxPHb<2tylU@dUU;q7kvFi?Z*T+oM)=8_7m^wy zjh}BaRPOePUh=%;?DT^3;fV1lCoc@vN~{%Bfk0IpgAfkkEJ{EpiRiQ??bxv%ii&>v z=o3Ev;6vg#VwR_r#$cWGb$BX^SV;5&es0@dZWT#6e`cjfiTRBXLJ&t0gF%Ns{pk<<>5u=$yZ84nMas#` z7Yxr%D01&fOw*j>vooH)c*)`MDOpisoJ2$sMV6vXNl|FBLNiHAj?X5Xj%G}9O=)~w zM^(7hh4C}SmpjPw0tY^AdDZIz9|zPuwK1kPVlBhFxqdbH%$e7mMD6qo!?85RuR8gLbX6tv`SJYUzbYWFeD2Rf0bx-F z_j5vcjaNmANJY3H#HN{FFg9#<#(E|j#Xhr#ah6$@krg>vVaQ5DX#@_%EHAhijR06* zU#HjWhDxHjUu;$`ynq+sc|+FSVUjYvIQzAdAW{FZ3WpQ2czQZDW0{ z7Of3wR!|f^!w$j+oO@_TRoxz{sxij8;D!5D0r`9A(*6uc=?zxaILbnk=NfDMIy5Hm z?@%QjQ3;Ln>mw;>#fr8PNT;#7pdH8bT5UGh*1320Huvt|r9bGBXE{ZYW2~tQAo}<) z7ixc1z~!ZRD^@2S`1=%)#-9CJ3dnqIHsccJ=gO4|h!7R@O3?4M`Qr0W`R{-FBX{rI z!kL1@{b#&*{){5eNs<;$D2Ag6&khcGad61_XhNEmlzu^xWhrG*k{6m;YRF1BAI}(1 z3kvPPDZl=NM92ssr028NQWS=)D1D}~s{PW7|8YB^-D>%|Y-NeoUir|*p{?_=e-)xZ z%#+qs^R-Ttal<7r7jYJqkfyw`!B>6W4!yR3lH z5m>(a_B;Oi)z^Ig_!-alj~QOf$V)$t+8DI#p%X7p)P=SE zqxj6vgs$f?RWR|@Yc9ZUs?W;%F7m?p2JQQDwQgO-B>B10N_+}mDI;IlkgDH(zACL9 zS?;ZQX|5@?#W`On3#o|X4%#}3(xS@}9jgB7Li>upAsbAQW;j#OX)89?du(p3(Qe1& zSx%PDP{J!jvU(pkx4>0UkopzE{IJXMT}vMea~yl%V~)z2>;JP$@DjVcUR-k(iy)LW zl=zCzG(O@o!&n$L&rhpo+Q##3-CpVIF$W`pAXX7kB7zW#2x%s!Ss!d)*JTu(Pyw>P@ckEGC2 z;m5>S8w_?8FRd~^R(nUaJ2zz#k3i_Q6Z-8o?Ic1MCE09-bYO}CiKW|47z{exy}irb z+qZo>W?hn(Ii)lB*yjN2aUS#+rVX{6C6*6RK^*zIHcATC`fcvt+vETKzy43&y?2{P37$WF!q%c6yrR$C>LO1~k-#q~5g;2TWS0@%x)v;` ze$(|8LNlpJvl106dQpNX%=x`CIcz zOPwQJwTXF%M-;(Yzs-Yp_W1O}2VgRuzc}FW(`W4OA8~Ya#_9QlX{O0aN2z_)UKjY2 z)$@Mvf0q@*o371DsQIPRb#1?9BCePt*VA>A+e52llhHS)j1B8Z_;IpAPy$j|w1XrP 
zIAIZKiEsuHMP48k4r>Zh9POkeR*-fI(yX8rPiZA%`uz!On*#=$1KRyQ5AJO<=yli} ztn-(zzT?UBeP(Hna}k2XTkLD0*{-mfRZ7V#INH)RbA@>I@BX`2_n{4#_}8MP(*>3A z-)#Tfn^jxtLl!!LAc-UT{XXwMxXzkX{5Q}H6 zZSKf5bDFETm0&2t_rAip)==7lX<(^R^f>P?)y<6HWD`RH(EKC|d*XM?)A5ZgjJ&$qp zijS?6CE&c*Knh2{*XG{cJ%0b?7u>(QOFM}p}81cFqv+!w%%iBXOrX8A;r-ItE)|2*4F%gRMFcX z2j}xjj8{IyLR`q(B@xEOD?pJH^m`rd-@V0058q>FYlCn9_6?7p9&mJg&iTcdi_whf zEGI9WPblE#LYoUX^@XT9ITn&3u6_TVRc*-5;>C-X(c>mqYTCJGP^*-;ubB8a#{ESaN`H)-N+eA__9u7G^JmCE77!^r6y{^y7lVxaa5lEEu z#VBoU#JntkEZ7Kx_0<@Q)fy8=A;CyYB$ZDm@0`#6<$S=bxWY*FEB!Q76`~YUpk#!U z!4hww)RyTi=iucrLV$`TQbD^Hv$o!)9Sa^lxXb4DI(v7v*!^aWufKi5^JmA5hM5<9 zQ9&SmTfxGwHr-tShCzA0N0%Wrl}D&r(90{%A)TYsX>sSyE}wq(37>uT3GG&b(T4ql z10Fwl#_8FR-CMVqWhMC}<;B4vFAtBoxETAY<{`duniV+4WHUv-uW2P3M8tHOlI4Ym zrdB?z1?ZYZ8y!3iibx2w_L&KV5I&ox^#0INYs%7lqqR064#Y0-tc8+Kebjs{-;1r7G2>u?Ax`j>rd7h03`w zUl-a!mKP+Mq}}y7=qry&X$-nFco>N@IXxdU z8fQ$?+{0}`GJy&L=0XtAf)mCy*z7MCPxVht-cwmwAH8A8VsRm!LkWmON@)QULSt-6 znWscaLbs!l&Y}w+c) zPQv#;9B_V-hUd#rP)pVuw-?aEd~>g!-(F8~3&+a^F)t}8*C-%w+b~ey-GM;TZYON4 z_j&mKJN)MJPx$Dg_t@TCLrKSEJmTp1h?fTk49`#K^}Fos?U5G+d7h)S_851mFrjMk z|Igl;Hn(jgVfdK=L4rD{+g9wzaTMD*D%qq``TxIRD<87SZc^DT`H(D$2SDPSV?PW) z0u(4)iXFRL?kbf;3Is4iOi#c4_OTd4`@A*gLpaAz74o39McN9$RGuDNJC$fX&xadN z@OI=1OHCVVzVw+nD_4ckcymT8*A}HUS`}OjCj>!2zaJ2+chM-M@6+GdWNV|(ljBbr ztasVp-sI=rfB5%rPYJ^mrF6-%$QJneKFi`+pX(0XGwk~wySrOFe0apyUp--eZwHKJ z8csQR`kd#dFVRY)5X4!|IEuI!jtHZf%lj|_VW0}1m| z)iiaIwHm9-^(mlZywVPkRQWO%C<;xQX5^VhDdv$TS9GA@O60?B=dSLosEq#C1|h6= zO1Nj*=@rXDuUfmXt6m8(UWbvpg}fI4ojq+~o0%zK11DC5RCR=*advZ;RG zlwy!2_~~~&4tBTr;^7hZ?jA6kMm&4=+%>sM3NFK#;bcafDsp8pRyM9wUoEZkcjc|u z0B*}P)_YoB`R|hEyxx9;u{b~|tYa)2B9`+K7G66cJS+yI3sjyXyoAAqqTA_^W|}jx1m}VK1IA(1eG8nA0wmxQOdz+2TZSLJYpcnKBx&goc`IOW1 z32~~>T40P<=0Ys)zj~gQJ!7lqi^fJ*{r}2l`i`t+TbA4wLLzawCcZD(+1}vE<70mO z;aiSAJ!CKlDDs5i`6Un~`N1MNt55%*L9f^(q=>P(4&- zaZm1jrAh+{UD`vtv}1w8O>Uul%!AwPjs9IU2E(p z0zAB)55DChyu|Nz8EkE{xiz5I4>;W4VrOfez3m+aYn%M?%byH~kt?HbToDSWA`Vtm z;#)yz2t=*

03LZI}%mBpUnn_0oDw8QmqkGG|Fxf?k(T4i7kf_<%e6yLb|cLUHo+ z8UOv?2^W_W*4FzNBx#`-hf^j|OkP;Db=5nK1#56@qcC_#j7iB-==hp6R}{+SO4=%q z)-ExQZnP+4%Kjc2DLZ(cE7vcjhc%Gv!nHK00F7+{AJFQUsT=jJ^Rpr>tImQ{A>EC0 z*d{=6Z&k{93AB`eq0WNTO`?hgsoP_9o92q{hLzUN^7^=mUG&ZY$h+EvJ_K>Lxm#og zJFm=Ot=nAfoT@iiZAf!PoaU^pyI4nUg)0qdTu6|x7Aph>Nnxz3l9gu6JQqH4cxR7s z7@=orX-vs0iyIb;wYs%RwkXay^(Y(&sT#m7jk=Wvu6le_hb^T& zN1YL=fK%gIB_L`y3YHgMeQ~=sjy>PwU}wPb(S7dU-ACscfBf+Wr>8F&h7nPcGY(_M zQGzm*6lwh)S9nub-NMp+tp4!wF(0mBb91?U$U=QBNF>HMQ?-BeE&|h0a{Ofm~zZ_#y;~G&_C7Aw-XKxwLws_fYV?4K4b-6~2afx2@z*PtM zWW|P-48zu7lP@11^V3h?^XT&jSd%jvUJ`{Nm**4C&xVXfLy9bBtsf8s0RUkb5{4mZ zk~qiDyf;!kR^$5nUFag|sxFm*aIyhi7J6~zkUwBB^0&%rH9lX+r{P|Z-&qoy6 zIKxrOYxMdxrlDX_T*9{7T8?pZ25-f5I>S}2HlVMYI%^lXOQMW45Q4422FIU&#wUk& z$nzLg%&Ycn(=jMp zmM<^E(5pfaSMIT#HG|9R(7M#mQUIQ-hE}-HD>L64Uq2M74g?w{h!+R6%$vK86bsE! z2O;y%ErniHb6!qLdHhv<>k1BB{j9Tz9H%lbrn5Ab`kcLzk-MQ^_MHQek8%rX2i43U z4^aVw@*%@{1GKgzX~s0kFv3;dscOI|Yv+pqDN)9G6w+Mddnr~~_V;!<*xx4C7fh4F zH7G1SG;ejdb@pTF^)1ghkAZxfAy|t9L1$}rBs*Z}nTdJ_`h|~V^|e2u`u9y76S*)-T?y-^E4&J}(bkoFl-7CsBod`y zHcPQakZVg;ShCzT&~p6+ay&1i=S%uMpDktBEHwSDhn7CQj$|CB1i_Rj&bW79v2(D` z<42#lW?KG#d3rh`P8F&sLGK$(?phwdd+&DVx$!wn**l~j7F8NsQVRNgpC^w#=O5pG z#e@5Y6nV^Oe94O!FNne^Q5bW1K4BV#NMY#r12#4{&_)x55tB*i`hQqk>j5BMd2@@g zh}utWP?hK86+8OJ5UktF%GmkW5_g=|hAhvizNb#dmDLtlOqwGuLvovQkZrqMJ_Q{Q zf}lg1#H6#7z1_RqJKW>@@4jL-OUQG@`Dp5z>6LV$Si0^Uj8-oN<+JmyfM~hCq?nh& zmqM_<*5l~th)?f-LZ<`6;RO;)5UevEjR~hSvfPjcX3l)Hjud{9zVqN%gY%b>$KZ)=Q5`3%)TWyOLla(J5BW|_sYQ~>AAJzpOI$UBk0woSZB zKQ2Ifb)buH)YjF$j*}EAx@9?A_oGXXtQ4-^xz^e#grQUS*+BTRv_>gZ7fYiW-0EAsA4}oDl2>9&&U5+2zqu=dt_VOjCXJ<^~ zlu?v$b}`{{7?b1*D?E%TK}=)B^}kz*+&40okMbs@`CVCS7x`Ip#;$q1JH~|(RJ@t7 zt{GuCjp_J3m#ZN<J zt=(OoJbH*91ZaNck|DeSztmbf|;X!n%0gkjbHf3?KpP398XJhHDkMWf_o$vDZ< zly1Z%S*(-D}_XsLF4v!l0rABP_YL z#Bt8vPR8bXK(8m6O%uZLl+opcot*=Aw$}OXn=dH}gZk}HhLahYwvLso3RcxhW;gs& zr9t_&nXv0ZhpaF}(+?PI_N6rtF18~mG7=KV!+WsR=9?O&e>xScx9rw%fV_^>ARt8 z{~Bp?dRDq|6$n|}wpal|N(b`XKPl!qxv~eV(5&TTb1A{KVlbuOWQjRxfR@+H8F@|l 
zKL7v#|Nrd0*OMGclIHm{bN3+0GeHYgC|=z&-7`CT4|gZKD{>EafBQY0?8(hOjOK%5NAg2!OWZm%LEct06MiK3KyR3+>|*8o6wn#=C1!SDJ&l=}KI6Y{)~t@Lhf5HSLPJhJX-s&g<0z zpZbuFtF&X2^q!aIXAMt;ZT_nM%zB!dl#)TO$ETk>!nhJO5 zVdJUTE-j^yZBJ$JKw1k%J31sX!f5xqX;!kML5LV5W3-hN%GV1Kf=IY{u-v+Wc3B>g z4|~K(K@tgck&$Opb_NOav}970eEkjFdvKpWeElVPTA(t;>yt65uiET(uBbN7huiB+ zLt#5zu;h_M9dD^F>cPbi>C?!k#HWUo$UdiKEBU?{V)G3_wVhI z&L^Cnop3c8qm-g36_d$~(dC5MY=%~f;h;}{utl%m=k<%1{QBY*mzP&QS%g4{h$xaE zTuNUf0yuOjg~QYC`Yhc47fr4UbJH$-`JS2_-hMsl(mfUe#GB^-Yo234f_1p@cHx{W zPGNC#)@5)G;Ilvz6UB~0DG)B+fSh?&GMnTSIm~v3BryznF*`eRE-%JhTwL<#(Gwm$ z+UKAD{2fJ+^Y7pPjJld*tOP3;27J?p*8IS2X}0>XRV!p+cWa-~R~hln0|#=#scgfj z)^1CEPHQYiYe-^-!#?-#9dQ5N0Z|+wtc$bAvJ_>eOr}#uH50BTS(aNSvy$mtp_TJ` z1R7HmyE;8qIzvKG*6~WS!8S5r`L=hB9@X(dJ8`sErJR0ISknkL7k(?ZtnRP&a|k_H z^_d3;OY3WNdN2G{doeu73KtL!Ba8>S-n0=lUl2yYZ=@bPQ|#4PEq%5xHa z<2)d52SS>3z<7NgXuak;Xj4O$%O>mcru_21r`)uIuVYEyl>2_Ocy1xiHLHw8X;&{- zY12rf>5dH&xQtp0^2{KG!q#R2tTh<4cXGOG6oumIYR=yFkVFb5lQFyd`y3wbF}iXJ zv(W-U>@21MMK`wCO+6TLTN|Hay8;*seYxlhsJ;osmHba-Bs&wNvvCC_U3l+}1!-LY zp-?=@6*p`YGZYPA(^U3--_`GN_hIpit!2=U`RtR&{HK5V6Q4bKz~$u`zaAfRd~(jk z<&4p2&SaW1n-v&S7on;*+%^DdDKASm%8cH-Qds5Y;wZ`K zCoyHIn2j?gamq9+FiDR%8IU9c4iAp_`tcLYw4i67^XhcOJS)-0k7+_ty{a7BLVql* ze;dYB9LcuDv|||zn`8AJ%R)u^70CLBInv{k`da&-B@_k~*6oQ>S`H3(c=F^C{a)e}VhyG=2oW)z&KX@z$V-hSMj63$ zRx+O(irf_dbQ`Iwrz9&nk*NAzqpP*3Ds);m8BBZ3RCJ`EY)gx^8trRpsdZyPx^=ia z9;~%gRR?L95v048uM$!sUVa3rVHAwVXu#yE_Wj2P_tgG1^;RWM%DNILQ`WVQM?%MS?rNdL|Qkqv;GpwL=m6fx{4JbG}CKmEfueD&pLl-Z2uFJAEM z*O#1~j~(!sm(1s-vvS?ZM=Cy$RG3P!?iydLt+~NXo^BP$j$oc=Y{wBIjz9{m_EtYz z&%K~hSnFy7Ww}C#7;9pTh#5`ilm>doXGAh)&>JF+WPfXiKYaBiQba`J87F5WW~rj| zGzBC4n!^&ke*r9P7TajIH3rkE*|D&>y|s7TA|(v^J&um{`NOwg@y9=WOFEzP(@#J1 z{Q0k>Sw?@b#nx8BOy^BLcL z^Eq)0&tAXc-P{3$WLd^wFmOe) z{O3&oB7+NGSd4ZY^eoMZq8KSGRwU$FU{VdIVnkdgFsFR%9mj`WH)ce7I&Z5$udmVBGAJWgz5MMEERS z#;gS)_2CUqq1S<|b#uy^8W2_SD51EB2!FKit(DPB)EZR_z`TLn0gw+|Y6~fq>(t32 zMWRX}C#D2LEW(z;I7!0WZdQxF^-2KI*AeEZc?{^g(kfunjYO3S6G@N}5-6<$5s|X!*ky1sTBD3X8`vofd8*I~ 
zgoya`)6e+7{?Gq|)AI|Sy*lIaDsxmW>y+kt*1Nt~=%e4jYt&^tND0GXk54~)#FHnF z(5j%9L?lUsjJ)C>OUgoljfiASUMfnHk!J;Eq0q*7t9`A|w>-Umd`KZ(F;{DsC=2cJ zu?!SA>ba~y$TG@a>o;qUuVXEJw(Q{%{LBP03YBY!l8eQ>3||%uC`pq1Q`D;ux&u-r+v~@-Kg4d;dBA_CNnd zuH9OpZA7ov!x-(opHvipN*TG9A-3Iq6SykPBi`_N2d}P_f+Ui}(z3ffbBpM-)ZQ`Nb*!`v3h8ULK#2WhGHhqKKJJ=A4hGOlCP{sYznzwTgQ^rt_3vo*$Ft zC52MtxhvvWDAyNpoDj!}w+fpEHFoit|Gi3BUxRlDAd(S63SUP;I^{A@Y&xDmyP{6b z`>;L;uRv<$_Se|P*vJxj>$$;#mE@%>WLPSLHW^AA@_fL-;WoWA32?%7R>?|zLM+iIT7B4PfA^lPpo$$9 z0@0yLg=ovb(#-V0(*MnlieYa_?x5!-GApuI30OF-G`6xh2=|Jji#sK~(fBYQRugAcXN= z4AFSi?P=1ln&-Q&NNWIv(FJj2`SR%#{?niT#FI}RF`rKO+xOq|(@(!}c`;_58RnVc za*{F{&B!v13nn&q+R2@)6=MuaDax{3q#HIwcx#|Fpp~I46Lp z3RW-}p3xs}ljS8_gM_3P)9*!m@%a;y-hlu4*B_V{24svtqP4_GpFF}W8M_;%&%*rb zP#bQ;;3K6Vjw5!q`h4;96TbWQ3$_Lcv*{IQ7iawCFW>X}^qi~d9FS-PrE;{$tWYRz z(H43#AxZieU@}iRyBIOc3W`Fbj7Aw*-BULSMwPtcPHG9X^M=tWitFI!06tolZ~J~4 z0XK=(+8O+WAhr=w2qLKoInj&)ghfP#XkHLS0x%d=lBXHMN*>&Q%s>D6PvnK+#fuYW z^TJV%s2%*bShnXz5(3r{9vnoH?X4kOg8@bxq>PE91YIhMEOV498~H4nPPx|F#m`{9 z_ZAk`kLG#@4&Z5{>DCu&wyU`)Swstx+?Fm96z%}3o4Qnp-MdX$$0LFpZD&qAug z#&1PC8NqKQcL3z0kUHJ0n{+oH@CyjtTLj9oB+E0hEN3_z5XqP{&B^l&DR;0f7fwN3{Voqng{m| z`O}~Oz~^6lMp+g-fBu3We)x&kuTGeyIawjNnih=5bLOeS8d-xOyD9VBo%}sZ@PGt9 zbS+)#Uegm|QObZUu(<3~T1Yg;2TxnZY02^V6@#5~ESBA^gkCRVur*-N@ALT4J&b@K zetgC(EigXbX=Q$~H`sHdTL2)we2TBdoK+0BRq3$bWJn7fkpLwmB2J8t@DIfDP3HMbihGrO<6!DHBu^|L=`z%RuDxA z#zs8-{8O^hdEAOuCzQHDcXrWV{3aHxDg%WBjHdC7q?Gh}LxzJb%3LvJ&b^~Oq>PzN zuDG}ulcgow!@e)LSU8%RvM#ozX+YhuhA6FE_Dp~6%>c~R32DvFYK<}8)3D@cvPnZ) za)7^XHBPHUp0@`)glIH%jTfXoWRcw=g@0E5dm#mw8Xi^gA=hqG8*shJdVL2#J_50} zP5p54c7s0*cx^ywL!lH!S)vW(dCApyO7UPHFzoH^qVybt)v(voO~z#wd2SdwlclSA6yL7etZd`1KjT zK7Yya%QG%UGt%6U6@uwJCo4)<@JL9E(e<_YFfDy|^6n)p^Z|q{1b@R48WULU=^mnV zRF_~y^j79l!)TK7>ghbZQ7cZbhEeay38Uo)PhC?hbM2IV2N zz#Bo{U6N?C<=u&5vQ-u5ybq%KOgYPqX#_w^buK_axtD$ zY6G`Q+`YL0(`Dgw`=f2;(B>V6=NLuqSgqXzh@u!w zPLVtMK`-eu7!(LG!P?T7F7O4`Y{#i5mMGSu%@>!r_tkP@UA6V#reB{HFZ7FBzJvfF zT|u5YXw4&jYpRVvAeWr8p-55J<_MolKYy+DrGkWE*zfW9(L?^_U;df>{e6yKzT}4= 
ze&UB8pK*FVVV+6mIn2_6XVoWcIINBQ`Y{_Umqf~+i_wQrMjBmgA0+lP2I%b|JN)6h$ zjnTzy;i(~Z*d;WAS&w4s&mMVbAR)_Xrn97M$jJFyv1!Ycz`R-5RSTIg0 z^wCp*ba9)7^TJ0`5DAGembolQ;)3yL%JK8(?C$OJ^ppEsjV_tb=C0tMjBrJXtq-EM z-ZA3#FE=MGqH2SuBO|cBe+1qGX{=w%jJNzpemuB3B29qO97fg}q|hWu#GoG|fbn?D zbP8HoAYwWznWTz1N-(mA7Qu5PusBPG6~f2JxU8H)y2LgD&{#^XToyj6*Y7hJ49Mmg z)FT~+*TsGzaUKs-0hX#RNc9pB ziWkBPC8xP?+4ed7R}$+z7nb3m&*R63{MY~Xzw_Y1eMVPfe*Ezl{`&n-{POG-qfttx zBxwQjRFM~T+Ro;E{T6d*(=~2x1N-fyy8!<7b8fY)>(^y{`X+4;7D3(0Al0yk*~LBz zEz{Z+2kT~fvBIK^g2Hk&wM5cl)Got*f-2_}g<*GRpW$H0`0|pkzkEt>nDG6N&roGf zEFzR&Bf@;W&aEvsCOU;M{JZHiqUG`RdHsDdh@G7Q-+c9yZ@+xX_AnuxPncX?F}fUc zc7DaH;|r$K0&P7N#&7(p)acSwiiEY0BngAz5T!Ju@fEqUC@nEI0Rs5*R$xyh3N)hM zYG=IGyxnlXZO;9g>goZ9p#B@`=iKpp{MNE=OEydd_atjCgOp?^bFuI3OLph5; zX-iSM`6mR}1Z1WVa_+3(KbCy{`BRSecKPMVH^Y7cY*b><>ggJJod-}%Mx`?G;yQQmqh+e@2Uj$Fp zKy&wYLr=&Z1?wY_Rw~|GIU&`e>iFZDJTJ(z!g)Y|`7C8NowB>LLli~ydp+VPLuu_O zqP8(Peh;b54@QF zoN21aOUMhC^ISJl3PNo~`S?NnoxF_%PW&oGtxH>}>P|sR*@Nb0;x>D&HF<6sU1^e7 z5cdu-NKVeqiT?Hj-+l8nX%Y$;gmD}@uu;`ls*Ral;Nf)4-v@lWE2%m8 zUZGtVjO?vij`D2DOXc;}2!Y1VGmO#{>73zki$_Vqy`v)@J$yi#8`9jaO#ZT@W!!9H z+9iLt&k@!WYc0KA!f-fr&sH1y+gn6QLSB?)dBJQp$6ARt25svaIj$S35~`K=rkXr{ ztc0ry>dNPb;V84c3YkA$rVG|-Wu@flSh2_hp#bO@xu>4@Rz^*jbEO< zOFe)fbk&q?|T_wOBYa&pGW*$9`JKJxak+Uxk+sg!QTlETqO z?;q{)&6iL4hgFpG>E}+C*Y~0%Yr5t`0b8w*gy297pt$1gs#- z3TE>Zt$gt_XQRa!jUYlqZUa|ZEsfFndyxuwgrI!uQ|{Wje$BPU?*}ezPu_Cnw&?}e z)*`GSR~jL0HD}AB1o~WzM#v;)|L};XpMA6j~DnBFM)00?M|I{jgm%HW*lqaSU%ET%5xu zy1!VxuQnCwX6DdkUXVAE2L1d^;=!6Rt&3ie#SD8(y&w(asGkge2&v03P=UQV zv*Pl0y-m=0S8fDAmR|5ZT|XAMYaii4*S^KKy}u?0C|DD{8@WLz81(x@kwhtlQVMG= z%GV_r4h9sZrqHIcWvrFP22|NrIv$B;km9P0Shah5g`Ydbesm^v>zuZ>fr{#E)=@md z3>Veu9L)u!d3qM>Ddd0+dJGl zI-o2|l34Qj(@)S^aegsD<9Nu3nv>pcN#|lq?yb6~I!bhTQ!Cy}T|GtM>gWu6F`s?% zkgvad%HjSF#e7Pd&bS;;Ik^~de0ssS!@(Z$iy-zW0mdU484*KpL9=2}{%)D6aKuV^*_$ZhBi z-FE$+f&b06H20b2eq(X<9>V*UWf`hiEvt-I7-Kiaj6_RFO2y^X6-ZYgc(CQ>;%t^v zlv6i;oBOHvZq^UXx2tho(F81>KC|l4gm{X!Vr+?`peI0*xB&FY)tKpI>h1%H4lIhi 
zz@nJVQV-s(CvVxRk7_@Q+l{RiKH%IqinsO@wNAW7E8YlN>)!HbX6t9+UBy+_wT3#> zj5}RUiK@Lc;d%>mruCazi4V_XS+7^*rcF3YNL-M5du#>IM)O?FI_JSaP$frbj14IT z)-HNMn&+;)NSoT%?f)9!y`2Z-cGi(Qc|&PG*lM`G>DktP8)zTBexE3coC{TJ2Smz} zUNU4j7&4z{l!V;en*#FN;G_Pm>1kMF#%LZTLNJ-^xDwmxcf-l51)#nP%^>yG)?mYV zL#jguu#zO=!Tm$N`Ra2XKfKR)G~%c4f8>{+e&zM63nr7Cd8)}vi?RZZaF*gb@B#e3 z3d4siO{Mlt`Y=!)7S8L=h&GlqE0IwKGDb!TxiY*yyJYawbB4n$hC>0uaIn9_x8Hn$ zwLdbR%qdFaL5Nsfj;`BO?`^ye1tBbZmGQtDj`k0E{NNsYTLWSYvpi=uopU*!@Z!}8 zCzm6pSwUI41Rm0FqSjas?1G2MsYAo9AzRzqltsy8G9k}PUqIFK--RopXN;+={T29V z^sG~+sCf3bLG}18L_jNWU37W!q7m!i3kV{d@t^`9{_kLXsS=93jdX^`I&S^D6|a4- zxOCI=&}^d(T6=vKXJD`{HH4Jj*ad(f%}c~|hLkZ$uTQBgTf+edd)pL6=?jB4{Re^O zD+pXnK^rIrrTa$r%A(?LjX=f;{aznqEwlNYEX$ET$UIMTiae(*OO%1hbVi&c)f}+a zdN3~*;y7G4RcdqP`H;zC(&VC#lr8=()W|a#kNVW&MY%*R}KX4O|UG2 zkafUlnKz*V4`Ez&9#Pi|;))P12MYcm7uzF51wxwOqh71(^Sj@-FMb;y5Yf1yI||mF zY!J6;YHcX;5`iR+6CxRrmj&}IC5l{erCu+g7bm0?6>X_23l@aiXrZW!2#R{ezS+6` z&z5gsPQ4MqD}u`ya$sbH<}7v$=8LLp#fqOE>70 znh!(u_-!R@z^za2I_g%W@-#NBT`P};Qac(JGGma$7$nnq%FEZM93JlT*{ApD^&nFv zUw-j9MUnC2ProvrWX{WH8@oI1&&AFA_U_Mt59lQkpMLU~hxd<=)}Zp7Y?d+}Pq-XU zcztrgcse61O012rR@B+Sgp1p#u#@+`ZVk5>4EkhQ%5*aA05~_F1Ne}UO9@)lC$J9O zd>~k3sdVGy+#1_fN-AEtrSfuUqbrXc+*Hy1IHbO3i;%L?d26eArPcmgTyjI z3W*euW(CG*q=@PDhvcPUXJ?ng{e7~mBr9?Xt(LSQ%zKuur*F&Yx5m+W7z`fsUYKNk8hDBG-TkuffF*E$fg?8$P@SI$4L8t-6=Hp}llbb*YRCKHgyi~^pJt}xX z>T7qwM@`du^Lc#-KUYs6Y?mvZ$1D{k+rELsA+B{aOnLF7^!(yYK5gNzf(vSd7&@zb+s z3msJ%o|N(RFLNs@4NHDx-Pdlj+K zGGQ^wdP9u#ymsdW{upL_cf2&$$MF3!%2`#x{7xJQ4bBMa?rSQj)mRi^EE-f~iKMH? 
zRhEjfGzbyV>rXjK5~kBB!@(~5`@4)LSIqMK?HlVARRUFR!x|jub=%uJYz+sLMM0XT z2n0#5ht`_;e8y}xBTX~3h3Ra{{{A6R6t(6^(`(RZPW}A`Fy8_pwA;QsguojEn@Y6@ zB<|MTT(`C^4bTtId6RjmU_N=Lze7i%V;7XjNZ!@RFv5!!#D9HAUevVf6`r`{;yq++fDe}@%de4WO zyUWsXE*lHod$h|BYaP*5>#hN~HXiZz(rxw{5BPg88{4^tg0b=}o0ftV)=GD|pxSR5 zA5BYO$Q)CDSGn1J^G&tw3NQk#3@S48dp(3y6lKB5>4?LKAz8h(E&x^vd_hGAzpP(RqbR1=>tU^BHk~t@r@mH2u-3YJ zGgJjuJZ&SyI@oGWs;rCkE#A7^hHrIQ8t2y9+tKWdb@UTAwv4kPhvQdjDB@Pyx;RP1 z`me8bjt#Qxxk=Ife#w2iF(ukxTdMvFw8OAn7~z-)TDtv6`veqORxq8-I5;?9I=uoS zhQk2|`}-7HlNN;s1xxO-)htV0_abCN!c%p^v9^1L1qrF>w|BPb^#@#BoHL)#iKHZs zV~jDRX~uj$rzmo)h)C0v$B&;7#c{Vu3*FbIQPoXZ7M$(c6F|rkeWCj%Fva~B)`VCH zsw(v&^bZzm7tdNdxxA7d#zNEO^cF)s#HeoEE(C5I+Ge9_JW_cpysm_)Yb`k-g0G8H ziOP5ja#cq+z(*tk@Mwb@A*?HuRat+Wk`cvj z>qruKc>jQJzJAJZ*kd-I^2@X5{M*0(jh}x#V>C15g{Aad_)1X>rCxnI9JSK=x@pZ0 z=oIK$`dB&>x;w!)6#}{*E_`cQP#C+<&D!qTv{0bTbV?>J=F{&uIIKLuIHIr%1+1V9&s=nA8F_mj@|i*vLCEtg!_HkSXo|7s(c z@3)TENNbIbT&m|Ni4j(!v|&0+nM~(Iam@MUCA+)(JbChj-qw&8$H&amtV&eTXsF*8 zo5r=~E**+0hEAwcw1$>16Jrd$Uc&CqE|H9xTuqryrtIzR5J{J?Wmili0OCcAFUN>Sij4@cz_CN^V2aQMKQfC{l35fdIgzY+_ zvusDNGEgXb&6+1Lum zxwk&ssuk2A#DN!As9LOiUhVe~`sI#-btfCe+K?eeW3WgW5l1n|7^N-qEJs?66^??C z^b+DY+MFiunsQwlRq%03X%~Le40^0T78O9Ll#itt(@o$cmhA8D@c7X^9^5}7&oj=> z&-vRAKlAd{DVJ9%X(1?;^%bWbHx}NFQhXk6h#rWr4f&)6O& zeD>r4FJAl%LbAQH&x8B-NHfiNJO=?%##rhMJ@u@-o-wAVevr40V^>_oWMopS?aXIGT;D9{OKuCr= zyBr=KGM%Sn`6-}SHxAduf;HIyt|Iv&|G#-`uQx!5h&<0p)09#vM@f{DvMA9$Yo=B% zan05E3IG{J$S6jLveVSI`SMyxH!1|M`jBOP0Dv)=5Vzq$m!M8lE3cz}yf@WRh-B>3 z?LnzrW|-|^pD1<{IEfSDIPqzEms^=`(15pYzX%(qXaEfaQi_Gtecj*Ut+0`R-R*6j zJbujn-Y!>HBZL(E^6VLZ`|%kskI%^pL#c$PXk4>S!=2p8dn_R?0^EGiSfn7((rsYErQ*Et^(${-{aG_kHHw7G{$nIxe)Sbb zNb+2h<(h0hCrvfymlIx}UyzpyWi3`LuY(fAn7}I#^!q))GMlC3dFj@(B^zxB!nVOm z_z{WmYgrJBUGUW2$@L}B{F*_$i>wvb;RamBN#ne95*M3Qlp1q6BI)-jRY_Kq^oLs< z+`Gr&(E*d$3~LQVQMAr0#NvLpI?&l?hx4p~RT4#x0v1IPS(Y)K%_z%~D2j-ph`h)t zN*91$lqE$`a(Q`)wU$UY?@rZ*%2Om7ebfNIYvK+nkisHsVx6pt+i1ozA?14kLe?b= zXoEw~i`oD}Ty{@`j9e^8h@V*rLY#Ye0T2pNKd+lCfOo6Noh+4_*XUl>7?jpn>o(gc 
zj*v1!D@~TBu9%Q<>1m@#((5HvZv3}w$=Yw1uL(`>hp1>*?)Y{(-u7osWWz=14zLXR zeGYc_*d7kSXwuo7)01;vyg1?Hbj*CNQA$_fqZ^dj49>h0C%4iYzyF>9tlyyDrF7Fw z-gY}e7NbC)08rMD`3!hx7nhu$UC{6Eptay?JOeBb?jLbkVFxYfL60fIBWfi%PVqiF*Ui}Q4yPi z(8t15@f#T8I0hleiyT#!E9bD3!c(n2zPY}zhFNEsuU;R~z5bTc9w&=^7^u|N8t*~S zSm6q1D&=a@%x9_ldp_s*&iemT(NFAFO01~cQczF zfV2iFEd74syd7mpmZp@YLPjxCN{XUHl_jN8E+eGU%w{uRq%x|(dqc5p(sZoF7cH!5 zY^z3GJx13Zwfc9#ds+3p5591zq_20r>&5xrP~Ko!57fXwV{cQ<+tjq0ekvUW-2*OX zp!Idwq^Q=pWgu3rTGLkmUid&gAlISeEO6!CRb$=*c{dw?w|?LQvZc#%rH!tXhwvPV zqCg6PDsr?oL_%Ve!l)8S#>P0lQhSstH-e zmef{(D2x!}0`np%DsFrvB*Q_%{?3rCeviSx6(#%2|M)jv9G`M=Iiu7Tqk{#sxd<-a zZ*?!7d-~92Azq=gAT7ihbz(rO#d6I#7v66odA}oB;JJ#;+}GctJyt9qcR>Q3rdG1A z*G?e>O1TuDQc6mz5k}C{f**hRmH+(De`Yw`CCgGK<16mnf55$?eJ(H0nP)j@iHv$k zD>1^yg2?uGSjrW)HE3zI2Yi~{5-By-Bn$>yy!`c7&Q3LB(HN~s;;0G;Z;taQcFMn$pmmJay7jDrH$iEI zv@WPzcn@L}Xg=Dqv$aheNy<`@7mB=8elLzGvyyC{V-56r36t3zZ3R)%Cysj_fYfeI z>*u^njomOL0$4#=YCt2C?tbHH(9|9cutHagb?8^1@@6#YWY$_2{Ew-Evb&(oE~%2P zjYc#)g0gXr_A_r&%3AcVHH&sP2z}L>(x%9{B7&7iVudHfw-|}R_%+OUcZWbAbf}>2 zDXSs`rq|iA!g<{N8rSa~oz3CF&S%!%c|h*udSZ>BEK1UON}lD_Y{>JR*=$ZEqXxAJ z;z%w`z~55BqOB7yW3mrYz-Ln6I7{qb$r6FzrJF0 zIVCR*+D7Oll&g=?B6?l9^ZtCCVuj0!S18RqSDapqIevZW0+17S>ax{B`t46DvkwNn)2$!YyRK={;#}zal++jN>;c9_NEI=P_UY>k4f$T z$j2yd&T1c2o|Pr@H0Lkh|A0ZVvwy(FXv|;#`aMx3`R3bi*xDMPjUr^yzv<=|C|JwB zQANh?_7;Em?hl-toH80+F`3L5UCo$GGp6&LG%wKzjF8TH-#S;Y?A2>g>|@xF5+f|7 z(G;qrGz#U@%`WsxNI`22?NrBiawj(zM*zeZu(duvl?Tq?L9`_+3bMS!B8lRJJXieu z^D|z&c)`8<_c+);@SYEaG0N@DQWC8&5LPstB{Z955@J5LhFgFo%`!?=B8BvGNH9-R zvOFg*3QDD&7skTXXk4!gH8T=t{0lv>?$zlt@73*s5Ii6V6e|Zr-0wGmkfjF26~=}4 z6NGrq#|jWq>r6E;m0=7}e_wr8{dN(^g!j=kL5L9I4uIUrIuX>#u-yO+n{V9jA)|<- zmrxW1lgSjVoPrTWf+Uv6=-o5pmuGuy&?%@r9RZuvyjQ^5iryg`G#aZl62sBK9tV57 zB(a-(S(fqY^(n{4XG~`WSz#!YrBt^Eral(A10Wx#xJ})qmbF$;D9vo1aeg_XC`#fu zVltiZ;>8QBH9UFpgu$Ti*8+p63R`X1eu5{GdY;$b4+Fxoy**@mYm0-!1CEbhkrxVO zAT1QLydW#xd+VdwyS|BKz$RE~T`XG<5EO+%DX+Y&;_Kgy^*eb7(vFR=7;IIq#~8>9 zMNufEh_Ti(8eQ`1ufKABcFz9(K3iKu=K-*$+6$$xu>RH*L1A5i-Z;WqKX<&25Jirv 
zl@~63wbB8cyVra+!x&A_ODiKGc40nUcTROdNa(YsMFn_ z#iLi*I%4tfkSenA63y(lub*#Vi#8n{v5XMbR`o5SYT#RhBZ9hE0fEbMfpGN{ zEcHb3{UWRZYY@^R4eSp42wjrRr^w-u(PYlgzrN;jnlsA{ng}6%db+N)Bs6r%Q>c2N z^Wm}#JFV}l=hM7SV*#MeZ-*B3vTE0D!zI_-U7mBNot=g_FxVpFFVyqux@{L0?`yDK zHhGp@TGrL#JXP!Kk%(qBtLj(X_qPvH7rEtboUhyD!X{Yrg;!@*>0T#{HR*59AW+76 zLSDQ$25T4$`b1I2@$oUcyL;TfcaO7^bIyuOmr)6cw9cYi_mT0>sZRMSL%_K8#se$d z+_cVPA+hv_eeONHhq0FP%S-0-9Hm{hmcnY%vZOTHuQOqun`#aSOm$E0y><2$pF-4V zOIc{j!e9+Z89BwmKT8pUbA_XyEqf*(nsoQ{H}TBc1;wB{!>)7E^)W&OZuv^7*Y)}* zqEZHfqPy^`g*9i>_qMfeFH^kg6sCUeS%Z{R^>|!bX%8^G!W#ClV7XUX`3t&vVGvkj zkfkP$4RJ3uQjyMc%F-f8h$4%OO2Ab^-`m^c>T1S3Ef8Ay6I-lZY~#YS zS}1cOs+bkGY}m@$?{6qc<9`S{Xr=u79;`_8c$7T`XDSoi0X|S~5mk_MGRaZkuoL+j&5Kr})oB`LNgfCDH^2*F1c!bQK}H;_uF(o%hxnbl_5g z&x93YtRPKuthLBUA|gQ?M?{hI3&1rSE*h`qwU;%mrMHUu=e7nX)$z7*pMH`wtpWAK zb+biah=gEk&}Z0-i6xXp$;J7I=PzD!e0sq=)09S_jI#pYPF3H{ViB#;(S7v4*Hq}X z={oILP!7WX{OfIvbGIeeYR1Arn~VtWP+Eh3x!laE*gekAHz9FVFD2T*)pW{uGQk+j zU@$}}!`az6swg=+I%K#t^c!ux5sJFxX#M<~w_#n)4{0S57mO~Y@aa2YFzhoN4j5lu zF_}&ojVDa!DS4?Vl%~|m)gS5fhu_hX%iC9j*x zO;?#!-#5X>O;@d(TxZEuZRZ8li(S3p)gBN3Ny{71JcmFVM~W;5a_ zCXOSdbd+e(tT(gC1G4Jzxr^7H4%_U{#eX-_v1|u~c81DAGn&7re$HMOCy=(IhG|R|}f}&KOb6ygRab3;9u2_StH5hA%dkGdnQ5Fu!81JQdUy$*g{9Q;z zTdMX651joxH^zD2w63k!Wmz&BjW{_uAxUDkwzvJ+HUKTrRV>$wKrrYJkU~(DB}&)q z2Wu^ba-gJ8E?8b`?WkWeVmh5-t?}`eJ`g?f)a}lm9QN+T+}X8sfl3WN7HL^60I><6 z7_)Hhb+YW(Ry>MrzQ5Kf-$=SttcCAsfXs?!B^%Gb_A0ewM%pn#;q=R>Sm84%JLAcF ziS7W%om^XtpNt`eXHW*LHDr0_H+f4># zQ3|XDtqk*7#&j}AX=jbSnoN21>NRCia(HmS{{Eh$Jz0%#VYUPVMeVU{gPZ21>TD}6 zdju+;Bw-%a+||HQ)kcGWBysLjRT}a$Mt|-H-J}4wODwMMh(&H31hwL!Symu?JXSc7Ho*hw|6lFX zZs!&VpNHQmEWKVrFYckW=JN8AI8G2!qK%>~3l|WJqj@bKX#y%xA!g!k@0+|Yw6Vo@ydUj)m(m2kZ3CM1H9OpAwF0}$YHrb5qqTID84p16 zykIt+b9FUkdwYwmtu69ATi>9@9<}^hZf&u~S@Rqx*)+V>Auc~ zN0Y$|Vyet!-s534bzE(TR}!rl6Z~qs+PiM+>w8`4F0k>1D7Sw^!?P&?j(q%!G~@+@UcjYk(8xnJ};PD&57ea`}+sXrZZl?c*%5f#qQ2F z_wOA#zIQ!?s!NF0xt9Z0A)K-{91cj52(1jW*_?Trl6%0A7X`+cD#C;nuPQs1_0^6; zFsN{AT~QR|S&r6vp)JieEp*j;qSSxb^6~EK-S~#!$?(h+LTo}aM#25Cagv6@ZH%e1 
zw+XKVD&>^^G|e!^5GTFsHO^0uwK*((nS$+0%hUl=kWgi z9{qlgEX@!?lBOAD=@g;TS1sR+X_sXHa~m$!aq06sM{Bk8*LQL!Zz?sT;ClP`f?`pC(m=2ap@+qY4Z5?^m;w4vA%$(CBz6A&z6W}jFdi@IT$dtCeI6m zaCJoDICl4At*2uxrUy zT|q|DEi6k0>Wwb#U$h2sQ|xAXmPheY)~LP>KnzhN7!C#u`U6IzF=?7J8c!LGCd_6T zSze+HXzN~;3TC;bbklb(fIIm>Bn*0A3&hplaj_spp~&)*>1>WMmV<*Mq!e6UTrj>G zlk|G*@9$B~y#VJcfZ<+J)Zjx3>DgXR!IF{W-o1NlZ*Mc3&Dh@BB90T6Hn;RumzysC z>)n+5zi?JSYXwD7I11L@R)juHe$^!Bh{J~%)X&xt9hu4&;u%TPytQexAR{3lkR-=`s zC<~IP2i6tui)4fr+NHy6oOcys2B|>=K=CZTd?(aPz!-gAm3#hOL ztrbbK&DPcyWvMv7xTGvJmy zY&3rTCaqY!kW)gt2fXFtEtY!0JBM_xVLO0&;rENy_f7WrO;rtG$$WiNX>?mtiK)J8 zH*CqD&3mzK3VQmKww5d}xVoCMyR*&V;Ssahgz0p~@$oT(L7&e*f6C?P3atax1R_7* zO}+2CwSe&4-8>aik|Z&QM~C$LeP**6GK$c~Qj{fHIVy@2*Bhr{4YMIRzOSQXjX;&g zx3O`zwmag)k|FqeDHNOT_&V*n{kAVrBbOX?!*Okwd%V|OHyE-i#j^V(m+iOL=+o=W z-DR?91s4Q*a@{uJt2K0fM>XZJ*3+Y_xDoedQI-^CQN?0N8F>)zI#2k)9LDsjyQqzT zF8(UaTZE<8?=#O*XMFS?7;6k=;pmXV{{PS3mo`VPE7_d{$g0xPeQ*2Cvu9#rB0L|* z|Nr02IBd^mzt^`nNmWG>K&C$=0D?;`l1gfIH_wf_YL!S3B(dajGII@Mb+7XrG_|(T zpmml2V+eIsMGsAg#s@?Jc;4G_3@lPEN9w(R8cR^dSjS_M1c4j^QYdxkt@I=@_Qf#D zIycA0N0v_V8UrMeWgrb@`D%U*mboQnj3ZXiWA7G7`-+xd3#9U;A&NDbdL;zq{zC*Z zfN+cjf#f-v*V3Z>;%@Svjw&E;p~&ECUgtHSL1NEsWBB`Nfn+!k)Veo+PFiR(y(kLq z$_(<<%m@McpUH!n2J<`dYZ+k1X1#_bgU_G;z}Ij0=sJ(PhX#!UJR5)k;+}Rj-Q0#2 zrb*uQ5tlQnZi6qz1-xe1A=BqIc2!>>vG~b3ccyUn{C-TWRukdxnaCT}5tz|?NkQH- zEgb}(l9uGO5>jrCb949{FXLFzNS4t=`;r(y#@a^k8IB=v`sn9hL#!68^5ETo-&CLzozki1il;;jH zTCc+?2R&Qw&Ul^_r2&Zs3lYuF#hfh5QFsxf>qRnb^iLGeEUEQmOvk|3GdxybmM!|OcB(=WRA z>$$POTQV|6xp~%tkDdl(q9)PDF&PAQXuA%b@8CngX0yR|`v6--BVS%m)gUJ*-gkT@ zCJLvJShOzL%Y;EhxW2l=m#<&YG!4A-nMUcnhjR{9RjJHD>Y*WHu->fE?0490x3I?O z-fMRd=Ybduo~V|G#G45vt$6M!sCr;Bp;Eb17_xw*;VQ7Du$@mFU5^2#UdfqLK$1+7 z4XOajB4vpdo(ml6{AcjT>EO)-yC3r;@D6=^VjPp1&K4ZUY@c#YpuvLvOU%%nwAD{5;+dcl_AN~RL%0|kQ&B-Bv z!$y^~8Q$LBB7}g4`v)|w7C~*B4&H|u55?m;pR#=>g-io$&!BlOSnvW!R{9BM43W$n zg>Q|eAOPNbH2Vfk(_nM8(e^@8q+?U(j%@)DdiuAC22JmM4uX%GJUD1vND6X>sBtVM zvSDW2-QD48bA_9m8&t-qe85sDM#KAiY1%m#F&cWiGNoK$M8#sBd@yQb5c^Twi+=x> 
z(tDTm^l@E~p2l7#3Lr!M`fd8-O%5t43|J$Y)}nwccrutd=i`@R(ed8r$!V=cy{aRh zGdnZSzQ!)8Oea%1SYV!wJz-|7Rx5n{ z`W0jhZfDk5p#552)pzt-H^@fwQMQ<1ZT&c$>P&Qe0m2_>X#f03FN zJOR=1D1GJ{i8KYR$QpVJi7~LYDi$_+q);$+vmQAzE?S4kWw{45Y{!}kfAcs06`wzU z!C(IJSKK{p@v!T#?>wAm1eTa~7en`KwCJSIf&~}FD-EejY=(zgXq>|zpTFY%euvd+ zgZ26f_YYhArA-df(2Oy-T5oWBeS<&$&7V=%HTL@k-UWR9 z@(mD*6ju!(?+NmcQ$lzk$i=>#wt*~I@MK&n1ENS@NVA_0TKunR_V6J9(Db=uoa^07 z0yGh6@lRtAg4aTb#Xgw40mekyc`=7X1Xou#nz^p)u&OL7Yd}W8C==`)>eD%WecCe} zC?UxhQF=hQ_go}T$lRy&gp}TnNznS7>@cIWJRgrR{3b654O8?^ln^sjLg<7NLPeWP z5c)5H$RK1T=aqw|R2EJ(p%e+Vc2t#ewuvfSU0q@O@BrZe=Nw$;VXcKQ&~*;Xk<&=g zsv$t~7|gAC9NoA^0GZIQrm$5XSHmK(WyBWsgFa_+0D+V}i}h-SKmF-X`1t8FzI?mG zuF+JxFcuO7k^)U~aWcwW^aU5jFaKO;GPUHn6B9iY>8vjmkX~Bk%3#7rwL$$Usg{R8 zJ8xA^kq6aM(i9|#?zbpiX%sn9i8F;06>$JSQmXY2RgORg7yf|I{s!DBEM zI?GtDkP7mVfi)&8BT4Y(xGqQl>bjo92lSwsk3sMrzGAQs`1bW1K79Cq{l1HPS-<}e z)_}zOM<0YiCdDfB*IuvJu-1Zupz8v>7l;^;sdAi+saL308(3QdR*Ulq8SM5uuv3}g z`sxZFKKzb-+hNx@1kpqg#%N5IKC|gFJr0$7%H!vhtgIl7+GdXHP>fJ#n7l5DNan>E zL)mB{QWPqU0W5=PCkDs}WE2;n=c>m@1KlxLXHp8~al(*eO5^lz1?TPnG_1qG#6pA| zW2Ch9Qm=$)$dCn?Av9E^l!?NF>z6Shsc#E!WWl4L^vuYMPF%+bijyvcph0h)i&aO9 z)NL=!pB`66JqChi=dx9WtLtlg{Od>D-9Mo10)#Y^j}L;teHGd{j8pF|Sn#VuH&@+3 znSr+R_;$a=Zo5Yag6pdr0KmWh`@f;8Ds0y4xLF;aL&5?M0rjfJdZQK4JEw6U!ADCa zVgd~@F(~p-74+j!>Pqhgks0N1!Gd$~z4i$r1CRj+M%#JZ-QDB%_7+uT&#pgmu+^f!0>$@P0KsiE~dww>}SC`d1(yTD)EOIy@MP;AE{Ux3mhp{W58_ss2V6VwuxA7iR(x zVtmTY%?+BS!C(K-O6RQ);FxYN@y%nIgCziU!Gbd(lrrh5pwYG+zTAC7=R2&{YqV{P zzy9$NfBEkR;ncL51ZBp~QK4WrNVk(G!;N=OPq zFO-n^f`o-a!aP+_%In2E<4KV@x1TAwK2iSsy3QeZhOzcZO0^Kc)Kfz%YjfNub5Lu!wOV*ERy41w3bw9L+e-f(Dbqxh z>xF2#UW=>iE7Y~kLOu~k5Gd6hh>bMt7aW{z<`F?Z6=jKqm`E^EwzE}L3m?_kT&-d18vpzM{2Lw~S_D?hO(+)zp%Ig`Xm3zB zPylB2xY(p<^?c2|0i76UlMyy+2_E-jG+s^|e-8KpjhvHmdA#^Oc5@cUG*ADqMO@;E zjGu>kllw0vu}9@EKLUDFgFqlb&^8|H6{D^;xVruw4-fbF z&;R=ofB*M?kIiO-dQ~fBgJUXP$&72|s#CyfW$^C(HTJu`Dj&gM+j?|9fRWa%@*(v> z?T0Lm;~0?AYAmdjGJX@qs7j+y`CWT z3P|^72{CRnvJkqigL55>F<7rxSl2b2cZ1Z|6yp#|jHmMKK3IMgV=@K@6fqV0iSkIY 
z;G-BPB3dv|bk9lqyHSURUO&aGifB4v0$}?Uh>Lz15~Z%k*uHWvWEpy0Gs`5>OwH(B zX!ECJ&my}Zjox#K6v85sV>gl#(#L=3vl2*DT14MZECQ0^jUfz*MG09{rj%5Ok>7v^ zq3ryE^sfBh5iGBBmYH?9Pv2l{5pdxO}elJ5qM^B&`GPAFf0hV`1`qMNQkJ6Wg=O^L_-{|a&oKs6 z*bD)-cW^%FrhVv%%?K;KAUHGQ2Abbn#a!QM=5vX5AG;zw_M9j5d*+Zty-j=MQ`-1m zNFF>Lc7^2_`la-LEKVB!o@E1x#q|=`S%BaLfdx&|>0WMVNOhzG#+<>vohhLD`Q9*pE!5)d(xHzX^CrZ4`S zDeuYJZKj#COytw$S|MhM0a!(t3fWzn=9{{27x6V5X{2QW0{E^|4l1p`Z5 zUVJY7fEO(Ii4o_i_a6IwgSK^8uQv!m@YjzY@vz-vwOYYeHq$i$6heY-esyJGtij#g z9roKDy3S$WG?O|#0*vBF(mXDI{v7Jvf?ortyu%_G(1el@Tw2csz;3_qVEr9vXcfe<05v!3*odj*79&~^<%=x}p=g{!NpNa2|(zeF!TXZtvg zDgAO7V1B9}eVj*6;~K?}l)tBR{^NyalHY)& zmkedMMFClG7Lwpj(T&N0!J!O7Hv`L4U{-rv9B@^#Y!afY4hNO-Jcu!J{gr*8J{R@j zPWNq4^n#>`yF1+9Z2?iGDzGk~9C)CB^j6XZ3l=;HaZ^uO@|vc>I%dCe-p6X@ znwhSus*wPq!QypY;pXNB?XJOoyT`Y$clh%4%V1;7py`Yq&f`S^S@82A3B>}5G@fZu z&!%ahY=WnG$h6uz!%$Q}q97eay&&M|fr$xeLeKzjN>%}&_b$YF>s+S+^46eU)v7S% zBoPwjhSc5HnX{@ia4rB@et*mh(jPZcSV~Vwynil~k8}Ey6-*2>kG))sS{B*{ipKco zyz)^Rk*A7^F=nX)^42isSzZLuGhcEUEg6WB%+H(9BgfVjO4bzZBr`)|whKgSN>7oZ zoQkimeO_pcwPkbV4Ns70OouR@1j7KonMnXuLQG5@x83gW$H!0D?KCwgk;xV$7S5K7 z=b$`)+|LE)fef9s*#Eppn-?jw`@}dJ%maebBHzGs6Chh;y>n>W7WJw^T~`p%dO@!9 zs({!k^2t?jNs7bO<_iDi!v~$So^gNofc>t4wG~7>Af)SAA*5u3VH9G{eHKHi$bw5F zt<#r&#-;a~X5ulQN`<@xj_qgVcsnN!&cu3N!}EGtPJcb6O(#yF_a1H2_Ff4pen>j% z8D%~QYpjVDBNULS0U-n*P}vHrdIf7*rEiM>Q5>+=!h4M)xw(0d+FEGr zfX1a{d!^~uvN;yQ__@u+(hj~G3q3P!jbGw8k9w zW|RVamgg{j*T<38;CSM!=vB6p9uE;jHwT*e zT&n3_c$ubDn;VT~7ie%?&p}KZ$?zl*z;(+1Op{D4n=u71dLgOo-x>4h2&DI#&3Zlik{L3L<7OCH7Y zx|1`cci~Uw*LOsk??f7B)wC_VbGe9F+qCGK1~yVQRghtX;Ne+Qd|q8&;r+Ycu)W^` zOmLk;2wWH|ipV&UXhD6tl`u#1EK10Wpckkv^Nh#q?a?*&jm(P@=j<2Bb1+9io_Ew6 zKqy+{H8rktE+%>?jCG}B@C4#nAVa9CDk~kSfUt;Y!M)fn2SH_PrC3tmALU3A4B4FO z`uge`o7E~Of=K%?$@rNF@=c}B8P9k0#pz=(@9ikPAY%{7xV^!77#d^7RIg*7;kBPK zUy?OuXq-8XVw}u*IE)!&#!AN~x_lWMG9K590Ke%+@X~ugUMTg7K(rwHz}W2@Y#;VuQQF7YT9()2a+WS~$S;qRg3W@XNb5rg zDhMUZ-fp+YZoh-I7L~ElNXPK(5hTD?2Di7jxV?Rc&wqRd2LXh%PRAKm<3)*B@G~F@ zt#fe|GN^BV~W%+LX(Peu( 
zwE$OD1#2oa`wkEHTg{vk1fVlAfC(0B=z<9rn$F{qHX!F55}%-JJM8zZ&QWVMCm%)k zzcE;E)_C{s9oFkLK7aj+rtRRvSP+Yr-tTZv3l_XI67A(sZJLQ;{J=!;KEQiF{ll3P zCkf-RQ>xH}pz#cafGU#WDyK#MR6*)#KyBA*YzhJOszzN^Ft+r5Cn8xAULVonCn75y zg|}l!0ZVL&!=Ksrls<(QI9)kPZM{B>Vr-X+>%>Q(=J`h9HS8~=XJNFR_4Wa!01j&GQB zZQRVD#qY^`PndL(02cym=b$yw1g>*PhK}?^K|o~q|eSL+_d9<+&iU}NaC5lrFH#OUrx1g&b^%SCINF93?GXuOc9N0x12_r zhQoI`wXq*QKMJ=aPWm~J2_5LXD;{qY{SY0TGu?AICQ9k|?$I8eSV;!`_$_G2KKQJF zP^3D|8{y{{8yWoF6cV+@o20Wuv}iGh5YuZ@e%@NsD*!}b2)uV_y9Um6AOfn|!nOp5 z5ZM*UxFA_6nFye##t=>8;pn6QOUPnMTu=JvN!c)YUJ zj~d2fnW^8yvcIf|`%U%N^bw@*`k&*Ar(+4_aYGqS@Js~+I4!haq5$A)DLJRFbIM^f zZQyAd>#2@Bk_$hhV+=LeGn$CUXhqKuwQ+)X0d)D|MzKMd^MF2q~3kuaz?sx_yhgFZbAPcZhie(fI&t62G=r zJcgZkyxVhX`Is9YGxz&UoHOU1eHnV>diO-6SR)4VqBZHriMu%!oOm9_*2bYFOvwVH zqGw~l;heQlhT~4DiUcqXf-rMGc)akBCpuzrHaUFED`6lj000NHPEZBi?3~kj0M=R< zW567Rm-Pz2y?c+F>l^I%ErJL>e))`@??A=^o@3n}eO3%)BZ*$C;MQaAy^It`A_M{S ze}xeDn0p-yI%hod6m;L8bE>G|oX|41HqY%@)B7FvB(~a4KM!E@!lm;6V9^H5o?oc!&%H z1A-3{_hqF&LIjnq;De7v3rk`2(7H+x(S2QDAckOURbwCM*sRxC0T2lw zXfg__syvoD@37r%QCW+d>uYTH9l#w#7({*Lc8W^N{Bu6~BBpn9a$Xjee~vPf>A%wj z_SAKu(=rk*$PC-mn77l$&AzX(=rYHGgeI+O=Z@oYEEb9grJ(Y(?)4eS0L}||GCm}w zB0pAxoYV*gFhZGLHc(ble?=Aw)`Ihp1amI)jE_*J=@stNZmNqh>(x)~LGc-Hey1`> z5Ui~s4D5D$bj~9M=cWDx4!O@mS8K9RyoR{FV8O2gW>(=RbG!xnz19P{zPeIRsi@a< zyNVK!TLc##?Rko=i-^R5?p4zXI7Wk>B12MiM!I-joeV-^{K*Iw1T5gZ;<$_-ohl=wg^r->$ zSm(TE-0&)7WMw_#wJfnBnymxS^t)}_O(t6(Oh3QSELgDMvJlaFLaB)0ez(WxPoGd* zi_Ll+Eo6jMRpDy0f;EJ%clYSL$8O)EbsmgG0r}OCy&t3ResrYdE-yKrllNx7--Ck( zFcfM}_7O7heh~vEuZ^Uj9ic^bAZq-1`789LolZdM%@c8_Wn^=XCxqf{eggz#4aWks`EXku!tt~&enPw#<`?lW}lxRa-JE?2F5%?S(X%&QFxl5{13xC z%eK6P;MlXm&$|@NH!#v=nD5aLuZRGK`C?E?Wroovd5*IbaRE!U<>l)5`koS-f zgF>UAuQ3}s;Ec7ny1qi!sa3?9$2s8$Db<_F13%}RKET8>#zF#P_waz-b`Kv8Q?~la z{itp&ctQNC7H&cu=Mm~f{CS@Ri@>!G92iwqVO>{PS;o!PO5-wofI#r={sBRNw(XQ! 
zS#ZP0p3Q798DMezzZHyysUHz)r#i2I;7Jt1?4^Xq9RC#KAbs#y)dsF};PqMyc`_q7 zpJVNk!Evs8#1i9EIHte`Qf-gmLyTiFu&YX$1wLR^t*~CLQ5y@XYM=tFLCmbFo=^@z z)9hgg`0(3r`0e%<`+bK1^@RFlG-Lr&vdZ^!Sw^Xe=f>3dnQ|D#U5(_Ca(ssPPAsC( zA1(Xi>y#DBm1ZO{jz!~UrBEb@SYxgvJ&U-`kNP>S`O?4B{yUz;jmfAl=OBBR3ZAN1x?d{eL!UltT9>}q^b~@@$j(4x4Q@2Z}+HIn^=8$ znWPKeKHknaHBs1a;6upykH$2gc|Oux}c4tyXEb)}pFcurc#ym`VqKv!XSZ3g*t~2=n2kBpZVd(Eu ztd|TOEUPe~V-=pTMFClGain3$%u2yxU2IhAFE#Sl8GhCnc;~U(srMtyOwzK-wod9- z>Dq!5c-26^1+NYwicSjyVKrE6x7(PGlTgQuIGfE1_xE@B*MIwW1QD#)SE%dt(SY7L z{`Gg5bZ8 z5|XHLLq!8XE>4Ki-wc`TFR3~J*mjf;31~Z zX3+}FbLlwCHWw^-3wU!z6w%C&&be5{T(SowrA(ci67Rv73c`Tvf+{h-19QmJO)1d^ zLQti{d)TUiwFb5#SgVQ#K~vCHm4USeM8Ll7@NfU|fAQ_^PJ`2<2ZSYz=n{-amm|0! zN+skqFz|wmu3P>-R6<^B%oR$;j2C30gc!ik#yIuUZWHeu47dYix%xsH{b0Eke-renJM0fz~lh zCE(z(UTv^4D~({I5C?2=;gm+)AilITXyZUmeNKNs!kpWw_>{SHyaF|+VN5H2%sNH>h{eE8r-RS zxg641OJk#-3pv+Gp-dA9QhNJ9Fqq-zmg8d-!Vsy|gWu0jyfC~rs0dn&jZnIN$s?xo z-pg~4pOR7DI5CuBApyP$*zI<>*{pE2+2CqpQB^g94``YW`_{n+H5LR>{`BDe7ai9( zGk$7xa?Z^O731%w29>ceG>O#2DZ&Af9sNFyXE}YHQ?)}b0W)*m(N#xQ*hw$Wn77B? 
z)3JFnxaJ#?`^lUVR#hN_^f3S_z96OW9h><~)mxHtEW!aRxgKM|>~dSl@Mhv(%9Vw9 zE~r?2?FRi-;DZ3L02{5n!{~Ya!aCoq0UJVqpo>-4ZB<1tOU#xjHt*HR^8pz6;IZ8` z*f$+uDws&!GKMk@OIO=8kBf0;M$dO_)U5WCY zV?KmIx&8}Ao~Zts`CX=djk_M-GmFHvRieBU6#(huaSuO_(`&`tx}>$&1Y~TE#Su~J zy)eV1@fqWbU^FA@vx3lnmMZ9%!drP9lYS78m;2)1xfsq9R%u=9BrkfUi!Ba!h!DEg z!?}RlcNGGA2n$TKgp}^y+*RsR^=fYh>&U`+piTsSlo56X_d~cL@zM#YrVQ%!bpsWe3zmG-3lNoVF%|3bf92myq z+4ri$lZLf{Lh*dO*YvvAR~y{ktWnh!nx?_s{XLvxgaCkt#T84W&0ht!Sp6&g4UZq& zPqT79eq)~=GyZ+trp$adr+~Z!9@j37{{5`6HkoVlz4?*Wu|Yo%*}G;Q+OgX2W&D5& zi1D11ohmRm#J#ZK<^YTIhY~#Tc(S zQLYjZRaBxfF*d$rA)li)Tr}{#OkMqy!1VKu?`5;WHKG!dYQ$wF1PcZ07eX>?UT#(n z1ns#^T7b?e@6$wOBCzHj@2wtFO+RP0ni=dweZ7%G$Al2_-W-BbI-0iCB2PzI;V)q= zT(IE#VlHFK+kg}?AW+K#Q7l|0*lgCgy}m(R)!6U%`1y5eOI>80|x^fK-)@^ec{rNE_>hDTBh0dNTtv zI`0u!P}Mc8wUZ22-xC9agkZLI%fQPJnG|z@1Q^^9I4-S zX_R$95>gVPalFW)fGl`21_Nx)Vj>KP29}1HF+zj31XW$lP6ln}Z~$ETTRd!5pR&fl zSfx~TU5B=9V?7edT-OB)ekB|#B=r{18+>(MYrrX^vK3bAbxiRcu-!G_5ET#MS)rt- zJ}k3*!Gf{CoA*`HRQW#moT>8A`7_^Y6Zua?VaQ_o2~uVE5ET%O14%3e6O|!Obw46S zi<#k_1GB_)Y!L&I;vn9}VkSr#H0P?<_s^tEY%_c($eyK>9~9>!5jCbtNB$}0wM!*r zuCJHFIAw5t8f7d<{JGQt`9+ZyBV!Cgc-n@YT!Q0vOHEqaF_I5E8; z%-HW6>~{OeH&>TF3u90RKzh~!;l3dG6aYa0>gQa^-5zjf-wVuIGvw5VPY1oo~A5Q!-hzVFVp)-9!+Zv ztSyw4*zZ-nLU2x%jmm-pW4F^Z+jX^4FG-?YrMXK}+2fedJG_s>ICTugeh+C(iH;?( zIUa`xo(g4d0L(O+#|#paJsf2p4#`&KSn7)qPRV##;T%x<5+!T?`OVYs(&-1jAIF5G zaWa1PiAI*YG8e17;FmxjnD#tN!olL4@ecG1vKtN z&dfYAelAazbqc{@a@q74xc4kjab8(pj9D{+&Ur#&Q$t$&q#N^|5f%YhX3u?;(tC;> z@`%Sbp9=A6%=b6*`qITobf@q4$%|$8+A##LrvS7i2 zGe88-F+P*euN$FRGZRf{#C$M1FUuMkJ_Iz)9(7$~v(dtZ#!ys3v=)dapg^)mAcO!P zVv7G$21)cf(f9Cc=GYYO<6e&+KUFBs?e9D_uJpnjKX2-5xjg2WFyr@^eqUlhE{N~7 zAyV=G^C6?pW3o)>wO9m3Civ(FM(M5>wErJViK`7kHN8i*jW#qA#A?}`W#xY!KjJ*l_(dhBaZ8ADK z-$HK>DW=-PNO3JNw+)Utwagl`SK{H>ZG>_*Te+EttZUVrsJi&8Qgmp_)$Hh+JK!(JY7wg88o zvXtW|&azq$TbcHgX8wKXnA0tePaU$HMgiqzBeP?oQ1JY%*aZJA%Hoc>pEOrZ%|hiL;@g;x*}8-pp=rEq99Ki+v1%=+qMuyR)Zf^9UsxZ zXtbY5y7m6SP5M0dCdYptP`PH-4>f@HGR$CWC}kl^Z=jF0px7pjx;9yhi@lR*t={~c98F5%^RjY5T&=*KxY*;krg 
z=Ou1$+>e7)rlSxD7%ZU}Bxm+E-eiC-6aoVw16x&~pn;(*1cAVSU?eFH2ErZ^1m1bv zUf-bW?xNCQa>aBlgeEYf)g+~DGw6Tz$E!Q9idGd49xi)7FrjPA26*xO-_FI@Ei5C& z=Nz0h#va$L*=xK7k$z04n*-;~?&)~S`<$K=P{tILRwxmU6oGyo&c!!PUarVUIGj8Cjo3ZKlw`(h~Q4)qDQE4d-DxzyQ!pGRBx_Jtfrj8rIg>?ONR3 z@3fF8^%h5v^3l`0>u>8+@2BsfG{8&~W1JpE3|h?VhoIyh_ntOs%5R zX8>+Nk-m6%UlRdLF=HOnrFl#$!)23a&J^_LnP1zbBIb92jle!jM5_Dv&Tp z9B!^^tk*jCi8#s0S$S!j28Of{>3Y3E)4BZo2++DC<^7JTaMNg`n~NJA*Akjk@}9GC zj9_4U<7le#DnKlT()T6S`ygV?_{EY9rXm7UXcc3xh-7I#3YpTszw{vy5Ra7#WZ5Zo zD=Dffo~W@WX>TXvK+ZyX&WuvCO81$@5R5DykOj{}nVHHM18X$5HyHv#FBov>)A0cs zpHasg-ysADREX<~84fq&b2emS4C<WW=5+&GsAn2)vCtL%{A7W6)b98Z7Qr+y7yLP3<&@chVu@UbEsDa#*U3BuV=hI z2ZKQJ86}gY$oNFPbd$bkk!%>sf#m}f-h-t4A!a0t4KpOx9-Ml|#`{s3MGMR@)&htL z2I#WuVb0o>JTArcml%)*7lR>_V?a!%WAzIVGv`#iZQEkC(iD3sJA}11^GL_V2nhs3 zW@@3x*CyD?qRI-$9$nYPw4Dd6PA7u57A$xUmT`3+%5;?>0NyiP=fMoLoyV6i_juUu z!I8Q!{F50`Sgy$Hs!7>AJJVk9Qoc$Dn%h`l5ka}M=&%mhhsyf2A9 z3l_4MjmRt>kY5D{gShhdM?3^aQ05Fz%0sGZ8jvzW1Te=ybuaQ3{3&(-` zNLe-7wu!Yr;zWn}iN%!#ZyCq(s?QvapINXzjFkVL00e{pbk1YDYvH>N-@e_WZM(Qe z{Ho9BX_!2%H}d$Nw%aVYBqYXfmRX$6U8{yEp?j;gegLKO09>TjuBsZF%?9iB3h;aU z>BH~1x~{QnT9`zaic?%-rYr9Qc)!vhbsaXwN3pt@dRx7%qBaqnVJ(!dT_AbI|u>j2ZIQ4}ehekwg5>gIl zo_R{25f*S(^7SXdAb1aJ2*C@251Dy@6UIO?1S?w6JVa_$RoU1+Fb+Bj%-HX@a9xYd zYK@z#4fYQ^+}>;;@USFUuU4q7)!0rUAlDFKi0cqIt&f64+?fjLqf49P&k)e!fJg5& z(!zB=nx_qoM_Ax8o@{2GCNIdD)N&xkuS8`-AX(wirj3SM*z$N}FUVs&sv!gz+k3R0 zIxqE_1}2ZYSW1#q`ABQQud%x?cr_SfwQx|LTq($k!R!HOPVW!`ymxVeBh$5xBbrAD zD3<|46wlYn=WWxX>l~PaE?3W-Os7y5U0kr>vG^saTExg%9|+1LCrw5H5o~u2zTG|G z_WByZ5g-=*{3=zD1q&9OLQ+C##=y<7=0+J$aH_4o?gje5bt&kSh(ad|f?>D-gaFvJ z!D>~(*Mzz@U!2~}m{*q7c~#{%bc z@vzpQZCg0!hJ}bwa!HYe`jmBpifj!fQoISXplMp{+ZJ8$2qH*DQ{Tv1wP3*jzohbU z04Z}8L-4TOfFi&R2jt+~O%usBV@VfK*7 zWY8GPbP>_)SjpR^mPJ4S?_Fd)5H?pEa9d%svS6;jEZEd5T(8$yuh!VN0cz5phBByZ z5WjpQOXp+icmRcW;*nkujJ)JyWoPnx^28(xOo7)>Mv@mK^)1VTg%YwTAPX)Cfw300 z(kkdc^lXs24K>J3gO9xT@ZPKPU=28o-Q_g+aS9X_kgj!b&Vz$Q9;po4cr4fT;a@O(aV>nU? 
z0HK1el#^}56%FG>L(L<)_tCQ+aSB4wWPsT=Un zNpdf)fu*Mr(TrU?^D?XmB$Gg0%+1 z`uEBxSJaS2({yNC59b8l1I#A!j0w!CQfa?97Iy#^wY=(Y=kf6={x83e3s_{v?{ad@ z6JBz`=`obNLIyj3hKEz8t+lli{u0_>AdmMR36I`hoBhRKWJ#+aUF zl`QFE;#CbG0boYgwqRkTnj4yyUuz->;yuK;zOHL?s^4l=5qE8K~zo@Myg?G#u+RsFx9s8PSr2G)v}TPqc=66G`D4Dz7`;f2M8q?W+SI z(vhM!K%cWM=vY$)1kgGkTC9-(qj7j9ww1cTko+7D$%V%Zup$g$+p|rgurQXF20c3> zE6r6vPu>s+|0o`iQ!BvpSIoDRQ~Fhq?>7kof^wsM3^PQS;hodP1v{dUhpWZoF{kP>}D7jT~$UZRMiSZz;?UCZrj2;fiOWBATqtBhWnW` zQAmqNQU*yKz3=$*E$!C__Mb;#p{&iJR6geZc1Te={50R9@&RDd-SHia_C*k$5ML$5 zh2{h%5SrG(i_TTg0YMn{XwlU}3dkvpk2zq0J^J@k*ExSi9XgVV|DXM|f2E21lzYwl zz*yjWxy#O7W+JklX9LT7RZp#&79yQqB(7Ik7}Qvd5Cmlm`@CS8QekVBs=yps3>L;% zXhA`v#ukZ!urNCBG?Se$s`UzWS7WtWL)c)wUSV_PvEJ0E>I#(ODbUA8h{W0-@qW*p zg2khlD-Nr|d;i_U=o%X-h5V^79-qkpFGv=+(ZAPZoYq4}?`vjbpyF==tr;SWTv(Fg zvr?I7sL1xG#)|7Rw5@S!UJ;60+T~+s`#vpcco!r*!|cVv@F5@s4@iYnU6X)ZUA3$Q zLa`P|)3o^Tdkt&5oM}R6Ec&O}*b8}yc1kh$+!#U#0lV!E-uqa55XjlD-lUYbV8O4B zfg2+u{T2d)yAA-wY<6nlTPTkUE|2-Kc+o2y5p=&7k~4%^a)!#INEw`1w1^=ys;WX= z)v(qKl&0)}h}561)8sHj2F^L`n+7C4dtnS2SYxyvm@!H<8hi9eRV>isjHD-@LrGWq z0ia5PlBSZkBqvef+S^WPBFP8(zO=_WWnZP&>WOWrgO;Ks%)dLd}GA%*m+qNCpXV1$AQ#Y-MwiVV%E2PaTI4 zYjb(jbp_`f+NJ?P(|Ehi!Fi`~2-d<_1BAkJ07Un}i#>>Eqb#WU8b%VO$4P$fN|h>p zMhyCrzMoS*AW?aFodV7D8ZjUX-VTxg&cYxmwObYBSsrNxNM4iZi4Ycmqnj^t7<=M? zh`@UX2vAiuoO5u#i`Eb2EGFy5FsS@npA)5lYn1T-^H|;+aN@3{*$qG}iHxBxY!8(lLK{GfdUzL4Z(ue#? zkfb#a1I*LF&xZ|W{xL)$A8NHoF%|@5{$Q>JLK*_qb()TEywQvF-dhjQu#x;G? 
zcN0TZqBc$Yf#Q9p$5x)36v1O3jN0Y#--jJPTOQK$foDDR5z1$~C?E?i1ky}WtOL~; zqZW}u+F%B{&Y^Q1Hk%EY8BNouE3T??h6-H(;=j=^tYwxQLk2_!%nau`>~?!N=cc`) z5s7ON78krBENPraC@s9x(A??9yI{eBCnM36nHj-*h^R48V=WFSFfmT^xJar2AlZh_ zf~INEHcb|=Yg?hNs%T8KU=}#5_53oy7$5qwMM*VJo>m6y?s%J1^D$rnxHzZ_ zzurU0v!0Z<9HlCBApPelAV~gx=(E1h+a2TBjGVT+XEP^Pvr%6F^@1IL_3oh2Z>O(f z@zK}w_q01tx>{XrGWV*zry3uA63SWtiOWtO&R*L{3AoVCXX1Nl#tq`{X z28H?=au2{vp>n#5F%yQY(0EVib{?B&N!9C|i}9%G^q(_7<2jghp0d{ZNcwmQbx``9 zS{daC3rq&HAH;QlHC1P-0~H5VG>`|5v%0V|f=fI0yvsb$edyXHYf;{UBI@hiuit^sHeNHk-H50J`N7eJ0O(=%2UAI$we4`n}GEY3MuXy2+<$gzG*!PtBD z&;oyw;&xa#YbXq)mgKLfh=oaL8sL7$BtUYpj6@l6}|p`0@0FUEfPlI?n*4Ujq|2w~4h*rk`qW z)`$Zb1=Aj5DrN|Fa|(C9bQ#i2!hC5=6(t(crs6&LBky9)vz(J*dOX&#mTR!bM#3jW z?V`3-9$xCdEeDUEweCFI;`iY=3j40Bf4JRQ=S$hC3+fiY5;*fthgfYkRMJ%HWLgQx|W1wa;_U0CN?F!h0F z%Aalc#rT^2JO)M!UuMV*=$c`KqRJjV27XH$gQOaO@I5^ITRu+V%K+g{H07`X!w8=g zcq%Z14N2#*4M0zn$lnyttQ*u2-x4hSXlI9|azYbQv|o?WN~;xaAjgtj)+1KDggST2_UB;P-?hi#g| zf-_#}>F||&*Sl_qr>7@4C(qzx@cX=M78Mm0C*WFC*Pjra{k#>bMtj^=DlP-{Jn$1W zGY;LOKbgg6KotO+kWI3&lGSSL8DPil9qKro*USv(dbnOv_!>6oI}hIj-LA)ay}|SI z7Q3!P@7zGKa#d&e%=nNzc4aDVE&U#n2}D=0q@>Li4(VDS?-_s8B`zr`7?~yI3aLgF zKt3^42)V@mW(HjEVk$oXu~tWRRj^v@uwJjRT8RMC_Z^ry5IxZfkfDJvLP>}ZS{xB( zwwo=UH_uXsBoosqm)@_5fK^o7Fe)?oo~X2}B0-~vm*$p|uJ0t!J^U^;yBM3z4&Q%&#E3+&uZu(QgNZiDWcNLQv+WbxpXD7?6tBfCwNA?_FjN1Ws0SvH>90 z6z}~6VWMbtW0rJ(yWJMwzI_9L7;u&sB#f30LNfV=HYj}OyvMHF!Ml(#!;7ny`lIKj z&WehUh&>)t9|L0OtWa>yR9ru*_2X@zibTyKIrAnk03W$k`F*$Tu-kPo#tzz(%H;Xl z%anu2ResvO{$TiU7Q5b~_YUHYXyAGe?-;va0oiP}*zJ0F7#PdP!&%=66g(`{(P|%3 z;CJqJm44Uhjn<0XYU0y(ocOe0Krb<$%(&!>sRGC+hD!44 zer2tNtkQl^fq3uztOP3fA{vRa@F6!gm_TfTlEwv1hDX=$;GD<({T+xX6c6;U)}pZn z-bp2Mm=q_VVO53<+I9(J8mu3m@bvg7niVHLqsHhauiC;}MSX9lAO#{G`g1%04Mkt_ zYeSihhG`r(Q5o8biXl!(8EZ+Efgy<5hj4L{SWV{`-@iZM>(^)8-+w{hIjo=7Xj=;( z;ydl4f$t?~eYe}eSSbkH+65NP5}qAg=Jvy$;hhUE-=e7OIuB!85H)Z+kFIm*oCm{z z$O0h_0wFHd5XBG;A0E{oJ11t4xnr|Ngcu8x5KE!OL2SlfsdrU=r~jbVtSNTD^Mwg|ym3u`Ub>owMEb-%h!rmCW%qN3vC 
zV(52&JIb%Az-Z?jzW@Fm-@pHWwH9})J2YBk7=Sf|QH&@{MR8+8z_#Z>Jhw8f%synd zVJsFcykl&hx7h8v5c6OlENN9TTm+ovT|VLKVtpPJK_u{D!VY*Z z0?7088rB+d-Et1bN`RXrbO-`R5Ij`!Cu4%82H1qW;-#zAtsDRq1vvRAS5#C~+zf7n zWO+&-0Fp#TKalgab3ML4KH_<^LDMv`XfU&bv18%D$27KkQgJg!>xc6`RQ4wP+yDCa zpiFE482GGku_p4>e4dQqxaX(I`Mik1=?HFLnOpj$gb7a|2O(BRvs zK8a4vx5g$P)?o1y?ihGuK#d$1X_CQGI4@g`|-Yyg1z;G<_Af5?kvBai#Sa%LP z$Av&K6mrT)i;D>#vod}jj`O$}xR_TuyR-ym5T?n%q$yy@%v#N8qe@=K3;ogiWxX<9P=2NJnP>?b{!31 zec|NKsWtRcXnmf2YlTRe)Lh41;u8ANa_=8{IL@xg{PvI1=>#&7^gT#GM^q5GIpZoUBD0X2)xQcrN ztx3NUj_*kb?=n~$B5;8{3~^R8aT@X(hu6sa57r(n9LOfpJ*n#}AmRhs%~Eg)~g zfSo}b&MKM~SLKO)F|stgfxr=g%xT$6^h(d_;;E)T(Ju`<5KF=o-Ljlx>~;>jT^9-;9`GHFwByt_KD_=`yi#vvoF4=GSuDor_q$8h8fN_~|lvQN$y7TUg{gYxNz`xeTVh>3G3%4kYU{2t%T=w znLx)TGAuDAZ45Y`m>bXNdk@b5$l&k4|BiL&Z5YVkQfMH74MQ2TKY z=4;Z&Pgy4Q6liv={bQD=inMURSEUA+Sqe%QmWrrAjYAK~0P_1Z%a+giGYqUCrA06Q zeq=$x5lcz-Xgc+uRsg9;C>PTySv885M&E?GfCVVVfw)iz(L0~AXX#a;_y<8^LYk%p z0Ib)~*lr|WxMZQw%{s*hz7-o-QBiRU3?+uGU0AGE3oMrlSS!cUrxvC^^~29ZrtywU z&qYZ6$DRC1QIFEcgF|}z4v$Z3{P^(**E@Xq`W40)^iGt1&WlTyrr!7Q-)_W!suiT~ zd-To$L}(XFJgqnQ{`iPp*9W()V2;VNM$vfOZ-F5XG$k1G+e-7=>maTp69i(0F+UI} z1|KBXs%dvDzgMt0W%A4Se@G~)0>~%F)PlQ2s>`@u3llGA2S(rb@LsGiQq6VWDi`aA z1gim$KYn1d*@WEZc5>{u;DcLH@tGm6U<@Lu0km2zvASCZZR+RTyL&Ak6_e1|i{i_Q zJs7dlBQGX~V~^f>Y_}bremrCI++n#`1rG$32vqpv^dApo!coW0IXEYAB&*dOoOAf~ zx8JbYJf~QLWFAYm1|Gr(cT-`A&wt4(T-<6#?Q@QQ;u~>PqFk`l}vVgc2;iUO&S& z6#KSDQvP|ppY(4cD|em4@87?p3vnM)7`Qb?_q3OtN_sb8DCAUJ7IjV!!v{a!e1G05 zoO6&;AB@FffiDmD=zIB2x9#xpc!KY^Q7+;DYORwK5j>|P{*mI(kB-6KXT-W$XSRmu zFTmmo<$B=xxySeKPq@2#z_)LI!1wRJgN)SUGKPb-l0)*CVS5_epl;pFjIQg@cMhwE zJ1_&k{`xD3S}7Qd}mll}~z zt(d}+6I8)y&RVP%kO~0EBxXUmDN6){>0s%}!t+s^p$j!%ps7g-HvRGR2(fV3Sk2YM z^3L)R_baIv^7y#M^Yb>uelVP$3YH&8AeTX+h83@b`To#WxI})zvtTi8e}00yre!NKdN5M-oD&Tb@=DtMXW4aDDdNdH=a3 zWH{P+^i|!uCjU|hE>qW40i+@#m%uO3*sMcgyWPgDH(l2at2Aq%5)qoFLEm@y?YG}z zyomRHZxO;<)(lrvd`J{q(iq9ovRp3l_3Ibh-Q5q|2kIWKn2owrE&>8#FgSZc*Eu{r zuJP-yzhS*z%lXVh)gx)mu28pY#6C!)V3x|@&N(a=3ov_hyAIv1i`I{HeSs)sy}!7b 
zT$3(0ihY6Imu$0a|Bw(ezY`33w)FL^=cocm1po{Ve&Frg0GxB!bscuQ9h`F*Xibn@ zv%K?Iub=Vs^c2S`<4szV^EJY^t3|G;xCwA+;bM62V2r`T!vh{3zQC9!YCRF z4z+(UQ=4R*RUE;rsad7wnBP)sX@TcHgQ7{ve6cW?G!oy@?RJaj%?5XOOSzIns{(jf zYhjGR`gwyNKOWKdJ)Cnx0+46T$V`~!6YqbHamqjJ%?{nJ2Rn!x2uW%lzCaAuVa#cp2LGD!d#*SFDt=xV z$Iijm!y!sHrcLxp>v{0}`;PG9JNA3x970+w55O_7+x7VU$0NRe`vPd?rEsG)R^kt0 z8s0pS0>yqnfDeJ_hNOae*L!%D%v#fW^sJejjmR(2N6vG*9`hgy3K5}O!`RNU|FN5S zZUxeT9b0$%2gA%TVNE1XgyScViOKINi!|qd%lnI-En%1gwuVyS=D2>sTg6+itOb-r(z(2e_aKk(dt`d@~KU+bz~lYxJFyK-J(o&TN3-znM$*!)F`U z2pL{eZf4!!>38!+N83kA{!3V4vm6@(lZ5D1d_4neS~~Y&jn4aLm#Y>>Y_Q`Pox4v=1Y+)1PoHOUhf%;w#A}p z@#mj@!u{Pno_;)`>v{kLgHYg$42bnP&v=qq=jY*fNj#(yvnoSsSPd=Wm~AQsxi%sDN|XGt$P+lS%$(OEck&s0Ck>=OWEuc zbn&8WPReJI0CN-j;8>V46?Rp{koO)GxR@w(43ZX5Xd}yf1x5_VO*63fSvfF-n!!-y zogs4?1KY#zRRF1o(5wlhqVh@5>-Df6=uG!?Xe4(y_t@=r*zP(w&uFZH_q}XePoXH% zGq&3uyhk7a(lpUr&2hywqx$=Q9E=aG+;SoeQoCqjj3^queE9-vj4&oK;F9(#aVsh+ zCcp(9J!=E0(l9nDE|El_-lOXr3>rD*LlC-iPAnD50-cd!&rI0d0c^Gd%=1{*mkCQ7cE)mJ#A(haR@Q{rXQ!i42%Mpc;eP?LO zX6iv~V4zjflfcT%@E{n^U=suo;>@E(waR;VI+Hv`^smgBT5Jy+SOt&|fb`;Nv^Gw+ z(gX?kjjTJ&9_$_Zu9H-J-h-Mp)C7^gZ?{`)HkOhA384%!O*M2eFV@YT7d! 
z4N6Hu*Lie(kJbiDs3BNu(RUKJMP{1OBv33m>YPL0N$h508-M}V_aUpL)ERk8@|vGH z9v+6@r#{HJXWz*}h+GV#NqG4Hj~IQWP8(Zpg-ceD={~9eQZYcZf*jNY9KL_PfWRU3 z9>Y10-L8Xo9%R%CBLwgH`3bx27OwYjj$ut$wrqkMS4Bm|YvCM9K+r_p+5!ic5D#M- zELRKs{PSPI+=IwtwOZhhfBcGHfBlN}(=(no+Y@O?MMcHP3i8tCSoW;7Qt;RHJ$!3p zu~0*V-Z^;Zp%nwCdKYhoPz}BBuwJj>yhHDLIPaswMADj{4^_l6?HNo4?>vkx6&(i{ za=ebBtPBc2MRBY`2-!nINQxx!Q3&A)D+t3d10G<*kh+A~JK6K-CRoV;0G>fMxm<~; zD&s1qvSZk`jkP@3i+L8j zK-)9`0lVEAk55l{TCV{h2_I?(Wm9>SicgN5(Z3u#YaV%y8cYr17{%F?bb=7$zI2Et z#M(SO+~Kc({ZD9H1ELmBPe1VW>jVC;|MS11+wSnoFTY~Fegi-|ZqPB~-A5T2f%aJO23$OO0P4Hyo_nvj(e zDQ-fA3WRtJlFWC{n`Z#PPe1*H-@kA0ym4op;U;6anK{+-U^D(}}V9b;7 zfMK3?{*a_M9%ZSLb0GYZEygacZcWon5j$YH^U*#R)2FeHu&F&yIRh=+wc%M4i~ zfp*Ujm{Po-7D3G0R3Tqn-W@K(Gpj5SjIoEe+#4%_WExR6o^UhC0yVi`&CC3V?W zRNMx{lF97gz2yJDf4IZl-F-YPz3PK_c6AhNZ@obRQaK4f#(Qidf(i=#mvBg zitFzMW)7}w>G$;Uac+m-U$*v0?AP<~cMhq3V=#RXye`&j#TDUpG=)keoM-HIJFGX) zV8b9Q*&%xG(77I-xf(huDsBL!HB~=NWAXLtJ-&YZiq)z`(=-4WUDsjr{EWx-BYNM% zHa5%d^6^2+EFG|Z_$rK5TnW5)Xj_ZLqJ;!wGpw~}TMl#%i?+?oyXI*$2!t zh4jG3GI^@D|0QuUR|2(Gs}OQbun>2*2`k5o$8lUZQz4|fU{y>&br^56DajDXiy3jZ z+kx2tWZ=1na{#-b9eI<{R8&-)gF^?7F$T-!0{3_KSlul^M6g!eJiETbuG;`4g-MM~ z?wp?m2N@;RTz;-8skl<~y~AShfYyj$EHNSs+aSb_a&Vty^&OJ5yV3o|pmQE~cXw!- z22InTcUlY&(3(i+toNJ-Mb~gLm{UJJWqUf{x`b3K?d_}|^IdFZYzEG0+Vp%*QuZrp zjVFWp5khbYLNOwU@5B(COPH?_ali2Nb7|(JLm)0PwUc}o`vBnIq_|2+4XF6&XfA`f zfr@jAXO8v@%`};PfyQSES7SOGQprw|1izoA(ZhL#vmvzk|1Ho09N|<&X4IP`E1{j z1aoKWR$dpA>`+ z^74!x9_*~}WMO}rEO^}WP>_prmC-MeU&8u!Li^J#>JlekQ~~4#xMe=D+%oy$Mx$>g zZl2{=S@c1BHYy#WUw(lXEVn_?`ly^pkc%ZqLw_VDL3m)XIa z-fp^%ADEXy3if;D{&GeHI<@~9X>GDvW_LL)8qbO0cva% zPEKCp6E9r&VEM_!F&4yx;;o+jP+^6n%!#oIUabe&`qko!r51A5faktj(g6*GqFKm> zl3bu@U;#;cEugkku=5#bKph0Mq1E{ zdBiAGRGf=>*1~z(>Pulzuj7iL*`?$|=(`=(>qp$(uke5WxBrH=ZP9f*crVt=Sd#+D z@(~dpw|5T9)g5fp;54aYKeY0B>`#T+E&7)?&W4tXhSWyvkr;JwFYv%z+|10b4`6a-ajjQ05MR8)LYFf!_{=JyF#$V)d_@baZR3{{Iy8nq#oR6`~d*7Kl4u@oqnamqx)30itdCYH!8YSqOshuVG z1I&CsPZeV3_A?SF80fkVUDt`WrSCJXN@)bG0!YQ@#9sFePV|BJtu=N$_%{(@v1sw- 
z%RN@BC9Jgod6g{^hZ1~`JxgU}#L!)WM56C;O<;zm0j;RG6iz)a&jx2q=h1!o_5w(5 z`2Y|t1Ktxh&l~*vzyAk(zrp|U&ws(g*9Wvsi^et}+JX>jd!!Hwi8JtwuJ7Ucke~)q z9MBQ(wNA{1FxrVx&%-DP@*IZE-juroMq8Dn86T9-%d2ZXhNLw2g4%z5u|(9}inC&a zQ|?d_=Ybd(l3RnoiTp;(NvYh{i7#6&<`Y0vI4Pxqo|>fjI& zn93qIb%!#x$@MGMx==SPWvr(#wt^fS)`5{+0@*_gCx3binH5CmW1l62S$QxI4uXrc z!9#1^s1I?Y({$w{SFF4pBzcZe1&|lvT8_VHB?vpOBv|K|lVu|hvqnVMjx#sgco>vT zV$h5-!8;G{0BHK?qiT_hIgjrBj*EFc*F=A8q;p!mL>t!vE{M~atz`e6Ew!G6(Ls0d z9_At+iER4e9?iE4$HRjr4u@mC#G{<2;xEfh8SV(K7SEw^z z4NUQTWtIxY)>t??5@=6c5Qx_SWo`(ux3^pXL30})_a?yctxP)3#-+vS%ah;b9@o71 zuk&ZC++{BXN0%UF1f^wl8G0S6bq*+wQ)hw8R}|ctzJC3Rzy16tJpT9xmdhpDwny8x zFxCL%ViS2hDMA~&ZYPBYfs}e%Jsot8bVNn}`a6#%EjO*|nXUf_iV-5IPh(Qr-9ZZ@ z`j{B*I6UEuGVMO&P(G(Pj)TaBp3D`Ev`3EVg;SfQ&(%be%;{&6$|g+KKK5%=*a&X4 z9BS&M`5O1j078Anp}SRXK~mu^jDxufASYp>cHdQi!y-%hmS0B7;--oPuqdXu17u4Y zn2|&=7JT*{5TX4^X`0aY9)0J~G~)kFA(!_dv*}9{y73G*j+eT9hVM4XNV{e9FXy*$}pJU{e2D@XB?IX__zhCa2qB?Q5zdP4aig>AFk_84E2 z!KnG10Uz2^b|kGo6=;TSP{Af8f?dO!Lsa==l17RQWsvA|-n zMAvVpCwqkb@ApzQD97y-wC~9Mht`KDna5_*bw%&L9_eGwS||1(qymzLC}XBV8z#J4 z4}gQa?MZaJ6m2m4y%c!o3f~h<=u5%upp1LW5N9BX7^Y~}D`8F$zC!4;AH;x|Ae?x| zuw?LXe~L-|^EQ zzhZTFhjy`)d!UNS3@y+TOMf>m*CP+}3yeOE}7D=IFA5467JkT+N? 
zmiY4J0S{m9&{zXEvRo>o87v_|W13rYk}3b17=emg#dR^1V@>@$jX^&;RuEAXYorG{KZHxvYO->~5 z=_B`{8-}_+Mb|Y9-=+TTWqqhXky}NO=k^WXO$#Aa0I8UVH=>ZLabYb4R8jH4AVGVD z1tdjW+$2<;Qsy|%Vp4rviN|NOrRg_Z<1>0>c&w~aRzRs!Z?r#Mi)5iE|HW% z>t==&bK?M^HpA;3%noP#IN zi=IVdSjSf8IaZK(|2qmHV;SuH(?J9Cr5PFsSu<`14qlhLjs>?h&#=HOA*2c*6?>7# zdc>4J*=`KT#^;xWf%~AUBbb- zP9XSEJj-pt7)h&n|8S3ohkG>E!pC{bV#w6*nlz3~u5rDCH5N_N#CaF3-J3s3MMZD} z_9I5>9K3}<#LQqPT78&I5Hr|Y`(oV41U($V0Bjn((QVTlbf{paYWViv0WnQ}`6QLOu+ z*XN37wa&-CNp$*XfVl-ETC@CEcrUFWRh3+ERfsNF@#pmh5LQ%F2)t)0Jh)m(;lZX2 zf0=h`s-~}^qT;fkkaqUc&}>wj?G}az%jH5cDS3C)4Ne)+#bSZR+QIl}GP~J%$f*xA z!#NiP5VdOLwN9=@DH0iaKA$$uc@`c@i^U&jez%9l`-5$Vq$Nj3Sn)|QV5f+Oj?(UXo+BRc8W@KyO{2;sz2B6~7PWRE~gVo=Gj z?nfzjlrC0NHA3DNrTf`174b5(s*uv5?2-F{BAWasK$$uaCAa8T(Z{hEs#s$k5 z6SeBc18d$7b24%6%fqe-EDbKHKgd7DUzq`KWG)S|Ui0gZjD&{7UgJCZ-bt_<5v(;Z zhT!@T>@;YTXXK@?OwHvnhgsZ{=8$uuSlm(i;2v8(B|)a7U5nXGj>HU&T!W5`IJq9Y4miYs7+3>1<2ItkRIDXBsc8;|{Y%r#!;wNtkiuMt}WTfC*aHY}u1N!U~-Ya>JNC={D zduroIFBrf7Jh*C%$r>Mn%68!_^BTlFu2uEcP%i+_Ssjr;N(SyiR1m3IF~$;5o?McK zUI+48yu@jkRFg{^cb>5{7RIz7vrC+Ecs1$D?3`x_K>qRL5sTG5$eO`ebDD9O;hrNX z5}0Maq(+H}n^}br@BQH4?*R`FEyyaP4!m4u{2-1?Q(mw5n8jHr*Y>Dzvbthr*{@>Y zIP|p%-S#jb6&UihG_e`d9#1hK@wu2`xJ*Kj{{CqMkQ}dj5Wf=$`EI`eGQzy~bj_*8 zWnCzIR`^ip?Zghy!i%%&C%pGDQlUt`rI+7>s&O0m`<~jyf$S%jUqvSf>of2!RE-#9 z0NYqtYejB=kD9b8Xflgr7(<7Bi-A4D$u+Ti6&2TwgK1>d z{pb~-0=WuAX)jYD$Q;=JAXTIMQS=Z;zP40>{ zqP#iZ6_mf8lRMVPE$fK;Ro?Ctt=Hc_r3IwodZCNhN5WK5QE~0Cw!xxZplw?@#1C3O z-rK*rqT=?Uu2_?MDQK=QoyCbE``05AL^OjG0c1>awL9Py!NCnH_jCY>5rp@MAlk=E zNr%Ll4uy`~3Q}6oa|^)DBX`9DkeRdw79RxHJF#x5h59hv_MYQA%;!@3opKlaGy;fD zp5(=@hM!|l%U*PY!rSO4b>a4XkFM)tO#r1wlpF0nr0YV(wcr-qH1nX!^yS-+qiq_r zix$>4Nt0>Ju%KwH>rwaZXG88z8+?df&PjeK9B{JNE0i(zh!w_j1{niaWh7UvDH>6< zJM06K%;E?vwR`$7>c(0aL!qXJ$sT3ue!_0o!8sRVOZGnYsLvOe0OFkFX?PE2>)O{D z0b~e<4cvt?b&9nBfafrW`LIdc;D}<#lzONz58w0NXPJlznGo~h8uRcVauVX3#bXUz zwm4^}efkkBOL>fm32Axr?>`MEK5Ii&3^0&*-)PSC-8kpa_kH|Mefx*+uJi$}sQA2~ z$phtWV4Fsi1$OAt5D!dDoBsELPGCwZt{!rhsBidtkLDV2e7(nbncyaqhs{jwtQbiZ 
z_aeq0z4m}R*PvJ*$1f$8!@HQ;)>s>g2Ac_2rg6vH?GC+jkxm>!(7}i2JpJ4sYVTQST(ZD}ThgtCWrTx; zyQr&@+`ER~e;NU#qN1Xr;zB6SEo%*|G9WX~e` z0zgD?&cUgYG5mL+^MkPlv&Nr2!>R7wDtzT}61S%ROhUp?FnEoAri2hbA%uJy7p#ig zMD8C7ypCUMMaApmhEA#(NSbVUE^8ZDYtQtoDfjbnjc7&1MZidF?PG1xS3wjsqE~`4 z=OJajv9pUFI`7cCUfRSPc&6k61gB@2eY?89y<&j>Yh&`6H7?}B{dhp)NswH`Jc8R* zaL1|w$V*VXw;NrP)7VRxDf@YeC!W|0P(Rer15pTWgJ>Q!UEg4^C-(F5k&f`8-v107 zoze2u>I@|t@Zh4CBwbT@Bwe?TZEIrNwr$&(*tTtZV%s(+o@8Qcf{u;T-}$flvYxKm zd+Eg@6lSU}H|XsZD1dusEPRnfIz2LA?&EcNj!5w?G(VCuT7p14_-IJ|%rN6Fg*9tj zv8NuefZ8*WZZ=*Y4?9fKp1o5mDAWr&&yH8Wm%&GN$e zbxBT%00$ZlDe0Ud3R?6jh9EQ{4}&_yhlI-tV@_|k3}TrHOB`n=Q4tZ!XU`}p&h;F) z9m-{*S`VQH`3s3q9TXh>4bsnA0)K4i*=wVt_Zsi?D z!wpS$!4qmIixRGoT_nt}u{GZEQwZGqAoD?XG2KjnikZCAIZyATd2Yy5j{h=Lj^It{6 z-nZf@@@534Ur(490v&Ru$Q5ttvXHCY`gDSVPjEz9Ft!Xic!f|mO75K4iM8OsAdD~o;GiS{+5W&-7f!H$o^cw#L<8VrwJcK*f`fR zH&F!%6a#=TGPa3CEN(xG-t^E#NJzz4(DhAH^8EK__4z3HCjmLWmvbsXpv%*H|7q5b zzjK5s8{DI-7%}XYLS6}YVPCK}x}cKpeEf_rN7-mVoL^6lLQ@fCR;1Y1@uxq=LV(kMG9MRopG{1W1?4Vi+$B-awMu96-S_fFi>j-y~%K1)h;jqf>yL;di9OGzM z3?B~VgoW^6kh$UST2Z-fsZOD7p%!TA0amezfUt2=-9iXjL0>0EI5M#>P#Yl9!qCm#SHoHOXt-mVw5bsY1L1z{za z*LxCzB*MAZEBAY!d`d%6H4|0@!4^ylaC6e9EZe2HkRiCE@#B#V-UFL2;KAwckMtCv z7a!Tc!>4U@$s>>GlHYlYR!HT#a9)3ICW=H>K!Qx&!Iu|fRlu2>*Q4lIsqPh~?Cckv z_{T=TmJ;`GR6b6wWk?$1u=4lPZcb^a2t#~IG}BsQ zR8X64guJ^dwDk9VF$1oa4`moHxV&K&R?0)Je?$4J9X*1y&m7Gxx@x^dLUt)pT0fIb z(&{ghry-^IQqDodqFRNbhe7J?PU+4Oj27*euE@Oub(GTCW*GTpeT#zK9NaA-7Bs5l z1N3P*o)T>DtEj)MK=J4w0Xhd3Z_a9Bb%=|wQ^Ii^pf&=ys81Nm+Ab8xwlfu7*5&7~ zy?NXe(0s;P|)6_zpip}U8O}El?90NW_Zt|vT(F7OS#n2z9u8hkDhTA{>;)QGEl23QpiKINfWpnD@ zQ}Y=zATuwo%|R!y5) zt?|Ky8lO3_qNo_9!<0|CABsVL_sX*`jS@!MPz!z@d>pd5cx-$>#rw+-{#F?2v zaFb}UY_f3YdARZ=0EkJ@rKh;?@hnbr;j{V;WhcD|^`7P-<|aOyDAI>6lOQv`4r5h&1#5VNnEE&>ZW zsQC-3kC#V)@xYklz$d6tzd(@=eWNa8lQlMQDUhJ~tgI~;a)=~`pv$<2AfLbP>_{yZ zJiPSule<7`lr_Pp50L3j-X-%l$*Z7hfXE&q%Hf_x6QvR>Ddr*+?gz_XMr$JkAQ&4H zV+)E}Y=am5-C4P-ZqZCfTaWk?6&N)nHbb&+TLZ@5o{fgSaOWIv4kwWP-p`eaW?KqP 
zf$Mc8L;KRb<24o<5I6*$ERqJSgxMOVp8r!cF;)f|7wL52hqPghZyX%RlGD=e;B^z_ zj?4a=ZfQ4Jib=UX?#>532z|?e6Xo8>1Ubu@ zv$6#YB>l5V5gqn;+H$HxZnP6G*;y@u36BpxwwD4(;I3k<+ney(XDpa{8a+{%Tb6^Pxvio_RsNWoGW`EYj)cN zUE9xv&K8B-CH=|ctifw8q~G3Wb}yUmJL9DCx;?7nQ*Z;;rCXdX7dyO(c&R8P_&5{f zMg`@`Dy*Q!(d^6?-1(e3Mn1RE0zXd&4urmEjBW?Mt^)s~p7k1r4lSw;v5hoB4@F>{ zzndz@SWQBt*_BoZ{EchGg9A_?=PCK4#%(x-MC#!!9j{Jcxc!hH&>8OiII!xhjL$)2 z&%al>-DA@hdd208aYhiz8loTGjzkfblo(`g>SGeUVlwgqWDNP9MMgOHl0yqY{vv)I z|B*#;G3U}X=+Y&;%f}5c`E8MK=UnhbniOLNLyWD1wANgnF#1n^b|Oyzdwp4%`a;4s z433kMKAhj9vX?ymz<6Y2Bso(4?IE~fxDmzZHyP8ckKkbKI;%EQE9*=5L)VYXmL`z; zu)r+)X*pVXrv3vec*#k+94`>bEtVWf2m(@Oc5OC4?&rce@NL|=CU1Qd_A`V#uT$CSkgq%oAM0(lN zw$lfRM;1sZros_25vmHAE5slU!?@L-)i8t*=uA~v(Us-hhyy}1c$Fd7)vj{WbC{@Apqa?Tl-tu05T2)F#*t(ivS6-kSn zfIILDKRU5SrPshHA9 znJy>?+Z2MJ11ARIh%kuMkY#sCJPIz$U@+LWEC&po;>3LPtW(NkOVIJ4uYOtFmkq}L z{cbn}t?S-!T(*dDy24R=z3`mFG4n{0nhW&@F4PtRzk~iLI|a+1c!mG$e!zB*RogeK zwI|RO+!@u?RB;ArF)cAHnV0EUd>)xpn85P`u1Ib8eu0ES@~nZHyV{E~(WK)Buw6jc z3;MIOk=-MQqVb=g%vd2H$l*DAA|{GLumakITX$CEZKWH-s0T7?w@o2|YAI-8?<}_b z>pVAD(OlLaP8l_Cy&iv~BUYeyN7A5_^@ZZ9L?F+I5Ebp%o?KbJ-eYDsZW5eHgfG35w zA@=T4l%oex?J4}09jVC9dcM5<2K@rsNI4V zU%ZvCapL1FgNz5+ga5_y@J3d`TH$Q84{f(`?$9^=!?>^kMY(#-nlzif23^KiQc8sD zuU4Zt<}KYsEK{su)pGDJ`CepP8>oSy$07~Oq74W38F1<^6~=d5P(V8XpgB0crY9OW zDI9lXtTvxR7E?_nH^g^v3bis>21+wxr_Em7*qrwzFEk)kV~Q`bf43OW?MZkWke0!K z$<2>!1{)5QqthgYDdvP4holZKJ%%U}l5w;);>?m(}7P6%~jV zy&Nj-0{^LR$UHF3LGJ7CT`cy_cU&8scS)+UCru9L0BxdxI+z_6&Rihg7ukjtD-r9l zILL`et&7-u%@?TBY1Fobc+Qzc0%svtoLM;n%weo^#ks+p7H^2Ag#<` z@}q@*#gv|mL&*|IM>n|V!G0}};^~+nSc>XTOsVN*)>6&bELi#eeDD)LbOzZu4TRp1 z2>tUjoJgp^G{8l?#qccC^=d3HBwVVUe~f;0`7i>5j(2OkEh(-k@JqYNY=((BZ)8O9 z%Y3}x{T3-+Az;XFVK1qGQiaD}MIE`aCf`A$xM z7)zL)hBy=ujc(M})t*WEc%Cp7Xa-tJ0ET>#PxBWt@4o!S{77D?)gJD=iSnf&1|gnR zOWfS$Ln9gOm@3RNb;Mi`e0NOq730mScW-7{Fh~;b1i$D@!|R@mnkshfN{{wi*p@KF z%CwC_(*G7=h-TpuR+S^;;`Y11Aw3?Kt?8 zh!fEf&f6zG!&N`Hu?gAP^YZ<$%4|M{(8P!GZeF5js589QEJ}Q2DF$;C5o!eeJD?9U z`Ppr(W*;H%o89;K3U-WKh9V`O;%8ov%!!lRVzH1Rl*Ek-TNoH*)wQ2VZ 
z@&OTx^qqt#H+C#m>@k|cX8-8|EgZRm&RE&aPQXMpH^ndY{7m`3sUr_PuL|k`U~;8@ z{1QENRc=*R+}&VKOM9Q@MtG2)2n+rbi~Q_@i{l5D^1tkJiz?O2Uwyn$3tr-jHAxcU z21bJ?Pjh1F4Ea(9^?$tdAvP7;*uy;{Et%uGm){X;bBY>&@t0h0bj z!X2QQRx@13ZH&&K8kf4_hGM0-07-bRh_##5PsU?+!LUH0v8`<$N}5& zb_3)!xm7q0^yQ$bs;~nbO0xjPpw)&-X==!5ohoakzo9<%|Db~c^ZhUTqyPjS+emXf z3%E(JUc zs1U**bU*m9!}xL1{3oZ3RBlD(;m*13bB`FqGV#}L)Sf(>>T#4$rT%Q34r^Z`|DBPp zG1a&*);1Fh7Nl9SiV}`Xa?pTCA9INY_9syelG)5wX=uWAZcpwlAmiQA03WYv3YgQv z{V~(|+$tb>5;5oyS!V>n3^5}XGGnF;hIk+Fhu$Z@6RTz~a>90w1@#TM5t`9A^Br6> zR<9UrycAkf-Zxv&ok>ewpC}7A0s*KQ*o2B-yw@@n>oo(uOvMGMntTx)80T@nA}e3$ z|9bjy;<#GGM`5g3EPFKp*akd5Ijr^z;i@wJgyM!fisV>4pyFM>@hDf!jNqr}G;<3IBN`Y?u&cO#6IgJ!H;w-)TSD~dwzXDNW zY23#*=>IINwlfa6Q#s$l%<)-kdIlVyYI$~_108*LJj?m$*oIUuLl*|@?FNHL9#?)5 z4`o>Bz>Ni}&t1{S3H+1d|LU?8`x6hdAT znm9FWS}y8I6@rIr2%;##?4b8{$c&%g(ph9Vpk;G*o^qMjw}eWpM=+axO3{Bmh5NZP zAdrftnH^=;KNxh5_|^|hXu4ec@1Uv4k7Q78NXU3!*#hG|^wsYRPPBq5KT+{S2w~vk z6lc-3x_;CHwHWhmxR`i`D`iP#Zi}<6EZSP&=sR{wbH#;nrou~LeYT@Ds zL}rV9IyK=|tXu0uchRNkm_hr_DJ(oUx!)!n90`4~4%d2#%IhxxIfJ!Ht7s*AK9=(+ z^>YSnT^u+BDY>7OeVu!gKYGY1;w1@WhSTkp|*9(IJ5S{A2puc;vmYm0*!Xn?^* z<9?!=OJvuS=bYnfNpm~RBvSM9wk1*>*NxxzP$C8S>M9f|VBQ&Jq_&&wUcP}7LPSK| zgubUa=fjKMzrGLqbQdB+m@{AN3dJ#5JebI!Edr}WZb*~j*C+dYLBpJX|D9#N%4>A$ zpSM8R%H!w186phe(yLrDAb*j(I2K%OibowbfT1w+*O1xFSTh!jK!AxN=iXvXfDRqm zkNPy^3v`Dh7raxYlg{FVH70M2AN#}4^n2{1iF198TBnQ3;SB4+4c4F6Q%EJeNQFCB zp*`DTUq_q^u)oq6R(^6Tb3Tf>`@})G)kqZT1&0GNjULmO36br8#Da9rJCXvKYNS`D z=|_s`zClT46~Em*lXq|^RBkQe5n~}N6HnMRc8`e@>JZc-S|0-n38|n85Mw>yfrkOr zl~FM^LwGWPFT)fZ5WIbQim1ORN`vvaJQ(M0{C#{SBnvY2elUFmX@P}KL6DGG=3M2) zAQG%UG^nPnD!ej6^&)(d@h=s|>)^~_pqC>jPt*hHUbYc^nB;X)M$$eky^%Ur{$yGz z1nyKFIJ=l()^m3~o}nn5ViQAP?-tnvdjEJ>;~CUOmJLgd(FLg(^f^E_2$+FP>I)?M#tjo2=fAtBD$7~Y+88+gba%gFQ*l6prr(=9-yiog4v_-7L zh--Rzc^RCTfKHeo!$G4Hl~?AsvyGh$>DLMCZDJoYL{Y(?zjhbcy8qR-1@F@PcBat_ z5;DHhgx&t;r+oq2GbIz>#}FN6FE$w4A>jKH`ul5b;P&+$ef6)1 ztjQz93~`A_IA$rqv>t4%*xhqya8CvX@^lD!VW2$1B!h9h^H)&y&)sC^pyAv&#@VVM 
z{rR*7d0oEfYvZc-{X1EJC*Q*W!q?Zk;|nnjsnEv?}ap4t~tX9uaRrC*4AoP9=v6QNtF?K zc5o0;&*U{s`Ya1nDC1sGy6T*QvOcmIbz_j|Rh9c*wt)mKgkMD?{gA5eMI0#Mrd~-; zNqETg+xf+-xjlmEq&_Hb$_1`jF~*Q?gi_B8v9&4)dVhca?ep$;5dd-L$#+T_R}5y5 zTr_IvKFGj4ib(2!9Lt=1Y7!4Dn%eHba4?h{ykp-!Yvu(p+mZHG@b=GMU93n7&Vb(> z;-kXHb8v|yMh{SR?MHh&@wtS<`RH!RzH!<6?eh0C2Sg6galp4>JhsI z76cuQa5PEx@`Hwo%B^%GX=VG+*A?PGz-KVfHv%5`I~>pkt&yM10X~Y)4!Isnd87xr%47*$pPmfpxT4xth2J zA?EUlZ>z+=8x5e*A~65@BhBR=SE*2F#;9S3TX5V(`e((udZ`f6mLYBR1?X>ZWAy!K z^fLebq4e=~h}~@vJA8j(!<>maLuZ5mlLLeI+#KR%cuDw?>B&TfAZF|*gocitzgCeM z4LW5Fc08nbL4iW|afoJQX#4xeZzoq|ode?4p}%Lhqt7!eH@SInpT^W<3%XrfYoj!;aqZfL3`-$U0!iwmV%;lbINPQ@4#u)^!0T$Bx>{`ggS&uxUWHeyfpumH2 z!RzP$!#GZ+fDVzBT0ZmEb$|&+k*~2d&3?z!UTpzkq9H}&WI3;;Py%2134*@(Q?EFj z5o*+Pt-DVJc2T>PonE9VL|o+sXJqu$Uj$t~h=NFoV>AUgNrZqE9LJOYM1u;rY-()X1jgUE?}|!%1$+gg8TDm`fJzoN1@-U z-{WXM>d8|fRZ+OJX`%nzUL3T*uRAQpACGQG2|M_x^vBcb4?ZFwBq2>$MBg9dLf`LP zt*S4FiO)rDUXtc5JSsVVM}Ja9d;d(@t-DElG7N(rpJOtL+WT5uCafFwg5UrrGQ$TTeZDxRMUwf zX_cb(Kfu(fW}wN>P(XquVJa+t5Rm-yjZsWmsG7WF+hG9c!J5*|*%Z(p0>bJvP+jSz z>Q%mV`2G(6{q;NmY>~d+E|^;F@e}=1vIa4_Fh?iqbeu&HX~t5C-+#kEY)WAY(-Ow8 z@=1Pyi7x#7+t}Ce^Mf#GrUAKi4$x`?BYQhVY6GMSiVYjf-u9ig4M)pm!{8xc4UMqM zJDO@{jUtW$?-E@0xD(1?)fU|wqAygLz21jU;6%Uxbn=Tax^;3h07C(HYZIMS9P@ds zj+ML2c8IOnMzbs}`v)B*&Q>tc!PS5$i+VY@69drfDNHRXW+Zv@7 zFGL)XE)N9y`yU<_BBQW<%@+n1FXAQW4V>lVg2`#eGRbGbnf;6q%y-9Cv{{J)q17@t zHANN$zI8(o(LNd2!=+Zljdqb`YU=pd>RnCs4~)DyZPo;KDD%PlOcN4p(F-U?M@L{W zD+N>;&=D|ty`H>V?*l==Z08t76x9D!Lbrbp8-%{asj48xmP4B<^%?bybR6 zbW3vodsqPx#vdtNR9>c_RyKW}T?*!XZZ3U#Xln_TxkeA&&K8DZFD_weQm-xDalY>QBoc^K>KC3aL z&=x5l{@!lW>ihS3g9Ze1lgf7TXRiEsVj{e)?-H+JAiA^v@_L4f-Cu7YaHDR!6h*GV z0mMd1{AP%w&UNU*+wB)P8IasYomH@EDdwwtcPHrIUuy~kfAuaq+pj*la#5n@@fr{1 z%l{Z2_KP@>)~ah$FDWogN(Cr&Fj?_(6({9z6>$BOtjKdV3e|tnh8qBW>$@dHo11KB zAGoGYorgZqmT*-Cr-2ms!5(U>ud5wITh_4NPD&P_l6Yrl$D}r#L$bO-@1j5;5kYrd z+BvZpJM++Gt;$U>eK&o#BG9)Zr>+rK?|cF+PKYkzaz1X^*UarR6%7Yu2nf!coQgS7 zo*ca#un$C1P;Fxf28+L4zvDIcjn6^a_tFE2=XWyF#&k@(TH 
z2%H;MH3v|wAZ{)knqRYyu8mCz_FrtD+T+Xfxb+e6e)YubDON%&tg26&6!*suJhvVC zFrY~f;l!Yx{j>%p98cRwg}9Y=@K{NE~52j<|Qt}DydaPDV1 zGE6Kvb11;xrdUx5zU5zbW|%30yg)-l`2VWhVM+5)iot>Um}DEE@=ylFj0u%9=*htZ zqlcBRI`{5mIo?W}Ot&_WuSWL^g=81IgGU5&9Ko47Kk0u+UCbfApEsI~?WP>`U+MO4 zasjWqkA=UOe-gO!(VABceeW(m|11Ll#x>>w#4#cuh9VV`=-Tv&^cuo&@)vV zy&>7`o%+@^O6RPl7jK7GD{&ek-0C7I4pLuYv=OvZysY66AXi>R1&H+YAbl?ZN@#qj2A%_ zHyr3VW!wdsQU18V$=2g{AQG;LD#CZJx$MSz3z26NdGUJbmCsIB^H+hfsHfwM;D8?x zz_Q@1;kce*%ZeO9-Q9&oNgg$$RT<_FK-|GXiwmgXVcogT=1pfyTB?3zQiV(+3SL7p zzq^r#3SMYu0Gdm6=VJ?#{?y%hWIp)}RES8Ac^83*oGC!pL*=i%r4gu*f7T-QqDO6A zJ@eqZeDbBPX!|3h{8ojN>5hGUj~?vS%}<1B1oD0Jd0=Yms78D;`WRd|5yg-kFrS_Ti?Os>#?!85!-sE0 zYF#Fh)3~X_-aSNYXi`daCS{{^y++eR!KSk^X9I zP*XHH{L<)~NikjyCQ)fHxQ?gTml*Cq^{D}DB7zwXFYe(d4pC)oq$LiP&C(|-1SHvL zR>Z8eXSdSl!Burt@q2^~o`P8mrpR_x$CN;5rni2`7|oBwVG_%)-C_WSXI}&ffT2j8 z4}*LO0_zt@nozo`m#^18!)97%c zEZyI)Y_Ru3e;{QrmK1}yxdGs#LHd$F7hk_AgO#9*+V{&8>l8*%6}}W&xp^#1?(-2{s*fYbsn8fsGQ3W*h6i$7^)v~TtyRkXAjrn7IicEl3Q<9lA0 z*su(z4nr)dnYA{@oUT<#{Syuy;KaC|b2crk;Fy>^tX$PgWlvx)M@6e;b3n?5J!Tr^ z-mqQvJV;Z>(Je~)N#6K9APh4V=3LXrZ3rCQMm!%L928iG+OG-Q9z!HmW&BgEd|Btt zWC4oK{Fy&1;AblGe!OrL{Q1g~;zcdK;Jtk6p@;F0#ysjW2F+0d=-$Hgp19sVhGuAs zC@#Z6qEKL=NS1695-f2Dq|PkxH&>sL+vJ4{3h)_;OufIcxAWwq2RNjB&=0(@diF7> zkt3_pCooUc$WL~i4!=%d`-oR*lDxI>5^0VZ!(0Ejx9q4JCmPtfeSdbGV2+lx)NU(gyiS3%Qp+Qzts}i~tr8KT@R}{nw ztp)+{>PH;dfxRBrc%mCrR+;kp)gq(&O5VO{^o@0n15avg0@b`CLEhUO}# zkjO%4oR$5hTR<)3+4!nuv>mPGBB5qL7{N5Bw@Hn^(49SwYLyt>ye~OUW~ZPRpX^>6 zHMmY>m#FcA*Ug@e5XPK%f1-jjisOWLiaQTv-cHr5LP~cwsO<*0lL#RqynWRk5kraox|w>gS$w*&@HEj;GEMA7zhCE^KtUBr#|r&DWdlW1E}!vqKTa^QuX@4vV->k?`|+_*F+bFcRDb(og+f zgrMeFRvs;Ww3)xu3zS89m$XA55yKyGtB@?N^uvyp1g8wV(tjsBsbTp#G9*Nn$n_5{ zR!fcW^mAI@?(`GT(3nf-!8;qu6oj5cp=Scztz)zb$|1jlH~=njSSJvA^#U%G$B+yY zbBqICQ}o)HLZ}H33G{K2aw@~+i^@61^3%jZQ<6HjN7{@*c0{I+tgA%;e}u_9cZ|_y z^QMg9P;;-~8`|iFb0IE;(9ZzmW|y6{HLHyMQ;pAAI@;Aij%;3NtmVaaOq^dhFJE8d zNw7kRmUft=N%k^4H~K*T!ZF)mU`#;)4p!_pg>4zmv+IWBPk@HvF}PVfgAX%mFNQ>0 
zve73>#rAxaT=D(bLiZ#4_~ToG;m#6ciIP9lN0UIQ%7hz(AZuA(1pNlsL$9|5`z#J1 zkE(l$fef$QNF^!+QmuIlV{k$5eoeTZ`jPsJo6ko?c;_{c)R)|fxu^ayY|5+-QDzaQ zn-QUo-|P}cbWNFELQmj)E|jI3(dNUZ+yg;Vkz%m(r1jXcHrU{#IJrza>Ewi|zP{FV zMSStk&s^HJd2|`>6C0zhwhj#Q4UaXaExA2AO1p_NXZVrdw3?$^u7^i~iL8ASZ+{64%&SrlL4qsWZc zde{pnJQCInbm?727B^KB*KsCXvRYSnGHR>8|LbS);#!@u;oA>XFL)*9jtC^+43>Y{ z@>!RG60|Inzc@bDf6LbryrrE~XfaV@BtVM^O^81@Xi?JSNROSQi1`g3D|=^HF7}l> z=#Gf$m%p3 zI7JG^Qy7_i2j3C>-EdNTL)Qz!%xhN0xdrBAgEbKo@~kfj6by2Lk^0b^2yswX!y2^E zL&C6Uk4G((&>4Bi_gqrX5{h57jaQvLhWr)4@1_)N+a@HAq2EYLOIr`#g`m00{AQNe zZi`jF=qxvtXY^VX04$20pFJ~CDeE0kMGez7t13IO=k!IRe@KL(m+2!|!q;K2>-;TV z9$F4%2yLA=9}hws5~e|&c7c?IR0jHq*M;V5jt>}#x8ukrLlh%JBx$k3XxSmt5cj-^v;o+P%SwbXH*z8$CxxqW2*F2iM8QyQ8by)tKx8=L|J^U(^k#0$c^Mae; z7h}xKlGbdEmy)+GZ3a6%OOUG7*Ew)%oL`f7y5aJsh2&WAA<%*mw*UFjgUd}3E+5lG z#1bi}l;?geLDQQ_cA6inIMM_CMs|-8z4#C?bb_vk3lPhx3H*)AdqR-$dF4N7PtPe? zeIxcX&)9RtURlx`5hvvQ=K3#F@RF*>Iepj&aVM}ZI#~ksj0@Dr2(7|pxX$w9{oa7W zA(~-v*VZye9yX>dvj8kKI)*>|l7fpB0M}j_jb8sfLQ=rX`dTpljiJyl>!FAgAo10c zHUZO`L;=-etJ|szoyp3ki+%W}UH8}&##ze#`w9eMa9m+|{yndO6hTPAJ@harCM*>Y zTT@p7n>l((%GN8^Da8~d`tv+4kBclQ&qK?E>H4?Z32G5Wnm1^>RL1Kk^J(OQ>aKGJycZXRZiWM!edv)y8zs zyGNt5Jl||y;o#VKem$m^*l_<-(3oK<(zU^ja6Sn1qgQg#j7!VgQ-}To1B z-;K0C=Hp6n4vn9SJO z6x%6=0h8~bPx7}Ehb(>`$~5{B<)`GF=sNjy37RWEB4R>z$;Cw)F7A^jw>yn@CRojI z09EjrF-q;MZ-m?+YAiI`NeiqCsh8Ine9ypVcXz=6!CmJPV=y)b5|pZ7vOdwsM5_hJ zAIO>m8K>XPq@>hkMNviRA*x};6O)hWvPO8vl5Ld{H%dczhstn-n~|Ny>&Bp{k_OeF z(B5&q4sI(1E*w#C9Je(u#9lgV#U|BXMv(Qd)s%ISs~Dhk>SYU6%XeCQe>teah*vzZ#YN{;FPS%IqZ^1 z-0l1T|Pj#D>DsT$PxWgSZx`AM|4DVG#f#L_n~(Nc=A`ASGEZAHz{I|=eo&Ui`;cL z&{-1l{`9D89%crMRwImD>_yE?P!;>gxDA&@2Iwzr%0k$YRlmsW>IAJqu9;*HJelfS zSRJxUXJ*2+*;pqfwu1`Y%sW(>kw!F-Nnlsh>WbeleIx(DSdrifQn@z&dMzF*SP?N3 zM@p?er^@sS+_!rX83iq{^`T_yFnn4DHDC;Ba5jIsg>DB#emWqReR=Byh6?lLnYhy% zx-0x^*3gWwUeE8(14$XDS&2V#$;0Cl8u#!bSajPo@zyNqskjvpmrjmtwigKj(hzu9$AaxzEM?yN$kVtX{p2oI_bHu;;TI$5(vMKQY zsI+qu#kwI#g5kve_0XG4%ni=K5@RyKKn=oay(--G)@Y8|Af%#TIDwu$DdKyK9qw@r 
z_II2mV%d?{vP1M)l6*#OY;3|CnqlV4z@jfy{rGaj+ithH`TUGeOl-(~K1cI5Z2RVY@p-P*=pYYEa?bMQSvG2*!-WTIgu~EfX=bO9Lc_RHUaM{J z*cI+m2w}jvYQanZ`qsEFG+s2IrL6(#SBtj6`sQ@Wu@@kDJ&+?vw4E`#6HZW_q#_(a zZ}Rk05bA52G+Uc*&T=TIwdwy3{daqCM&7&i5Z~EXP~NW!>f|OA!nOmDx7mR(>&wSy zGyp(2VTPGQrx==6bKs8*OWs%C5)wsFJ=Z>2x_Vy5zGZva(+=Dmr(D|XRezk{K};cX z3C!#D7s91QQ6>R<9{UD^utHsw(xY;)#DtGS{fw$$UV`}8YzU~ea^cS(vwH%KVK!e~ ztBLxDp5FNLTpmCN`#$FwkkK%z)V)zz7c1_56&fH?1#I(xVAacMA+X}O;<1orcHEwE zh-oeuUNHf-VunjnHjp}E%|@hQ5FIpiV*A3*6}s2KgHzuS!W?11u{SbOR({JkulYE9xuhEBKN-=Wb$R8OL6kw5VkBUWmga>bH8yzI zSwDx4e&AvLecdj&gxZdj{L4|kC@-}(pLh*TrNku7Haw{<#GM_PA|{};cJ^5+v44GW zJ^SExl4LRB{Lc{Qwx8ye(Ho_8^xsL={gnPIneFcVJ;EUGNeBof6yyo5fc}!HEGIYY zO8=-XeU%*#vw22cZOJg$)l7=hi(-Nq54UF~oWMg|EEBE|m<&NQrka#>{3NE58iuao zI}%7KX29ph;r%^$9}lmtilHlPogQDECH+mi06lJEofGt{D-GLi)UY_cS*Z(ykeIbX z<-3Um;yVY{gs@2fzvX9!;`cPdv9wrDZG4IrIm9)`Fm+PQ`gv{1`uc_yR10Wwh$e%F zL2SD{!8S7p&NNGEb9*G6`c@k~PMjo{m+x*5=^$2VDdif<@k`prgtE;L8yISSQAtyL zs!-yCS#5k6)KJKtb&X>%zjuZ#p0d*iTu(PKzpd`wb=D{h(8o?x&f*yJB4~DqLPLnB zH6_A;E{L1*sZn-3D8veGAOi9zfDy5xzJKBJrcmbPfPTyO4IVNf&w({q zVppvU>{EkmYu35PebCuqe$IH{9@v9`vJb}u8%bUvrPg@_)%v>n;8i%;R60oR_m5x;FGND0fb9bwMuk6mi2&9?K4S#5 zhnk?oEv3wI8;Z8Iu4+@L5TPqzJ$L+fW3s0ro2;5z>UJ>whI0-bQp@bx;{+3s9G0$) zs|P@=%0CbyxVY`@_JXNY>kG6H=_ze!)_gRD`>{>5vYd6=8rx5&K=A#b@ch6ZIiR-z2YugWgl-M(9 zTBfz(mhjC4=Afh?(-N+wgwUziBxyvf&Uph=fyJ}eDdVWWwfV9Puo^z}Ub;F7(gXO( z#JM#O{tkms)vF$@&VIg$$Ov(d+8BhYiJX4rXvV}9+2FYuL>hF6Ew?MnnqHDBz&e1# zT-(fHPEMq~T$E)OWUW)wa0g{KUGP#rA__hcFHJ5ZsNH_~Wx=1{z$te*@~UB{cD9f{ zAHmS%0`?c3S2-y4t6qU9A1xuosYjOOh4wd?V#@(Wo|>Ig_ZcfC_4qvRFOfw!6y%fd1~s2zbg zHY-gD@#_CimFO5fB=nop&F0^SRd9?V<`G z6`O`->k6Jp(KBIcasi?&v@|7%C`D+rMx(2H&oV5dav|kvb*{eww!PE%wI*qJy|$BJ z?KZ7f!55jwbh{N5uZ40y)P?d*DTTIaoj{!M{WztJli%pQAAf!Y zNm1L z9|M%}2WOwZ${Z>;IOo%B-Hr@9kQ;}6oqx9BUduyy$QVtvJC-*0WIqTlK-oG|WbQBP z)?`3v4dOC5nN$=W-%A!OW=ZZ~ZQN87m#xfN$&`(2=~^sx6vvca@i?|J zUOs1mwSBnfFppCDz(R9mJ7t*uh{NF^g-)|z+j9lo85JhQf?)t+j8lrOX+tKlM1Wc* zkO^9T+eqDtm(eVWd!tKHFI=C*V=1LUECN8+HbQNb5}Lsl 
zk{*|b`)5#q$ezGh7VJ!lL^)xM8zXV4KK4C8J4dse8s^kWi-mzn>wr8;Ipq`-UVu#n5&vC9 z#WnCu*o0wkoon#;{&s!4R$Q%IMo0-lPm}|rmrE2O8RplLF&4+;(OF122;ROAsTE*Z zi_Zkko@4rYzFI`nf#lcV*}2e)m&e$Ik6Kj!@(hN@b1m+Qigl3b?C-G2c?QXt9=$Qa zmB}+TA>aL-mLox3ENEPsO1z@t7Py4~LPTiW7L6AG5^H7}Fut^{v2^L;79{OjYjHRp zadhri%#!u0X^!+rmPaik~sY;CMX1^k(#Um5{sC3ZjCRN8&`JR)N9saK!O=lwhckqq@rC zr%-BZcm-$OlAxuvD*Q_;f^o40w=eiA4gXP z-73n{j)peb7f9=IJRHPwLucgVuc%lN16t_kZbEZO5!jrbpD}KPT(GjQ$0P1fjmv_i zn+B%uVT=ja3pIlVp??xV9M_`X19D|XbT0~DpznJek4G4LMAvF`-42Jt7Zh8(?RNP7 zKA>w_80+Z)h3n15hdv@?D#Y3)u(LFESSG;#t+jUXaMPBO$-X4}B*WEHIje;WkRAu@0TZNB`pNZ5$A?t7coYLZa?Xn3La`(Sw4gyHmwb<=;;h49# zcXYd5h(&q6n27U`W&liu54EuH<13=NVEstkuR`$_kUIHD2C(GsDua*UXU&g_>tU>9 z!Ln?JPT+S0DauoE*J)g_Dy(-v+T_#{3uLUky#(hn{EJ+L{Pmp`ge}mv9hyec1T!1# z_Z`|+KU$->v&J(V&T>8_g{7#vUb01mGz88X)2ALB;%B^dbyor8X z6N1R>3AUcS7rFM_^S^#A*c&XTvhbOVnaO@HElu+1l&i1cwb{wSU;p%>joN!~zg}f~Filvk{FBJ7loDjywR8jnXZb{xsyV&y@jW!Op@IhTAVE7DO7 z39{>Zsaqfcr%Rvd+;Qs#X04C0eoYI4;hPz=cwp?12T)#$ptLUNZ1&pf7|${Oo;O&2 z%5r#KcCGd<>4J5|2SX~);e2o8&GG=lNoM_O~6)zy(#yH>;c$|ks;gb5JQ1w5l*f7Q!-$i=nEC$3=acdcU zZ*c6TW`PjW*Y!EQ20p9N)Y#;7=(*U5N8y!c8IfpHd()3FJNNGkQKE&-5EEJWmvM0` zx0}X8jdDfBb6{MVd3pwWRRaK$EQVj--%v_px7(p@8%RM!g{EzwwZ=~h!<3pUQF#YQ zG61F23Rw!*6o_w+AN1HOo*L}^TIUxR6@Pdh+FA*S>yN!;?72ZVUj?b~ankw`o=e5g z2EiJGzK_KSAJdB&L66J1TeHzrRIGu_BF+Fi7fNG#{Qmt3W=7LAPFZ1~T}=|nzJ=G; z&=e?RK?tW6ey@regkG^2k69d^sLAK>?O9B3bi=S#TrRA&@V@AiS57bTv*<#=`EfpH zv_0?}@@9Sn!N&Um$LGJ@2a#u<*{-8W%-6r}hG(pWu_kCfqYctKOgKj0jZL}-#>x+$c?$=Z>ln{vrt0GeluE{6r6Rbu8Cwh>c6!mk1oD zD=CSeOWm-nwUTuy5f)N!G9I{Lk@I5atRK^BHm(H%Nyxsj`1<<9;c$Sj#BMC(csOFe z+hf1qc*b3hnl@O%z=`8?&vyd9*1$I=v-kHRek5F)@D18aN?2n}A8j7=

FIN

", unsafe_allow_html=True) - - single_frame_time_selector(timing_details[idx].uuid, 'motion', shift_frames=shift_frames_setting) - single_frame_time_duration_setter(timing_details[idx].uuid, 'motion', shift_frames=shift_frames_setting) - update_animation_style_element(timing_details[idx].uuid) - - if timing_details[idx].aux_frame_index != len(timing_details) - 1: - with timing2: - current_individual_clip_element(timing_details[idx].uuid) - with timing3: - current_preview_video_element(timing_details[idx].uuid) - - st.markdown("***") \ No newline at end of file diff --git a/ui_components/components/mood_board_page.py b/ui_components/widgets/style_explorer_element.py similarity index 78% rename from ui_components/components/mood_board_page.py rename to ui_components/widgets/style_explorer_element.py index 19905967..5ec4226b 100644 --- a/ui_components/components/mood_board_page.py +++ b/ui_components/widgets/style_explorer_element.py @@ -3,7 +3,7 @@ from shared.constants import AIModelType -def mood_board_page(project_uuid): +def style_explorer_element(project_uuid): def get_varied_text(styling_instructions="", character_instructions="", action_instructions="", scene_instructions=""): text_list = [] @@ -34,50 +34,46 @@ def get_varied_text(styling_instructions="", character_instructions="", action_i return ", ".join(text_list) - data_repo = DataRepo() - st.subheader("Mood Board") + st.markdown("***") + data_repo = DataRepo() a1, a2, a3 = st.columns([0.5, 1, 0.5]) with a2: prompt = st.text_area("What's your prompt?", key="prompt") + def create_variate_option(column, key): + label = key.replace('_', ' ').capitalize() + variate_option = column.checkbox(f"Variate {label.lower()}", key=f"{key}_checkbox") + if variate_option: + instructions = column.text_area(f"How would you like to variate {label.lower()}?", key=f"{key}_textarea") + else: + instructions = "" + return instructions + b1, b2, b3, b4 = st.columns([1, 1, 1, 1]) + with b1: - variate_styling = st.checkbox("Variate 
styling", key="variate_styling") - if variate_styling: - styling_instructions = st.text_area("How would you like to variate styling?", key="variate_styling_textarea") - else: - styling_instructions = "" + styling_instructions = create_variate_option(b1, "styling") with b2: - variate_character = st.checkbox("Variate character", key="variate_character") - if variate_character: - character_instructions = st.text_area("How would you like to variate character?", key="variate_character_textarea") - else: - character_instructions = "" + character_instructions = create_variate_option(b2, "character") with b3: - variate_action = st.checkbox("Variate action", key="variate_action") - if variate_action: - action_instructions = st.text_area("How would you like to variate action?", key="variate_action_textarea") - else: - action_instructions = "" + action_instructions = create_variate_option(b3, "action") with b4: - variate_scene = st.checkbox("Variate scene", key="variate_scene") - if variate_scene: - scene_instructions = st.text_area("How would you like to variate the scene?", key="variate_scene_textarea") - else: - scene_instructions = "" + scene_instructions = create_variate_option(b4, "scene") model_list = data_repo.get_all_ai_model_list(model_type_list=[AIModelType.TXT2IMG.value], custom_trained=False) model_name_list = list(set([m.name for m in model_list])) c1, c2, c3 = st.columns([0.25, 1, 0.25]) + with c2: models_to_use = st.multiselect("Which models would you like to use?", model_name_list, key="models_to_use", default=model_name_list) d1, d2, d3 = st.columns([0.5, 1, 0.5]) + with d2: number_to_generate = st.slider("How many images would you like to generate?", min_value=1, max_value=100, value=10, step=1, key="number_to_generate") From 7512ca6c696d341336bdca51f523578a527cbcc0 Mon Sep 17 00:00:00 2001 From: peter942 Date: Sat, 14 Oct 2023 17:14:07 +0200 Subject: [PATCH 077/164] Rearranging things --- ui_components/setup.py | 6 +-- .../widgets/style_explorer_element.py 
| 42 +++++++++---------- 2 files changed, 22 insertions(+), 26 deletions(-) diff --git a/ui_components/setup.py b/ui_components/setup.py index 3e191a46..2efcd96e 100644 --- a/ui_components/setup.py +++ b/ui_components/setup.py @@ -146,12 +146,12 @@ def setup_app_ui(): st.session_state['frame_styling_view_type'] = option_menu( None, view_types, - icons=['aspect-ratio', 'bookshelf', "hourglass", 'stopwatch'], + icons=['compass', 'bookshelf','aspect-ratio', "hourglass", 'stopwatch'], menu_icon="cast", orientation="horizontal", key="section-selecto1r", styles={"nav-link": {"font-size": "15px", "margin":"0px", "--hover-color": "#eee"}, - "nav-link-selected": {"background-color": "orange"}}, + "nav-link-selected": {"background-color": "green"}}, manual_select=st.session_state['frame_styling_view_type_index'] ) @@ -169,7 +169,7 @@ def setup_app_ui(): st.session_state["manual_select"] = None st.session_state['page'] = option_menu(None, pages, icons=['palette', 'camera-reels', "hourglass", 'stopwatch'], menu_icon="cast", orientation="horizontal", key="secti2on_selector", styles={ - "nav-link": {"font-size": "15px", "margin": "0px", "--hover-color": "#eee"}, "nav-link-selected": {"background-color": "green"}}, manual_select=st.session_state["manual_select"]) + "nav-link": {"font-size": "15px", "margin": "0px", "--hover-color": "#eee"}, "nav-link-selected": {"background-color": "orange"}}, manual_select=st.session_state["manual_select"]) # TODO: CORRECT-CODE diff --git a/ui_components/widgets/style_explorer_element.py b/ui_components/widgets/style_explorer_element.py index 5ec4226b..0ad11fd5 100644 --- a/ui_components/widgets/style_explorer_element.py +++ b/ui_components/widgets/style_explorer_element.py @@ -34,35 +34,28 @@ def get_varied_text(styling_instructions="", character_instructions="", action_i return ", ".join(text_list) - st.markdown("***") - data_repo = DataRepo() - a1, a2, a3 = st.columns([0.5, 1, 0.5]) - with a2: - prompt = st.text_area("What's your prompt?", 
key="prompt") - def create_variate_option(column, key): label = key.replace('_', ' ').capitalize() - variate_option = column.checkbox(f"Variate {label.lower()}", key=f"{key}_checkbox") + variate_option = column.checkbox(f"Vary {label.lower()}", key=f"{key}_checkbox") if variate_option: - instructions = column.text_area(f"How would you like to variate {label.lower()}?", key=f"{key}_textarea") + instructions = column.text_area(f"How would you like to vary the {label.lower()}?", key=f"{key}_textarea", help=f"It'll write a custom {label.lower()} prompt based on your instructions.") else: instructions = "" return instructions + + st.markdown("***") + data_repo = DataRepo() - b1, b2, b3, b4 = st.columns([1, 1, 1, 1]) - - with b1: - styling_instructions = create_variate_option(b1, "styling") - - with b2: - character_instructions = create_variate_option(b2, "character") - - with b3: - action_instructions = create_variate_option(b3, "action") + a1, a2, a3 = st.columns([0.5, 1, 0.5]) + prompt = a2.text_area("What's your base prompt?", key="prompt", help="This will be included at the beginning of each prompt") - with b4: - scene_instructions = create_variate_option(b4, "scene") + b1, b2, b3, b4,b5,b6 = st.columns([0.5, 1, 1, 1, 1, 0.5]) + + styling_instructions = create_variate_option(b2, "styling") + character_instructions = create_variate_option(b3, "character") + action_instructions = create_variate_option(b4, "action") + scene_instructions = create_variate_option(b5, "scene") model_list = data_repo.get_all_ai_model_list(model_type_list=[AIModelType.TXT2IMG.value], custom_trained=False) model_name_list = list(set([m.name for m in model_list])) @@ -70,14 +63,16 @@ def create_variate_option(column, key): c1, c2, c3 = st.columns([0.25, 1, 0.25]) with c2: - models_to_use = st.multiselect("Which models would you like to use?", model_name_list, key="models_to_use", default=model_name_list) + models_to_use = st.multiselect("Which models would you like to use?", model_name_list, 
key="models_to_use", default=model_name_list, help="It'll rotate through the models you select.") d1, d2, d3 = st.columns([0.5, 1, 0.5]) with d2: - number_to_generate = st.slider("How many images would you like to generate?", min_value=1, max_value=100, value=10, step=1, key="number_to_generate") + number_to_generate = st.slider("How many images would you like to generate?", min_value=1, max_value=100, value=10, step=1, key="number_to_generate", help="It'll generate 4 from each variation.") + + e1, e2, e3 = st.columns([0.5, 1, 0.5]) - if st.button("Generate images", key="generate_images", use_container_width=True, type="primary"): + if e2.button("Generate images", key="generate_images", use_container_width=True, type="primary"): st.info("Generating images...") counter = 0 varied_text = "" @@ -95,6 +90,7 @@ def create_variate_option(column, key): variants = timing.alternative_images_list st.markdown("***") + num_columns = st.slider('Number of columns', min_value=1, max_value=10, value=4) num_items_per_page = 30 From d5cff19937186196b8ff4201da063ce98237dec1 Mon Sep 17 00:00:00 2001 From: piyushK52 Date: Sat, 14 Oct 2023 20:56:59 +0530 Subject: [PATCH 078/164] asset display fix --- backend/db_repo.py | 9 ++++++ .../components/frame_styling_page.py | 2 +- ui_components/widgets/sidebar_logger.py | 31 ++++++++++++------- utils/data_repo/api_repo.py | 4 +++ utils/data_repo/data_repo.py | 5 +++ 5 files changed, 39 insertions(+), 12 deletions(-) diff --git a/backend/db_repo.py b/backend/db_repo.py index a21e62f7..260f200b 100644 --- a/backend/db_repo.py +++ b/backend/db_repo.py @@ -226,6 +226,15 @@ def get_all_file_list(self, **kwargs): return InternalResponse(payload, 'file found', True) + def get_file_list_from_log_uuid_list(self, log_uuid_list): + inference_log_list = InferenceLog.objects.filter(uuid__in=log_uuid_list, is_disabled=False).all() + file_list = InternalFileObject.objects.filter(inference_log__uuid__in=[str(log.uuid) for log in inference_log_list], 
is_disabled=False).all() + payload = { + 'data': InternalFileDto(file_list, many=True).data + } + + return InternalResponse(payload, 'file list fetched successfully', True) + def create_or_update_file(self, file_uuid, type=InternalFileType.IMAGE.value, **kwargs): # DBRepo._count += 1 # cls_name = inspect.currentframe().f_code.co_name diff --git a/ui_components/components/frame_styling_page.py b/ui_components/components/frame_styling_page.py index c08662c6..d2a7bf23 100644 --- a/ui_components/components/frame_styling_page.py +++ b/ui_components/components/frame_styling_page.py @@ -255,4 +255,4 @@ def frame_styling_page(mainheader2, project_uuid: str): with st.expander("🔍 Inference Logging", expanded=True): - sidebar_logger(data_repo, project_uuid) + sidebar_logger(project_uuid) diff --git a/ui_components/widgets/sidebar_logger.py b/ui_components/widgets/sidebar_logger.py index 155a0500..e9786e6b 100644 --- a/ui_components/widgets/sidebar_logger.py +++ b/ui_components/widgets/sidebar_logger.py @@ -5,19 +5,22 @@ import json import math -def sidebar_logger(data_repo, project_uuid): +from utils.data_repo.data_repo import DataRepo + +def sidebar_logger(project_uuid): + data_repo = DataRepo() + a1, _, a3 = st.columns([1, 0.2, 1]) log_list = data_repo.get_all_inference_log_list(project_uuid) - refresh_disabled = not any(log.status in [InferenceStatus.QUEUED.value, InferenceStatus.IN_PROGRESS.value] for log in log_list) + log_list.reverse() + refresh_disabled = False # not any(log.status in [InferenceStatus.QUEUED.value, InferenceStatus.IN_PROGRESS.value] for log in log_list) if a1.button("Refresh log", disabled=refresh_disabled): st.rerun() - a3.button("Jump to full log view") + # a3.button("Jump to full log view") - # Add radio button for status selection status_option = st.radio("Statuses to display:", options=["All", "In Progress", "Succeeded", "Failed"], key="status_option", index=0, horizontal=True) - # Filter log_list based on selected status if status_option == "In 
Progress": log_list = [log for log in log_list if log.status in [InferenceStatus.QUEUED.value, InferenceStatus.IN_PROGRESS.value]] elif status_option == "Succeeded": @@ -29,20 +32,26 @@ def sidebar_logger(data_repo, project_uuid): items_per_page = b2.slider("Items per page", min_value=1, max_value=20, value=5, step=1) page_number = b1.number_input('Page number', min_value=1, max_value=math.ceil(len(log_list) / items_per_page), value=1, step=1) - log_list = log_list[::-1][(page_number - 1) * items_per_page : page_number * items_per_page] + display_list = log_list[(page_number - 1) * items_per_page : page_number * items_per_page] + file_list = data_repo.get_file_list_from_log_uuid_list([log.uuid for log in display_list]) + log_file_dict = {} + for file in file_list: + log_file_dict[str(file.inference_log.uuid)] = file st.markdown("---") - for idx, log in enumerate(log_list): - + for idx, log in enumerate(display_list): origin_data = json.loads(log.input_params).get(InferenceParamType.ORIGIN_DATA.value, None) if not log.status or not origin_data: continue output_url = None - output_data = json.loads(log.output_details) - if 'output' in output_data and output_data['output']: - output_url = output_data['output'][0] if isinstance(output_data['output'], list) else output_data['output'] + if log.uuid in log_file_dict: + output_url = log_file_dict[log.uuid].location + + # output_data = json.loads(log.output_details) + # if 'output' in output_data and output_data['output']: + # output_url = output_data['output'][0] if isinstance(output_data['output'], list) else output_data['output'] c1, c2, c3 = st.columns([1, 1 if output_url else 0.01, 1]) diff --git a/utils/data_repo/api_repo.py b/utils/data_repo/api_repo.py index db904769..57cd3700 100644 --- a/utils/data_repo/api_repo.py +++ b/utils/data_repo/api_repo.py @@ -176,6 +176,10 @@ def get_file_from_uuid(self, uuid): res = self.http_get(self.FILE_URL, params={'uuid': uuid}) return InternalResponse(res['payload'], 'success', 
res['status']) + def get_file_list_from_log_uuid_list(self, log_uuid_list): + res = self.http_post(self.FILE_LIST_URL, data={'log_uuid_list': log_uuid_list}) + return InternalResponse(res['payload'], 'success', res['status']) + def get_all_file_list(self, type: InternalFileType, tag = None, project_id = None): filter_data = {"type": type} if tag: diff --git a/utils/data_repo/data_repo.py b/utils/data_repo/data_repo.py index 88b23669..20941cab 100644 --- a/utils/data_repo/data_repo.py +++ b/utils/data_repo/data_repo.py @@ -74,6 +74,11 @@ def get_file_from_uuid(self, uuid): file = self.db_repo.get_file_from_uuid(uuid).data['data'] return InternalFileObject(**file) if file else None + def get_file_list_from_log_uuid_list(self, log_uuid_list): + res = self.db_repo.get_file_list_from_log_uuid_list(log_uuid_list) + file_list = res.data['data'] if res.status else [] + return [InternalFileObject(**file) for file in file_list] + def get_all_file_list(self, file_type: InternalFileType, tag = None, project_id = None): filter_data = {"type": file_type} if tag: From a3035e2a21adab0c564f0b4730de03e4063a843d Mon Sep 17 00:00:00 2001 From: peter942 Date: Sat, 14 Oct 2023 18:37:11 +0200 Subject: [PATCH 079/164] Setting up dynamic prompting --- .../widgets/style_explorer_element.py | 77 ++++++++++--------- 1 file changed, 42 insertions(+), 35 deletions(-) diff --git a/ui_components/widgets/style_explorer_element.py b/ui_components/widgets/style_explorer_element.py index 0ad11fd5..cb0071bd 100644 --- a/ui_components/widgets/style_explorer_element.py +++ b/ui_components/widgets/style_explorer_element.py @@ -1,40 +1,46 @@ import streamlit as st from utils.data_repo.data_repo import DataRepo from shared.constants import AIModelType +import replicate def style_explorer_element(project_uuid): - def get_varied_text(styling_instructions="", character_instructions="", action_instructions="", scene_instructions=""): + def query_llama2(user_instructions, system_instructions): + prompt = 
system_instructions + "\n" + user_instructions + "|" + output = replicate.run( + "meta/llama-2-7b:527827021d8756c7ab79fde0abbfaac885c37a3ed5fe23c7465093f0878d55ef", + input={ + "debug": False, + "top_k": 250, + "top_p": 0.95, + "prompt": prompt, + "temperature": 0.73, + "max_new_tokens": 30, + "min_new_tokens": -1, + "stop_sequences": "\n" + } + ) + result = "" + for item in output: + result += item + return result + + def create_prompt(styling_instructions="", character_instructions="", action_instructions="", scene_instructions=""): text_list = [] - if styling_instructions: - system_instructions = "PLACEHOLDER_STYLING" - # result = query_model(styling_instructions, system_instructions) - result = "Styling instructions" - text_list.append(result) - - if character_instructions: - system_instructions = "PLACEHOLDER_CHARACTER" - # result = query_model(character_instructions, system_instructions) - result = "Character instructions" - text_list.append(result) - - if action_instructions: - system_instructions = "PLACEHOLDER_ACTION" - # result = query_model(action_instructions, system_instructions) - result = "Action instructions" - text_list.append(result) - - if scene_instructions: - system_instructions = "PLACEHOLDER_SCENE" - # result = query_model(scene_instructions, system_instructions) - result = "Scene instructions" - text_list.append(result) + def process_instructions(instructions, system_instructions_template): + if instructions: + result = query_llama2(instructions, system_instructions_template) + text_list.append(result) + + process_instructions(character_instructions, "Input|Character Descriptions:\nSickly old man|Francois Leger,old Russian man, beaten-down look, wearing suit\nPretty young woman|Jules van Cohen,beautiful young woman, floral dress,vibrant\nIrish boy|James McCarthy,10 year old Irish boy,red hair,pink shirt,wheezing in a small voice\nYoung thug|Hughie Banks,23 y/o English football hooligan with skinned head") + 
process_instructions(styling_instructions, "Input|Style Description:\nmoody and emotion|watercolour style, dark colours and pastel tones.\nchildren's adventure|simple children's book illustration style with light colours\ngritty and realistic|Sin City style,black and white,realistic,strong lines.\nhighly abstract|abstract art style, vibrant colours and thick linework.") + process_instructions(action_instructions, "Input|Action Description:\ngoing on an adventure|exploring old ruins,with a flashlight\nbusy day in the city|walking through downtown at rushour\nfamily time|making dinner with the family\nbeing creepy|hiding in bushes,looking in window\nworking hard|finishing homework,late at night") + process_instructions(scene_instructions, "Input|Scene Description:\nForest|Misty woods with towering trees and glowing plants.\nFuturistic city|Skyscrapers, flying cars, neon lights in a futuristic metropolis.\nMedieval|Castle courtyard with knights, cobblestones, and a fountain.\nBeach|Golden sands, rolling waves, and a vibrant sunset.\nApocalypse|Ruined buildings and desolation in a bleak wasteland.") return ", ".join(text_list) - def create_variate_option(column, key): label = key.replace('_', ' ').capitalize() variate_option = column.checkbox(f"Vary {label.lower()}", key=f"{key}_checkbox") @@ -50,22 +56,21 @@ def create_variate_option(column, key): a1, a2, a3 = st.columns([0.5, 1, 0.5]) prompt = a2.text_area("What's your base prompt?", key="prompt", help="This will be included at the beginning of each prompt") - b1, b2, b3, b4,b5,b6 = st.columns([0.5, 1, 1, 1, 1, 0.5]) - - styling_instructions = create_variate_option(b2, "styling") - character_instructions = create_variate_option(b3, "character") + b1, b2, b3, b4,b5,b6 = st.columns([0.5, 1, 1, 1, 1, 0.5]) + character_instructions = create_variate_option(b2, "character") + styling_instructions = create_variate_option(b3, "styling") action_instructions = create_variate_option(b4, "action") scene_instructions = 
create_variate_option(b5, "scene") - model_list = data_repo.get_all_ai_model_list(model_type_list=[AIModelType.TXT2IMG.value], custom_trained=False) - model_name_list = list(set([m.name for m in model_list])) + + model_name_list = list(set([m.name for m in data_repo.get_all_ai_model_list(model_type_list=[AIModelType.TXT2IMG.value], custom_trained=False)])) c1, c2, c3 = st.columns([0.25, 1, 0.25]) with c2: models_to_use = st.multiselect("Which models would you like to use?", model_name_list, key="models_to_use", default=model_name_list, help="It'll rotate through the models you select.") - d1, d2, d3 = st.columns([0.5, 1, 0.5]) + d1, d2, d3 = st.columns([0.75, 1, 0.75]) with d2: number_to_generate = st.slider("How many images would you like to generate?", min_value=1, max_value=100, value=10, step=1, key="number_to_generate", help="It'll generate 4 from each variation.") @@ -73,13 +78,15 @@ def create_variate_option(column, key): e1, e2, e3 = st.columns([0.5, 1, 0.5]) if e2.button("Generate images", key="generate_images", use_container_width=True, type="primary"): - st.info("Generating images...") + counter = 0 varied_text = "" - for _ in range(number_to_generate): + num_models = len(models_to_use) + num_images_per_model = number_to_generate // num_models + for _ in range(num_images_per_model): for model_name in models_to_use: if counter % 4 == 0 and (styling_instructions or character_instructions or action_instructions or scene_instructions): - varied_text = get_varied_text(styling_instructions, character_instructions, action_instructions, scene_instructions) + varied_text = create_prompt(styling_instructions, character_instructions, action_instructions, scene_instructions) prompt_with_variations = f"{prompt}, {varied_text}" if prompt else varied_text st.write(f"Prompt: '{prompt_with_variations}'") st.write(f"Model: {model_name}") From 6480388641233cc560b8bf96ffd1e08892254e46 Mon Sep 17 00:00:00 2001 From: piyushK52 Date: Sat, 14 Oct 2023 23:55:40 +0530 Subject: 
[PATCH 080/164] wip: project update lock --- backend/db_repo.py | 19 ++++++++++++++++--- backend/migrations/0011_lock_added.py | 23 +++++++++++++++++++++++ backend/models.py | 8 ++++++++ banodoco_runner.py | 5 ++++- ui_components/setup.py | 7 +++++-- ui_components/widgets/sidebar_logger.py | 4 ---- utils/common_utils.py | 23 ++++++++++++++++++++++- utils/data_repo/api_repo.py | 11 ++++++++++- utils/data_repo/data_repo.py | 11 ++++++++++- 9 files changed, 98 insertions(+), 13 deletions(-) create mode 100644 backend/migrations/0011_lock_added.py diff --git a/backend/db_repo.py b/backend/db_repo.py index 260f200b..867d3d90 100644 --- a/backend/db_repo.py +++ b/backend/db_repo.py @@ -19,15 +19,17 @@ from shared.constants import AUTOMATIC_FILE_HOSTING, LOCAL_DATABASE_NAME, SERVER, ServerType from shared.file_upload.s3 import upload_file, upload_file_from_obj -from backend.models import AIModel, AIModelParamMap, AppSetting, BackupTiming, InferenceLog, InternalFileObject, Project, Setting, Timing, User +from backend.models import AIModel, AIModelParamMap, AppSetting, BackupTiming, InferenceLog, InternalFileObject, Lock, Project, Setting, Timing, User from backend.serializers.dao import CreateAIModelDao, CreateAIModelParamMapDao, CreateAppSettingDao, CreateFileDao, CreateInferenceLogDao, CreateProjectDao, CreateSettingDao, CreateTimingDao, CreateUserDao, UpdateAIModelDao, UpdateAppSettingDao, UpdateSettingDao from shared.constants import InternalResponse from django.db.models import F +from django.db import transaction + logger = AppLogger() -# @measure_execution_time +@measure_execution_time class DBRepo: _instance = None _count = 0 @@ -1629,4 +1631,15 @@ def restore_backup(self, backup_uuid: str): # payment def generate_payment_link(self, amount): - return InternalResponse({'data': 'https://buy.stripe.com/test_8wMbJib8g3HK7vi5ko'}, 'success', True) # temp link \ No newline at end of file + return InternalResponse({'data': 
'https://buy.stripe.com/test_8wMbJib8g3HK7vi5ko'}, 'success', True) # temp link + + # lock + def acquire_lock(self, key): + with transaction.atomic(): + _, created = Lock.objects.get_or_create(row_key=key) + return InternalResponse({'data': True if created else False}, 'success', True) + + def release_lock(self, key): + with transaction.atomic(): + Lock.objects.filter(row_key=key).delete() + return InternalResponse({'data': True}, 'success', True) \ No newline at end of file diff --git a/backend/migrations/0011_lock_added.py b/backend/migrations/0011_lock_added.py new file mode 100644 index 00000000..11d1abb9 --- /dev/null +++ b/backend/migrations/0011_lock_added.py @@ -0,0 +1,23 @@ +# Generated by Django 4.2.1 on 2023-10-14 12:35 + +from django.db import migrations, models + + +class Migration(migrations.Migration): + + dependencies = [ + ('backend', '0010_project_metadata_added'), + ] + + operations = [ + migrations.CreateModel( + name='Lock', + fields=[ + ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), + ('row_key', models.CharField(max_length=255, unique=True)), + ], + options={ + 'db_table': 'lock', + }, + ), + ] diff --git a/backend/models.py b/backend/models.py index 6083c00e..2a1c8ac8 100644 --- a/backend/models.py +++ b/backend/models.py @@ -18,6 +18,14 @@ class Meta: abstract = True +class Lock(models.Model): + row_key = models.CharField(max_length=255, unique=True) + + class Meta: + app_label = 'backend' + db_table = 'lock' + + class User(BaseModel): name = models.CharField(max_length=255, default="") email = models.CharField(max_length=255) diff --git a/banodoco_runner.py b/banodoco_runner.py index 11e7fca8..91e7dba5 100644 --- a/banodoco_runner.py +++ b/banodoco_runner.py @@ -8,6 +8,7 @@ from shared.constants import InferenceParamType, InferenceStatus, ProjectMetaData from shared.logging.constants import LoggingType from shared.logging.logging import AppLogger +from utils.common_utils import 
acquire_lock, release_lock from utils.data_repo.data_repo import DataRepo from utils.ml_processor.replicate.constants import replicate_status_map @@ -120,9 +121,11 @@ def check_and_update_db(): from django.db import transaction for project_uuid, val in timing_update_list.items(): - with transaction.atomic(): + key = str(project_uuid) + if acquire_lock(key): val = list(set(val)) _ = Project.objects.filter(uuid=project_uuid).update(meta_data=json.dumps({ProjectMetaData.DATA_UPDATE.value: val})) + release_lock(key) if not len(log_list): # app_logger.log(LoggingType.DEBUG, f"No logs found") diff --git a/ui_components/setup.py b/ui_components/setup.py index 4ea73313..2059ad2b 100644 --- a/ui_components/setup.py +++ b/ui_components/setup.py @@ -16,7 +16,7 @@ from streamlit_option_menu import option_menu from ui_components.constants import CreativeProcessType from ui_components.models import InternalAppSettingObject -from utils.common_utils import create_working_assets, get_current_user, get_current_user_uuid, reset_project_state +from utils.common_utils import acquire_lock, create_working_assets, get_current_user, get_current_user_uuid, release_lock, reset_project_state from utils import st_memory from utils.data_repo.data_repo import DataRepo @@ -96,7 +96,10 @@ def setup_app_ui(): _ = data_repo.get_timing_from_uuid(timing_uuid, invalidate_cache=True) # removing the metadata after processing - data_repo.update_project(uuid=project_list[selected_index].uuid, meta_data=json.dumps({ProjectMetaData.DATA_UPDATE.value: []})) + key = str(project_list[selected_index].uuid) + if acquire_lock(key): + data_repo.update_project(uuid=project_list[selected_index].uuid, meta_data=json.dumps({ProjectMetaData.DATA_UPDATE.value: []})) + release_lock(key) if "current_frame_index" not in st.session_state: st.session_state['current_frame_index'] = 1 diff --git a/ui_components/widgets/sidebar_logger.py b/ui_components/widgets/sidebar_logger.py index e9786e6b..78f332e4 100644 --- 
a/ui_components/widgets/sidebar_logger.py +++ b/ui_components/widgets/sidebar_logger.py @@ -49,10 +49,6 @@ def sidebar_logger(project_uuid): if log.uuid in log_file_dict: output_url = log_file_dict[log.uuid].location - # output_data = json.loads(log.output_details) - # if 'output' in output_data and output_data['output']: - # output_url = output_data['output'][0] if isinstance(output_data['output'], list) else output_data['output'] - c1, c2, c3 = st.columns([1, 1 if output_url else 0.01, 1]) with c1: diff --git a/utils/common_utils.py b/utils/common_utils.py index c3850f37..3436d2ee 100644 --- a/utils/common_utils.py +++ b/utils/common_utils.py @@ -2,6 +2,7 @@ import os import csv import subprocess +import time import psutil import streamlit as st import json @@ -182,4 +183,24 @@ def is_process_active(custom_process_name): except subprocess.CalledProcessError: return False - return False \ No newline at end of file + return False + + +def acquire_lock(key): + data_repo = DataRepo() + retries = 0 + while retries < 6: + lock_status = data_repo.acquire_lock(key) + + if lock_status: + return lock_status + + retries += 1 + time.sleep(0.3) + + return False + +def release_lock(key): + data_repo = DataRepo() + data_repo.release_lock(key) + return True diff --git a/utils/data_repo/api_repo.py b/utils/data_repo/api_repo.py index 57cd3700..3252e8fd 100644 --- a/utils/data_repo/api_repo.py +++ b/utils/data_repo/api_repo.py @@ -177,7 +177,7 @@ def get_file_from_uuid(self, uuid): return InternalResponse(res['payload'], 'success', res['status']) def get_file_list_from_log_uuid_list(self, log_uuid_list): - res = self.http_post(self.FILE_LIST_URL, data={'log_uuid_list': log_uuid_list}) + res = self.http_post(self.FILE_UUID_LIST_URL, data={'log_uuid_list': log_uuid_list}) return InternalResponse(res['payload'], 'success', res['status']) def get_all_file_list(self, type: InternalFileType, tag = None, project_id = None): @@ -447,4 +447,13 @@ def restore_backup(self, uuid): # payment 
link def generate_payment_link(self, amount): res = self.http_get(self.STRIPE_PAYMENT_URL, params={'total_amount': amount}) + return InternalResponse(res['payload'], 'success', res['status']) + + # lock + def acquire_lock(self, key): + res = self.http_get(self.LOCK_URL, params={'key': key, 'action': 'acquire'}) + return InternalResponse(res['payload'], 'success', res['status']) + + def release_lock(self, key): + res = self.http_get(self.LOCK_URL, params={'key': key, 'action': 'release'}) return InternalResponse(res['payload'], 'success', res['status']) \ No newline at end of file diff --git a/utils/data_repo/data_repo.py b/utils/data_repo/data_repo.py index 20941cab..069a2819 100644 --- a/utils/data_repo/data_repo.py +++ b/utils/data_repo/data_repo.py @@ -379,4 +379,13 @@ def update_usage_credits(self, credits_to_add): def generate_payment_link(self, amount): res = self.db_repo.generate_payment_link(amount) link = res.data['data'] if res.status else None - return link \ No newline at end of file + return link + + # lock + def acquire_lock(self, key): + res = self.db_repo.acquire_lock(key) + return res.data['data'] if res.status else None + + def release_lock(self, key): + res = self.db_repo.release_lock(key) + return res.status \ No newline at end of file From a342d0a9f3c9df85b98990afbc004c0ecb67c6fc Mon Sep 17 00:00:00 2001 From: piyushK52 Date: Sun, 15 Oct 2023 14:55:28 +0530 Subject: [PATCH 081/164] image update fixed --- banodoco_runner.py | 3 +-- .../components/frame_styling_page.py | 5 ---- ui_components/setup.py | 24 +++++++++---------- utils/cache/cache_methods.py | 1 + 4 files changed, 14 insertions(+), 19 deletions(-) diff --git a/banodoco_runner.py b/banodoco_runner.py index 91e7dba5..5158d3a7 100644 --- a/banodoco_runner.py +++ b/banodoco_runner.py @@ -118,11 +118,10 @@ def check_and_update_db(): # adding update_data in the project from backend.models import Project - from django.db import transaction for project_uuid, val in timing_update_list.items(): 
key = str(project_uuid) - if acquire_lock(key): + if acquire_lock(key): val = list(set(val)) _ = Project.objects.filter(uuid=project_uuid).update(meta_data=json.dumps({ProjectMetaData.DATA_UPDATE.value: val})) release_lock(key) diff --git a/ui_components/components/frame_styling_page.py b/ui_components/components/frame_styling_page.py index d2a7bf23..440ebfa7 100644 --- a/ui_components/components/frame_styling_page.py +++ b/ui_components/components/frame_styling_page.py @@ -108,20 +108,15 @@ def frame_styling_page(mainheader2, project_uuid: str): variant_comparison_element(st.session_state['current_frame_uuid'], stage=CreativeProcessType.STYLING.value) elif st.session_state['show_comparison'] == "All Other Variants": - - - variant_comparison_grid(st.session_state['current_frame_uuid'], stage=CreativeProcessType.STYLING.value) elif st.session_state['show_comparison'] == "Source Frame": compare_to_source_frame(timing_details) elif st.session_state['show_comparison'] == "Previous & Next Frame": - compare_to_previous_and_next_frame(project_uuid,timing_details) elif st.session_state['show_comparison'] == "None": - display_image(timing_uuid=st.session_state['current_frame_uuid'], stage=WorkflowStageType.STYLED.value, clickable=False) st.markdown("***") diff --git a/ui_components/setup.py b/ui_components/setup.py index 2059ad2b..9ace0296 100644 --- a/ui_components/setup.py +++ b/ui_components/setup.py @@ -60,7 +60,6 @@ def setup_app_ui(): project_list = data_repo.get_all_project_list(user_id=get_current_user_uuid()) if st.session_state["section"] == "Open Project": - if "index_of_project_name" not in st.session_state: if app_settings.previous_project: st.session_state["project_uuid"] = app_settings.previous_project @@ -89,17 +88,19 @@ def setup_app_ui(): # checking for project metadata (like cache updates) # project_update_data is of the format {"data_update": [{"timing_uuid": timing_uuid}]} - project_update_data = json.loads(project_list[selected_index].meta_data).\ - 
get(ProjectMetaData.DATA_UPDATE.value, None) if project_list[selected_index].meta_data else None - if project_update_data: - for timing_uuid in project_update_data: - _ = data_repo.get_timing_from_uuid(timing_uuid, invalidate_cache=True) + key = st.session_state["project_uuid"] + if acquire_lock(key): + project = data_repo.get_project_from_uuid(st.session_state["project_uuid"]) + project_update_data = json.loads(project.meta_data).\ + get(ProjectMetaData.DATA_UPDATE.value, None) if project.meta_data else None + if project_update_data: + for timing_uuid in project_update_data: + _ = data_repo.get_timing_from_uuid(timing_uuid, invalidate_cache=True) + + # removing the metadata after processing + data_repo.update_project(uuid=project.uuid, meta_data=json.dumps({ProjectMetaData.DATA_UPDATE.value: []})) - # removing the metadata after processing - key = str(project_list[selected_index].uuid) - if acquire_lock(key): - data_repo.update_project(uuid=project_list[selected_index].uuid, meta_data=json.dumps({ProjectMetaData.DATA_UPDATE.value: []})) - release_lock(key) + release_lock(key) if "current_frame_index" not in st.session_state: st.session_state['current_frame_index'] = 1 @@ -226,7 +227,6 @@ def on_change_view_type(key): app_settings_page() elif st.session_state["section"] == "New Project": - new_project_page() else: diff --git a/utils/cache/cache_methods.py b/utils/cache/cache_methods.py index 228a311f..38631329 100644 --- a/utils/cache/cache_methods.py +++ b/utils/cache/cache_methods.py @@ -197,6 +197,7 @@ def _cache_get_timing_from_uuid(self, *args, **kwargs): original_func = getattr(cls, '_original_get_timing_from_uuid') timing = original_func(self, *args, **kwargs) + StCache.delete(timing.uuid, CacheKey.TIMING_DETAILS.value) StCache.add(timing, CacheKey.TIMING_DETAILS.value) return timing From ac62d654c78a723b411c5e96bac1560147378695 Mon Sep 17 00:00:00 2001 From: piyushK52 Date: Sun, 15 Oct 2023 20:02:52 +0530 Subject: [PATCH 082/164] file list cached --- 
backend/db_repo.py | 2 +- utils/cache/cache.py | 1 + utils/cache/cache_methods.py | 133 +++++++++++++++++++++++++++++++++-- 3 files changed, 129 insertions(+), 7 deletions(-) diff --git a/backend/db_repo.py b/backend/db_repo.py index 867d3d90..2cf14906 100644 --- a/backend/db_repo.py +++ b/backend/db_repo.py @@ -29,7 +29,7 @@ logger = AppLogger() -@measure_execution_time +# @measure_execution_time class DBRepo: _instance = None _count = 0 diff --git a/utils/cache/cache.py b/utils/cache/cache.py index b7cdbada..6786a86f 100644 --- a/utils/cache/cache.py +++ b/utils/cache/cache.py @@ -9,6 +9,7 @@ class CacheKey(ExtendedEnum): PROJECT_SETTING = "project_setting" AI_MODEL = "ai_model" LOGGED_USER = "logged_user" + FILE = "file" class StCache: diff --git a/utils/cache/cache_methods.py b/utils/cache/cache_methods.py index 38631329..95701dee 100644 --- a/utils/cache/cache_methods.py +++ b/utils/cache/cache_methods.py @@ -8,21 +8,27 @@ # NOTE: caching only timing_details, project settings, models and app settings. 
invalidating cache everytime a related data is updated def cache_data(cls): + # ---------------- FILE METHODS ---------------------- def _cache_create_or_update_file(self, *args, **kwargs): original_func = getattr(cls, '_original_create_or_update_file') file = original_func(self, *args, **kwargs) - StCache.delete_all(CacheKey.TIMING_DETAILS.value) + + if file: + StCache.delete(file.uuid, CacheKey.FILE.value) + StCache.add(file, CacheKey.FILE.value) return file setattr(cls, '_original_create_or_update_file', cls.create_or_update_file) setattr(cls, "create_or_update_file", _cache_create_or_update_file) - def _cache_create_file(self, *args, **kwargs): original_func = getattr(cls, '_original_create_file') file = original_func(self, *args, **kwargs) - StCache.delete_all(CacheKey.TIMING_DETAILS.value) + + if file: + StCache.delete(file.uuid, CacheKey.FILE.value) + StCache.add(file, CacheKey.FILE.value) return file @@ -34,24 +40,131 @@ def _cache_delete_file_from_uuid(self, *args, **kwargs): status = original_func(self, *args, **kwargs) if status: + StCache.delete(args[0], CacheKey.TIMING_DETAILS.value) StCache.delete_all(CacheKey.TIMING_DETAILS.value) setattr(cls, '_original_delete_file_from_uuid', cls.delete_file_from_uuid) setattr(cls, "delete_file_from_uuid", _cache_delete_file_from_uuid) - def _cache_update_file(self, *args, **kwargs): original_func = getattr(cls, '_original_update_file') file = original_func(self, *args, **kwargs) + if file: - StCache.delete_all(CacheKey.TIMING_DETAILS.value) + StCache.delete(file.uuid, CacheKey.FILE.value) + StCache.add(file, CacheKey.FILE.value) return file setattr(cls, '_original_update_file', cls.update_file) setattr(cls, "update_file", _cache_update_file) + + def _cache_get_file_from_name(self, *args, **kwargs): + file_list = StCache.get_all(CacheKey.FILE.value) + if file_list and len(file_list) and len(args) > 0: + for file in file_list: + if file.name == args[0]: + return file + + original_func = getattr(cls, 
'_original_get_file_from_name') + file = original_func(self, *args, **kwargs) + if file: + StCache.add(file, CacheKey.FILE.value) + + return file + setattr(cls, '_original_get_file_from_name', cls.get_file_from_name) + setattr(cls, "get_file_from_name", _cache_get_file_from_name) + + def _cache_get_file_from_uuid(self, *args, **kwargs): + file_list = StCache.get_all(CacheKey.FILE.value) + if file_list and len(file_list) and len(args) > 0: + for file in file_list: + if file.uuid == args[0]: + return file + + original_func = getattr(cls, '_original_get_file_from_uuid') + file = original_func(self, *args, **kwargs) + if file: + StCache.add(file, CacheKey.FILE.value) + + return file + setattr(cls, '_original_get_file_from_uuid', cls.get_file_from_uuid) + setattr(cls, "get_file_from_uuid", _cache_get_file_from_uuid) + + def _cache_get_image_list_from_uuid_list(self, *args, **kwargs): + not_found_list, found_list = [], {} + # finding the images in the cache + file_list = StCache.get_all(CacheKey.FILE.value) + if file_list and len(file_list) and len(args) > 0: + for file in file_list: + if file.uuid in args[0]: + found_list[file.uuid] = file + + for file_uuid in args[0]: + if file_uuid not in found_list: + not_found_list.append(file_uuid) + + # images which are not present in the cache are fetched through the db + if len(not_found_list): + original_func = getattr(cls, '_original_get_image_list_from_uuid_list') + res = original_func(self, not_found_list, **kwargs) + for file in res: + found_list[file.uuid] = file + + # ordering the result + res = [] + if found_list: + for file_uuid in args[0]: + if file_uuid in found_list: + res.append(found_list[file_uuid]) + + for file in res: + StCache.delete(file, CacheKey.FILE.value) + StCache.add(file, CacheKey.FILE.value) + + return res + + setattr(cls, '_original_get_image_list_from_uuid_list', cls.get_image_list_from_uuid_list) + setattr(cls, "get_image_list_from_uuid_list", _cache_get_image_list_from_uuid_list) + + def 
_cache_get_file_list_from_log_uuid_list(self, *args, **kwargs): + not_found_list, found_list = [], {} + # finding files in the cache + file_list = StCache.get_all(CacheKey.FILE.value) + if file_list and len(file_list) and len(args) > 0: + for file in file_list: + if file.inference_log and file.inference_log.uuid in args[0]: + found_list[file.inference_log.uuid] = file + + for log_uuid in args[0]: + if log_uuid not in found_list: + not_found_list.append(log_uuid) + + if len(not_found_list): + original_func = getattr(cls, '_original_get_file_list_from_log_uuid_list') + res = original_func(self, not_found_list, **kwargs) + for file in res: + found_list[file.inference_log.uuid] = file + + res = [] + if found_list: + for log_uuid in args[0]: + if log_uuid in found_list: + res.append(found_list[log_uuid]) + + for file in res: + StCache.delete(file, CacheKey.FILE.value) + StCache.add(file, CacheKey.FILE.value) + + return res + + setattr(cls, '_original_get_file_list_from_log_uuid_list', cls.get_file_list_from_log_uuid_list) + setattr(cls, "get_file_list_from_log_uuid_list", _cache_get_file_list_from_log_uuid_list) + + + # ------------------ PROJECT METHODS ----------------------- def _cache_create_project(self, *args, **kwargs): original_func = getattr(cls, '_original_create_project') project = original_func(self, *args, **kwargs) @@ -75,6 +188,8 @@ def _cache_delete_project_from_uuid(self, *args, **kwargs): setattr(cls, '_original_delete_project_from_uuid', cls.delete_project_from_uuid) setattr(cls, "delete_project_from_uuid", _cache_delete_project_from_uuid) + + # -------------------- AI MODEL METHODS ---------------------- def _cache_get_ai_model_from_uuid(self, *args, **kwargs): model_list = StCache.get_all(CacheKey.AI_MODEL.value) if model_list and len(model_list) and len(args) > 0: @@ -142,6 +257,7 @@ def _cache_delete_ai_model_from_uuid(self, *args, **kwargs): setattr(cls, "delete_ai_model_from_uuid", _cache_delete_ai_model_from_uuid) + # ------------------- TIMING 
METHODS --------------------- def _cache_get_timing_list_from_project(self, *args, **kwargs): # checking if it's already present in the cache timing_list = StCache.get_all(CacheKey.TIMING_DETAILS.value) @@ -255,6 +371,8 @@ def _cache_move_frame_one_step_forward(self, *args, **kwargs): setattr(cls, '_original_move_frame_one_step_forward', cls.move_frame_one_step_forward) setattr(cls, "move_frame_one_step_forward", _cache_move_frame_one_step_forward) + + # ------------------ APP SETTING METHODS --------------------- def _cache_get_app_setting_from_uuid(self, *args, **kwargs): app_setting_list = StCache.get_all(CacheKey.APP_SETTING.value) if not len(kwargs) and len(app_setting_list): @@ -275,7 +393,6 @@ def _cache_get_app_setting_from_uuid(self, *args, **kwargs): setattr(cls, '_original_get_app_setting_from_uuid', cls.get_app_setting_from_uuid) setattr(cls, "get_app_setting_from_uuid", _cache_get_app_setting_from_uuid) - def _cache_get_all_app_setting_list(self, *args, **kwargs): app_setting_list = StCache.get_all(CacheKey.APP_SETTING.value) if app_setting_list and len(app_setting_list): @@ -332,6 +449,8 @@ def _cache_delete_app_setting(self, *args, **kwargs): setattr(cls, '_original_delete_app_setting', cls.delete_app_setting) setattr(cls, "delete_app_setting", _cache_delete_app_setting) + + # ------------------ PROJECT SETTING METHODS --------------------- def _cache_get_project_setting(self, *args, **kwargs): project_setting = StCache.get(args[0], CacheKey.PROJECT_SETTING.value) if project_setting: @@ -382,6 +501,8 @@ def _cache_bulk_update_project_setting(self, *args, **kwargs): setattr(cls, '_original_bulk_update_project_setting', cls.bulk_update_project_setting) setattr(cls, "bulk_update_project_setting", _cache_bulk_update_project_setting) + + # ---------------------- USER METHODS --------------------- def _cache_update_user(self, *args, **kwargs): original_func = getattr(cls, '_original_update_user') user = original_func(self, *args, **kwargs) From 
5af0c777679e111179e3698f0ac89c5d1798d91e Mon Sep 17 00:00:00 2001 From: peter942 Date: Sun, 15 Oct 2023 16:40:52 +0200 Subject: [PATCH 083/164] Lots of small improvements --- ui_components/components/new_project_page.py | 6 +-- ui_components/methods/common_methods.py | 51 +++++++------------ ui_components/widgets/frame_selector.py | 2 +- ui_components/widgets/image_carousal.py | 2 +- ui_components/widgets/timeline_view.py | 16 +++--- .../widgets/variant_comparison_element.py | 3 +- .../widgets/variant_comparison_grid.py | 18 +++---- 7 files changed, 42 insertions(+), 56 deletions(-) diff --git a/ui_components/components/new_project_page.py b/ui_components/components/new_project_page.py index 4cac2a8d..b8e0d645 100644 --- a/ui_components/components/new_project_page.py +++ b/ui_components/components/new_project_page.py @@ -1,6 +1,6 @@ import streamlit as st from banodoco_settings import create_new_project -from ui_components.methods.common_methods import save_audio_file,create_timings_row_at_frame_number, save_uploaded_image +from ui_components.methods.common_methods import save_audio_file,create_timings_row_at_frame_number, save_and_promote_image from utils.common_utils import get_current_user_uuid, reset_project_state from utils.data_repo.data_repo import DataRepo import time @@ -86,8 +86,8 @@ def new_project_page(): if starting_image: try: - save_uploaded_image(starting_image, new_project.uuid, new_timing.uuid, "source") - save_uploaded_image(starting_image, new_project.uuid, new_timing.uuid, "styled") + save_and_promote_image(starting_image, new_project.uuid, new_timing.uuid, "source") + save_and_promote_image(starting_image, new_project.uuid, new_timing.uuid, "styled") except Exception as e: st.error(f"Failed to save the uploaded image due to {str(e)}") diff --git a/ui_components/methods/common_methods.py b/ui_components/methods/common_methods.py index b97d88fd..931fb2fc 100644 --- a/ui_components/methods/common_methods.py +++ 
b/ui_components/methods/common_methods.py @@ -121,7 +121,7 @@ def jump_to_single_frame_view_button(display_number, timing_details): if st.button(f"Jump to #{display_number}"): st.session_state['prev_frame_index'] = display_number st.session_state['current_frame_uuid'] = timing_details[st.session_state['current_frame_index'] - 1].uuid - st.session_state['frame_styling_view_type'] = "Individual View" + st.session_state['frame_styling_view_type'] = "Individual" st.session_state['change_view_type'] = True st.rerun() @@ -158,8 +158,8 @@ def add_key_frame(selected_image, inherit_styling_settings, how_long_after, whic timing_details = data_repo.get_timing_list_from_project(project_uuid) if selected_image: - save_uploaded_image(selected_image, project_uuid, timing_details[index_of_current_item].uuid, "source") - save_uploaded_image(selected_image, project_uuid, timing_details[index_of_current_item].uuid, "styled") + save_and_promote_image(selected_image, project_uuid, timing_details[index_of_current_item].uuid, "source") + save_and_promote_image(selected_image, project_uuid, timing_details[index_of_current_item].uuid, "styled") if inherit_styling_settings == "Yes": clone_styling_settings(index_of_current_item - 1, timing_details[index_of_current_item].uuid) @@ -227,7 +227,7 @@ def save_new_image(img: Union[Image.Image, str, np.ndarray, io.BytesIO], project new_image = data_repo.create_file(**file_data) return new_image -def save_uploaded_image(image, project_uuid, frame_uuid, save_type): +def save_and_promote_image(image, project_uuid, frame_uuid, save_type): data_repo = DataRepo() try: @@ -699,40 +699,32 @@ def delete_frame(timing_uuid): st.session_state['prev_frame_index'] = st.session_state['current_frame_index'] st.session_state['current_frame_uuid'] = timing_details[st.session_state['current_frame_index'] - 1].uuid -def replace_image_widget(timing_uuid, stage): +def replace_image_widget(timing_uuid, stage, options=["Other Frame", "Uploaded Frame"]): data_repo = 
DataRepo() timing = data_repo.get_timing_from_uuid(timing_uuid) timing_details = data_repo.get_timing_list_from_project(timing.project.uuid) - - replace_with = st.radio("Replace with:", [ - "Uploaded Frame", "Other Frame"], horizontal=True, key=f"replace_with_what_{stage}") - + + replace_with = options[0] if len(options) == 1 else st.radio("Replace with:", options, horizontal=True, key=f"replace_with_what_{stage}_{timing_uuid}") if replace_with == "Other Frame": - which_stage_to_use_for_replacement = st.radio("Select stage to use:", [ - ImageStage.MAIN_VARIANT.value, ImageStage.SOURCE_IMAGE.value], key=f"which_stage_to_use_for_replacement_{stage}", horizontal=True) + ImageStage.MAIN_VARIANT.value, ImageStage.SOURCE_IMAGE.value], key=f"which_stage_to_use_for_replacement_{stage}_{timing_uuid}", horizontal=True) which_image_to_use_for_replacement = st.number_input("Select image to use:", min_value=0, max_value=len( timing_details)-1, value=0, key=f"which_image_to_use_for_replacement_{stage}") - + if which_stage_to_use_for_replacement == ImageStage.SOURCE_IMAGE.value: selected_image = timing_details[which_image_to_use_for_replacement].source_image - - elif which_stage_to_use_for_replacement == ImageStage.MAIN_VARIANT.value: selected_image = timing_details[which_image_to_use_for_replacement].primary_image - - + st.image(selected_image.local_path, use_column_width=True) - if st.button("Replace with selected frame", disabled=False,key=f"replace_with_selected_frame_{stage}"): + if st.button("Replace with selected frame", disabled=False,key=f"replace_with_selected_frame_{stage}_{timing_uuid}"): if stage == "source": - data_repo.update_specific_timing(timing.uuid, source_image_id=selected_image.uuid) st.success("Replaced") time.sleep(1) st.rerun() - else: number_of_image_variants = add_image_variant( selected_image.uuid, timing.uuid) @@ -741,7 +733,7 @@ def replace_image_widget(timing_uuid, stage): st.success("Replaced") time.sleep(1) st.rerun() - + elif replace_with == 
"Uploaded Frame": if stage == "source": uploaded_file = st.file_uploader("Upload Source Image", type=[ @@ -750,25 +742,20 @@ def replace_image_widget(timing_uuid, stage): if st.button("Upload Source Image"): if uploaded_file: timing = data_repo.get_timing_from_uuid(timing.uuid) - if save_uploaded_image(uploaded_file, timing.project.uuid, timing.uuid, "source"): + if save_and_promote_image(uploaded_file, timing.project.uuid, timing.uuid, "source"): time.sleep(1.5) st.rerun() else: replacement_frame = st.file_uploader("Upload Styled Image", type=[ - "png", "jpeg"], accept_multiple_files=False, key=f"replacement_frame_upload_{stage}") + "png", "jpeg"], accept_multiple_files=False, key=f"replacement_frame_upload_{stage}_{timing_uuid}") if replacement_frame != None: - if st.button("Replace frame", disabled=False): - images_for_model = [] + if st.button("Replace frame", disabled=False): timing = data_repo.get_timing_from_uuid(timing.uuid) if replacement_frame: - saved_file = save_uploaded_image(replacement_frame, timing.project.uuid, timing.uuid, "styled") - if saved_file: - number_of_image_variants = add_image_variant(saved_file.uuid, timing.uuid) - promote_image_variant( - timing.uuid, number_of_image_variants - 1) - st.success("Replaced") - time.sleep(1) - st.rerun() + save_and_promote_image(replacement_frame, timing.project.uuid, timing.uuid, "styled") + st.success("Replaced") + time.sleep(1) + st.rerun() def promote_image_variant(timing_uuid, variant_to_promote_frame_number: str): data_repo = DataRepo() diff --git a/ui_components/widgets/frame_selector.py b/ui_components/widgets/frame_selector.py index 7849bcbd..8ac5db90 100644 --- a/ui_components/widgets/frame_selector.py +++ b/ui_components/widgets/frame_selector.py @@ -31,7 +31,7 @@ def frame_selector_widget(): st.session_state['current_frame_uuid'] = timing_details[st.session_state['current_frame_index'] - 1].uuid st.session_state['reset_canvas'] = True st.session_state['frame_styling_view_type_index'] = 0 - 
st.session_state['frame_styling_view_type'] = "Individual View" + st.session_state['frame_styling_view_type'] = "Individual" st.rerun() diff --git a/ui_components/widgets/image_carousal.py b/ui_components/widgets/image_carousal.py index 46b84b00..ddcfaff5 100644 --- a/ui_components/widgets/image_carousal.py +++ b/ui_components/widgets/image_carousal.py @@ -44,7 +44,7 @@ def display_image(timing_uuid, stage=None, clickable=False): st.session_state['current_frame_index'] = timing_idx + 1 st.session_state['prev_frame_index'] = timing_idx + 1 # st.session_state['frame_styling_view_type_index'] = 0 - st.session_state['frame_styling_view_type'] = "Individual View" + st.session_state['frame_styling_view_type'] = "Individual" st.session_state['counter'] += 1 st.rerun() diff --git a/ui_components/widgets/timeline_view.py b/ui_components/widgets/timeline_view.py index df493f44..7f35021d 100644 --- a/ui_components/widgets/timeline_view.py +++ b/ui_components/widgets/timeline_view.py @@ -1,5 +1,5 @@ import streamlit as st -from ui_components.methods.common_methods import delete_frame, jump_to_single_frame_view_button, move_frame,delete_frame_button,move_frame_back_button,move_frame_forward_button,change_frame_position_input,update_clip_duration_of_all_timing_frames +from ui_components.methods.common_methods import delete_frame, jump_to_single_frame_view_button, move_frame,delete_frame_button,move_frame_back_button,move_frame_forward_button,change_frame_position_input,update_clip_duration_of_all_timing_frames,replace_image_widget from ui_components.widgets.frame_time_selector import single_frame_time_selector, single_frame_time_duration_setter from typing import List from ui_components.widgets.image_carousal import display_image @@ -9,14 +9,14 @@ from utils import st_memory def timeline_view_buttons(i, j, timing_details, shift_frames_setting, args): - time_setter_toggle, animation_style_selector_toggle, duration_setter_toggle, move_frames_toggle, delete_frames_toggle, 
change_frame_position_toggle = args + time_setter_toggle, replace_image_widget_toggle, duration_setter_toggle, move_frames_toggle, delete_frames_toggle, change_frame_position_toggle = args if time_setter_toggle: single_frame_time_selector(timing_details[i + j].uuid, 'motion', shift_frames=shift_frames_setting) if duration_setter_toggle: single_frame_time_duration_setter(timing_details[i + j].uuid, 'motion', shift_frames=shift_frames_setting) - if animation_style_selector_toggle: - update_animation_style_element(timing_details[i + j].uuid) + if replace_image_widget_toggle: + replace_image_widget(timing_details[i + j].uuid, stage=WorkflowStageType.STYLED.value,options=["Uploaded Frame"]) btn1, btn2, btn3 = st.columns([1, 1, 1]) @@ -33,7 +33,7 @@ def timeline_view_buttons(i, j, timing_details, shift_frames_setting, args): if change_frame_position_toggle: change_frame_position_input(timing_details[i + j].uuid, "side-to-side") - if time_setter_toggle or duration_setter_toggle or animation_style_selector_toggle or move_frames_toggle or delete_frames_toggle or change_frame_position_toggle: + if time_setter_toggle or duration_setter_toggle or replace_image_widget_toggle or move_frames_toggle or delete_frames_toggle or change_frame_position_toggle: st.caption("--") jump_to_single_frame_view_button(i + j + 1, timing_details) @@ -73,15 +73,15 @@ def display_toggles(): expand_all = st_memory.toggle("Expand All", key="expand_all",value=False) if expand_all: - time_setter_toggle = animation_style_selector_toggle = duration_setter_toggle = move_frames_toggle = delete_frames_toggle = change_frame_position_toggle = True + time_setter_toggle = replace_image_widget_toggle = duration_setter_toggle = move_frames_toggle = delete_frames_toggle = change_frame_position_toggle = True else: with col2: time_setter_toggle = st_memory.toggle("Time Setter", value=True, key="time_setter_toggle") - animation_style_selector_toggle = st_memory.toggle("Animation Style Selector", value=False, 
key="animation_style_selector_toggle") + replace_image_widget_toggle = st_memory.toggle("Replace Image", value=False, key="replace_image_widget_toggle") duration_setter_toggle = st_memory.toggle("Duration Setter", value=False, key="duration_setter_toggle") with col3: move_frames_toggle = st_memory.toggle("Move Frames", value=True, key="move_frames_toggle") delete_frames_toggle = st_memory.toggle("Delete Frames", value=False, key="delete_frames_toggle") change_frame_position_toggle = st_memory.toggle("Change Frame Position", value=False, key="change_frame_position_toggle") - return time_setter_toggle, animation_style_selector_toggle, duration_setter_toggle, move_frames_toggle, delete_frames_toggle, change_frame_position_toggle + return time_setter_toggle, replace_image_widget_toggle, duration_setter_toggle, move_frames_toggle, delete_frames_toggle, change_frame_position_toggle diff --git a/ui_components/widgets/variant_comparison_element.py b/ui_components/widgets/variant_comparison_element.py index 2b51224f..7a44b441 100644 --- a/ui_components/widgets/variant_comparison_element.py +++ b/ui_components/widgets/variant_comparison_element.py @@ -56,8 +56,7 @@ def variant_comparison_element(timing_uuid, stage=CreativeProcessType.MOTION.val if stage == CreativeProcessType.MOTION.value: if number_of_variants: if not (timing.interpolated_clip_list and len(timing.interpolated_clip_list)): - st.error("No variant for this frame") - + st.error("No variant for this frame") if which_variant - 1 == current_variant: st.success("**Main variant**") else: diff --git a/ui_components/widgets/variant_comparison_grid.py b/ui_components/widgets/variant_comparison_grid.py index f88fa9f5..dd1a9060 100644 --- a/ui_components/widgets/variant_comparison_grid.py +++ b/ui_components/widgets/variant_comparison_grid.py @@ -9,9 +9,7 @@ def variant_comparison_grid(timing_uuid, stage=CreativeProcessType.MOTION.value) timing = data_repo.get_timing_from_uuid(timing_uuid) variants = 
timing.alternative_images_list - - current_variant = timing.primary_interpolated_video_index if stage == CreativeProcessType.MOTION.value else int( timing.primary_variant_index) @@ -20,17 +18,19 @@ def variant_comparison_grid(timing_uuid, stage=CreativeProcessType.MOTION.value) for i in range(0, len(variants), num_columns): cols = st.columns(num_columns) for j in range(num_columns): - if i + j < len(variants): + variant_index = i + j + if variant_index < len(variants): with cols[j]: if stage == CreativeProcessType.MOTION.value: - st.video(variants[i + j].location, format='mp4', start_time=0) if variants[i + j] else st.error("No video present") + st.video(variants[variant_index].location, format='mp4', start_time=0) if variants[variant_index] else st.error("No video present") else: - st.image(variants[i + j].location, use_column_width=True) + st.image(variants[variant_index].location, use_column_width=True) - if i + j == current_variant: + if variant_index == current_variant: st.success("**Main variant**") - else: - if st.button(f"Promote Variant #{i + j + 1}", key=f"Promote Variant #{i + j + 1} for {st.session_state['current_frame_index']}", help="Promote this variant to the primary image", use_container_width=True): + else: + st.info(f"Variant #{variant_index + 1}") - promote_image_variant(timing.uuid, i + j) + if st.button(f"Promote Variant #{variant_index + 1}", key=f"Promote Variant #{variant_index + 1} for {st.session_state['current_frame_index']}", help="Promote this variant to the primary image", use_container_width=True): + promote_image_variant(timing.uuid, variant_index) st.rerun() \ No newline at end of file From 3fd753bb59b8d0e0a9c3bb00ef457a2d60c3934f Mon Sep 17 00:00:00 2001 From: peter942 Date: Sun, 15 Oct 2023 21:53:48 +0200 Subject: [PATCH 084/164] More small improvements --- .../components/frame_styling_page.py | 16 ++--- ui_components/constants.py | 4 +- ui_components/methods/common_methods.py | 2 +- ui_components/setup.py | 2 +- 
ui_components/widgets/timeline_view.py | 67 ++++++++++--------- 5 files changed, 45 insertions(+), 46 deletions(-) diff --git a/ui_components/components/frame_styling_page.py b/ui_components/components/frame_styling_page.py index 2a446b28..7c6763b4 100644 --- a/ui_components/components/frame_styling_page.py +++ b/ui_components/components/frame_styling_page.py @@ -210,20 +210,14 @@ def frame_styling_page(mainheader2, project_uuid: str): elif st.session_state['frame_styling_view_type'] == "Timeline": st.markdown("---") - - header_col_3, header_col_4, header_col_5 = st.columns([4, 1.5, 1.5]) - - with header_col_5: - shift_frames_setting = st.toggle("Shift Frames", help="If set to True, it will shift the frames after your adjustment forward by the amount of time you move.") - - - if st.session_state['page'] == "Styling": + + if st.session_state['page'] == "Key Frames": with st.sidebar: with st.expander("🌀 Batch Styling", expanded=False): styling_element(st.session_state['current_frame_uuid'], view_type=ViewType.LIST.value) - timeline_view(shift_frames_setting, project_uuid, "Styling", header_col_3, header_col_4) - elif st.session_state['page'] == "Motion": - timeline_view(shift_frames_setting, project_uuid, "Motion", header_col_3, header_col_4) + timeline_view(project_uuid, "Key Frames") + elif st.session_state['page'] == "Videos": + timeline_view(project_uuid, "Videos") with st.sidebar: diff --git a/ui_components/constants.py b/ui_components/constants.py index 5ec8c3b2..f2d1d8ff 100644 --- a/ui_components/constants.py +++ b/ui_components/constants.py @@ -14,8 +14,8 @@ class VideoQuality(ExtendedEnum): LOW = "Low" class CreativeProcessType(ExtendedEnum): - STYLING = "Styling" - MOTION = "Motion" + STYLING = "Key Frames" + MOTION = "Videos" class DefaultTimingStyleParams: prompt = "" diff --git a/ui_components/methods/common_methods.py b/ui_components/methods/common_methods.py index 931fb2fc..aa3e9d86 100644 --- a/ui_components/methods/common_methods.py +++ 
b/ui_components/methods/common_methods.py @@ -608,7 +608,7 @@ def update_timings_in_order(project_uuid): data_repo.update_specific_timing(timing.uuid, frame_time=float(i)) -def change_frame_position_input(timing_uuid, src): +def change_position_input(timing_uuid, src): data_repo = DataRepo() timing: InternalFrameTimingObject = data_repo.get_timing_from_uuid(timing_uuid) timing_list = data_repo.get_timing_list_from_project(project_uuid=timing.project.uuid) diff --git a/ui_components/setup.py b/ui_components/setup.py index 2efcd96e..c6d7cefb 100644 --- a/ui_components/setup.py +++ b/ui_components/setup.py @@ -158,7 +158,7 @@ def setup_app_ui(): if st.session_state['frame_styling_view_type'] != "Explorer": pages = CreativeProcessType.value_list() else: - pages = ["Styling"] + pages = ["Key Frames"] if 'page' not in st.session_state: st.session_state["page"] = pages[0] diff --git a/ui_components/widgets/timeline_view.py b/ui_components/widgets/timeline_view.py index 7f35021d..3f1c0670 100644 --- a/ui_components/widgets/timeline_view.py +++ b/ui_components/widgets/timeline_view.py @@ -1,5 +1,5 @@ import streamlit as st -from ui_components.methods.common_methods import delete_frame, jump_to_single_frame_view_button, move_frame,delete_frame_button,move_frame_back_button,move_frame_forward_button,change_frame_position_input,update_clip_duration_of_all_timing_frames,replace_image_widget +from ui_components.methods.common_methods import delete_frame, jump_to_single_frame_view_button, move_frame,delete_frame_button,move_frame_back_button,move_frame_forward_button,change_position_input,update_clip_duration_of_all_timing_frames,replace_image_widget from ui_components.widgets.frame_time_selector import single_frame_time_selector, single_frame_time_duration_setter from typing import List from ui_components.widgets.image_carousal import display_image @@ -8,8 +8,8 @@ from ui_components.constants import WorkflowStageType from utils import st_memory -def timeline_view_buttons(i, j, 
timing_details, shift_frames_setting, args): - time_setter_toggle, replace_image_widget_toggle, duration_setter_toggle, move_frames_toggle, delete_frames_toggle, change_frame_position_toggle = args +def timeline_view_buttons(i, j, timing_details, shift_frames_setting, time_setter_toggle, replace_image_widget_toggle, duration_setter_toggle, move_frames_toggle, delete_frames_toggle, change_position_toggle): + if time_setter_toggle: single_frame_time_selector(timing_details[i + j].uuid, 'motion', shift_frames=shift_frames_setting) @@ -30,24 +30,47 @@ def timeline_view_buttons(i, j, timing_details, shift_frames_setting, args): with btn3: delete_frame_button(timing_details[i + j].uuid) - if change_frame_position_toggle: - change_frame_position_input(timing_details[i + j].uuid, "side-to-side") + if change_position_toggle: + change_position_input(timing_details[i + j].uuid, "side-to-side") if time_setter_toggle or duration_setter_toggle or replace_image_widget_toggle or move_frames_toggle or delete_frames_toggle or change_frame_position_toggle: st.caption("--") jump_to_single_frame_view_button(i + j + 1, timing_details) -def timeline_view(shift_frames_setting, project_uuid, stage, header_col_2, header_col_3): - st.markdown("---") - +def timeline_view(project_uuid, stage): + data_repo = DataRepo() timing_details = data_repo.get_timing_list_from_project(project_uuid) + + header_col_1, header_col_2, header_col_3 = st.columns([1.5,4,1.5]) + with header_col_1: + shift_frames_setting = st.toggle("Shift Frames", help="If set to True, it will shift the frames after your adjustment forward by the amount of time you move.") + + + + with header_col_2: + col1, col2, col3 = st.columns(3) + + with col1: + expand_all = st_memory.toggle("Expand All", key="expand_all",value=False) + + if expand_all: + time_setter_toggle = replace_image_widget_toggle = duration_setter_toggle = move_frames_toggle = delete_frames_toggle = change_position_toggle = True + + else: + with col2: + 
time_setter_toggle = st_memory.toggle("Time Setter", value=True, key="time_setter_toggle") + replace_image_widget_toggle = st_memory.toggle("Replace Image", value=False, key="replace_image_widget_toggle") + duration_setter_toggle = st_memory.toggle("Duration Setter", value=False, key="duration_setter_toggle") + with col3: + move_frames_toggle = st_memory.toggle("Move Frames", value=True, key="move_frames_toggle") + delete_frames_toggle = st_memory.toggle("Delete Frames", value=False, key="delete_frames_toggle") + change_position_toggle = st_memory.toggle("Change Position", value=False, key="change_position_toggle") + with header_col_3: items_per_row = st_memory.slider("How many frames per row?", min_value=1, max_value=10, value=5, step=1, key="items_per_row_slider") - with header_col_2: - args = display_toggles() total_count = len(timing_details) for i in range(0, total_count, items_per_row): # Step of items_per_row for grid @@ -56,32 +79,14 @@ def timeline_view(shift_frames_setting, project_uuid, stage, header_col_2, heade if i + j < total_count: # Check if index is within range with grid[j]: display_number = i + j + 1 - if stage == 'Styling': + if stage == 'Key Frames': display_image(timing_uuid=timing_details[i + j].uuid, stage=WorkflowStageType.STYLED.value, clickable=False) - elif stage == 'Motion': + elif stage == 'Videos': if timing_details[i + j].timed_clip: st.video(timing_details[i + j].timed_clip.location) else: st.error("No video found for this frame.") with st.expander(f'Frame #{display_number}', True): - timeline_view_buttons(i, j, timing_details, shift_frames_setting, args) + timeline_view_buttons(i, j, timing_details, shift_frames_setting, time_setter_toggle, replace_image_widget_toggle, duration_setter_toggle, move_frames_toggle, delete_frames_toggle, change_position_toggle) -def display_toggles(): - col1, col2, col3 = st.columns(3) - - with col1: - expand_all = st_memory.toggle("Expand All", key="expand_all",value=False) - - if expand_all: - 
time_setter_toggle = replace_image_widget_toggle = duration_setter_toggle = move_frames_toggle = delete_frames_toggle = change_frame_position_toggle = True - else: - with col2: - time_setter_toggle = st_memory.toggle("Time Setter", value=True, key="time_setter_toggle") - replace_image_widget_toggle = st_memory.toggle("Replace Image", value=False, key="replace_image_widget_toggle") - duration_setter_toggle = st_memory.toggle("Duration Setter", value=False, key="duration_setter_toggle") - with col3: - move_frames_toggle = st_memory.toggle("Move Frames", value=True, key="move_frames_toggle") - delete_frames_toggle = st_memory.toggle("Delete Frames", value=False, key="delete_frames_toggle") - change_frame_position_toggle = st_memory.toggle("Change Frame Position", value=False, key="change_frame_position_toggle") - return time_setter_toggle, replace_image_widget_toggle, duration_setter_toggle, move_frames_toggle, delete_frames_toggle, change_frame_position_toggle From 0e9d25726c8237f0a2818c941cb4b640457e4728 Mon Sep 17 00:00:00 2001 From: piyushK52 Date: Mon, 16 Oct 2023 15:20:36 +0530 Subject: [PATCH 085/164] wip: log pagination added + minor fixes --- backend/db_repo.py | 218 +++--------------------- banodoco_runner.py | 6 - ui_components/widgets/frame_selector.py | 31 ++-- ui_components/widgets/sidebar_logger.py | 30 ++-- utils/data_repo/data_repo.py | 4 +- 5 files changed, 56 insertions(+), 233 deletions(-) diff --git a/backend/db_repo.py b/backend/db_repo.py index 2cf14906..d4239bca 100644 --- a/backend/db_repo.py +++ b/backend/db_repo.py @@ -4,9 +4,10 @@ import sys from shared.logging.constants import LoggingType +from django.core.paginator import Paginator from shared.logging.logging import AppLogger -from utils.common_decorators import count_calls, measure_execution_time +from utils.common_decorators import measure_execution_time sys.path.append('../') import sqlite3 @@ -66,9 +67,6 @@ def __init__(self): # user operations def create_user(self, **kwargs): - # 
DBRepo._count += 1 - # cls_name = inspect.currentframe().f_code.co_name - # print("db call: ", DBRepo._count, " class name: ", cls_name) data = CreateUserDao(data=kwargs) if not data.is_valid(): return InternalResponse({}, data.errors, False) @@ -86,9 +84,6 @@ def create_user(self, **kwargs): return InternalResponse(payload, 'user created successfully', True) def get_first_active_user(self): - # DBRepo._count += 1 - # cls_name = inspect.currentframe().f_code.co_name - # print("db call: ", DBRepo._count, " class name: ", cls_name) user = User.objects.filter(is_disabled=False).first() if not user: return InternalResponse(None, 'no user found', True) @@ -100,9 +95,6 @@ def get_first_active_user(self): return InternalResponse(payload, 'user found', True) def get_user_by_email(self, email): - # DBRepo._count += 1 - # cls_name = inspect.currentframe().f_code.co_name - # print("db call: ", DBRepo._count, " class name: ", cls_name) user = User.objects.filter(email=email, is_disabled=False).first() if user: return InternalResponse(user, 'user found', True) @@ -114,9 +106,6 @@ def get_user_by_email(self, email): return InternalResponse(payload, 'user not found', False) def update_user(self, user_id, **kwargs): - # DBRepo._count += 1 - # cls_name = inspect.currentframe().f_code.co_name - # print("db call: ", DBRepo._count, " class name: ", cls_name) if user_id: user = User.objects.filter(uuid=user_id, is_disabled=False).first() else: @@ -140,9 +129,6 @@ def update_user(self, user_id, **kwargs): return InternalResponse(payload, 'user updated successfully', True) def get_all_user_list(self): - # DBRepo._count += 1 - # cls_name = inspect.currentframe().f_code.co_name - # print("db call: ", DBRepo._count, " class name: ", cls_name) user_list = User.objects.all() payload = { @@ -151,9 +137,6 @@ def get_all_user_list(self): return InternalResponse(payload, 'user list', True) def get_total_user_count(self): - # DBRepo._count += 1 - # cls_name = inspect.currentframe().f_code.co_name 
- # print("db call: ", DBRepo._count, " class name: ", cls_name) if SERVER != ServerType.PRODUCTION.value: count = User.objects.filter(is_disabled=False).count() else: @@ -162,9 +145,6 @@ def get_total_user_count(self): return InternalResponse(count, 'user count fetched', True) def delete_user_by_email(self, email): - # DBRepo._count += 1 - # cls_name = inspect.currentframe().f_code.co_name - # print("db call: ", DBRepo._count, " class name: ", cls_name) user = User.objects.filter(email=email, is_disabled=False).first() if user: user.is_disabled = True @@ -179,9 +159,6 @@ def delete_user_by_email(self, email): # internal file object def get_file_from_name(self, name): - # DBRepo._count += 1 - # cls_name = inspect.currentframe().f_code.co_name - # print("db call: ", DBRepo._count, " class name: ", cls_name) file = InternalFileObject.objects.filter(name=name, is_disabled=False).first() if not file: return InternalResponse({}, 'file not found', False) @@ -193,9 +170,6 @@ def get_file_from_name(self, name): return InternalResponse(payload, 'file found', True) def get_file_from_uuid(self, uuid): - # DBRepo._count += 1 - # cls_name = inspect.currentframe().f_code.co_name - # print("db call: ", DBRepo._count, " class name: ", cls_name) file = InternalFileObject.objects.filter(uuid=uuid, is_disabled=False).first() if not file: return InternalResponse({}, 'file not found', False) @@ -208,9 +182,6 @@ def get_file_from_uuid(self, uuid): # TODO: create a dao for this def get_all_file_list(self, **kwargs): - # DBRepo._count += 1 - # cls_name = inspect.currentframe().f_code.co_name - # print("db call: ", DBRepo._count, " class name: ", cls_name) kwargs['is_disabled'] = False if 'project_id' in kwargs and kwargs['project_id']: @@ -238,9 +209,6 @@ def get_file_list_from_log_uuid_list(self, log_uuid_list): return InternalResponse(payload, 'file list fetched successfully', True) def create_or_update_file(self, file_uuid, type=InternalFileType.IMAGE.value, **kwargs): - # 
DBRepo._count += 1 - # cls_name = inspect.currentframe().f_code.co_name - # print("db call: ", DBRepo._count, " class name: ", cls_name) file = InternalFileType.objects.filter(uuid=file_uuid, type=type, is_disabled=False).first() if not file: file = InternalFileObject.objects.create(uuid=file_uuid, name=str(uuid.uuid4()), file_type=type, **kwargs) @@ -258,9 +226,6 @@ def create_or_update_file(self, file_uuid, type=InternalFileType.IMAGE.value, ** return InternalResponse(payload, 'file found', True) def upload_file(self, file, ext): - # DBRepo._count += 1 - # cls_name = inspect.currentframe().f_code.co_name - # print("db call: ", DBRepo._count, " class name: ", cls_name) url = upload_file_from_obj(file, ext) payload = { 'data': url @@ -269,9 +234,6 @@ def upload_file(self, file, ext): return InternalResponse(payload, 'file uploaded', True) def create_file(self, **kwargs): - # DBRepo._count += 1 - # cls_name = inspect.currentframe().f_code.co_name - # print("db call: ", DBRepo._count, " class name: ", cls_name) data = CreateFileDao(data=kwargs) if not data.is_valid(): return InternalResponse({}, data.errors, False) @@ -321,9 +283,6 @@ def create_file(self, **kwargs): return InternalResponse(payload, 'file found', True) def delete_file_from_uuid(self, uuid): - # DBRepo._count += 1 - # cls_name = inspect.currentframe().f_code.co_name - # print("db call: ", DBRepo._count, " class name: ", cls_name) file = InternalFileObject.objects.filter(uuid=uuid, is_disabled=False).first() if not file: return InternalResponse({}, 'invalid file uuid', False) @@ -331,9 +290,6 @@ def delete_file_from_uuid(self, uuid): return InternalResponse({}, 'file deleted successfully', True) def get_image_list_from_uuid_list(self, uuid_list, file_type=InternalFileType.IMAGE.value): - # DBRepo._count += 1 - # cls_name = inspect.currentframe().f_code.co_name - # print("db call: ", DBRepo._count, " class name: ", cls_name) file_list = InternalFileObject.objects.filter(uuid__in=uuid_list, \ 
is_disabled=False, type=file_type).all() @@ -348,9 +304,6 @@ def get_image_list_from_uuid_list(self, uuid_list, file_type=InternalFileType.IM return InternalResponse(payload, 'file list fetched', True) def update_file(self, **kwargs): - # DBRepo._count += 1 - # cls_name = inspect.currentframe().f_code.co_name - # print("db call: ", DBRepo._count, " class name: ", cls_name) if 'uuid' not in kwargs: return InternalResponse({}, 'uuid is required', False) @@ -385,9 +338,6 @@ def update_file(self, **kwargs): # project def get_project_from_uuid(self, uuid): - # DBRepo._count += 1 - # cls_name = inspect.currentframe().f_code.co_name - # print("db call: ", DBRepo._count, " class name: ", cls_name) project = Project.objects.filter(uuid=uuid, is_disabled=False).first() if not project: return InternalResponse({}, 'invalid project uuid', False) @@ -399,9 +349,6 @@ def get_project_from_uuid(self, uuid): return InternalResponse(payload, 'project fetched', True) def update_project(self, **kwargs): - # DBRepo._count += 1 - # cls_name = inspect.currentframe().f_code.co_name - # print("db call: ", DBRepo._count, " class name: ", cls_name) project = Project.objects.filter(uuid=kwargs['uuid'], is_disabled=False).first() if not project: return InternalResponse({}, 'invalid project uuid', False) @@ -418,9 +365,6 @@ def update_project(self, **kwargs): return InternalResponse(payload, 'successfully updated project', True) def get_all_project_list(self, user_uuid): - # DBRepo._count += 1 - # cls_name = inspect.currentframe().f_code.co_name - # print("db call: ", DBRepo._count, " class name: ", cls_name) user: User = User.objects.filter(uuid=user_uuid, is_disabled=False).first() if not user: return InternalResponse({}, 'invalid user', False) @@ -434,9 +378,6 @@ def get_all_project_list(self, user_uuid): return InternalResponse(payload, 'project fetched', True) def create_project(self, **kwargs): - # DBRepo._count += 1 - # cls_name = inspect.currentframe().f_code.co_name - # print("db call: 
", DBRepo._count, " class name: ", cls_name) data = CreateProjectDao(data=kwargs) if not data.is_valid(): return InternalResponse({}, data.errors, False) @@ -457,9 +398,6 @@ def create_project(self, **kwargs): return InternalResponse(payload, 'project fetched', True) def delete_project_from_uuid(self, uuid): - # DBRepo._count += 1 - # cls_name = inspect.currentframe().f_code.co_name - # print("db call: ", DBRepo._count, " class name: ", cls_name) project = Project.objects.filter(uuid=uuid, is_disabled=False).first() if not project: return InternalResponse({}, 'invalid project uuid', False) @@ -471,9 +409,6 @@ def delete_project_from_uuid(self, uuid): # ai model (custom ai model) def get_ai_model_from_uuid(self, uuid): - # DBRepo._count += 1 - # cls_name = inspect.currentframe().f_code.co_name - # print("db call: ", DBRepo._count, " class name: ", cls_name) ai_model = AIModel.objects.filter(uuid=uuid, is_disabled=False).first() if not ai_model: return InternalResponse({}, 'invalid ai model uuid', False) @@ -485,9 +420,6 @@ def get_ai_model_from_uuid(self, uuid): return InternalResponse(payload, 'ai_model fetched', True) def get_ai_model_from_name(self, name): - # DBRepo._count += 1 - # cls_name = inspect.currentframe().f_code.co_name - # print("db call: ", DBRepo._count, " class name: ", cls_name) ai_model = AIModel.objects.filter(replicate_url=name, is_disabled=False).first() if not ai_model: return InternalResponse({}, 'invalid ai model name', False) @@ -499,9 +431,6 @@ def get_ai_model_from_name(self, name): return InternalResponse(payload, 'ai_model fetched', True) def get_all_ai_model_list(self, model_category_list=None, user_id=None, custom_trained=False, model_type_list=None): - # DBRepo._count += 1 - # cls_name = inspect.currentframe().f_code.co_name - # print("db call: ", DBRepo._count, " class name: ", cls_name) query = {'custom_trained': "all" if custom_trained == None else ("user" if custom_trained else "predefined"), 'is_disabled': False} if user_id: 
user = User.objects.filter(uuid=user_id, is_disabled=False).first() @@ -529,9 +458,6 @@ def get_all_ai_model_list(self, model_category_list=None, user_id=None, custom_t return InternalResponse(payload, 'ai_model fetched', True) def create_ai_model(self, **kwargs): - # DBRepo._count += 1 - # cls_name = inspect.currentframe().f_code.co_name - # print("db call: ", DBRepo._count, " class name: ", cls_name) attributes = CreateAIModelDao(data=kwargs) if not attributes.is_valid(): return InternalResponse({}, attributes.errors, False) @@ -555,9 +481,6 @@ def create_ai_model(self, **kwargs): return InternalResponse(payload, 'ai_model fetched', True) def update_ai_model(self, **kwargs): - # DBRepo._count += 1 - # cls_name = inspect.currentframe().f_code.co_name - # print("db call: ", DBRepo._count, " class name: ", cls_name) attributes = UpdateAIModelDao(attributes=kwargs) if not attributes.is_valid(): return InternalResponse({}, attributes.errors, False) @@ -585,9 +508,6 @@ def update_ai_model(self, **kwargs): return InternalResponse(payload, 'ai_model fetched', True) def delete_ai_model_from_uuid(self, uuid): - # DBRepo._count += 1 - # cls_name = inspect.currentframe().f_code.co_name - # print("db call: ", DBRepo._count, " class name: ", cls_name) ai_model = AIModel.objects.filter(uuid=uuid, is_disabled=False).first() if not ai_model: return InternalResponse({}, 'invalid ai model uuid', False) @@ -599,9 +519,6 @@ def delete_ai_model_from_uuid(self, uuid): # inference log def get_inference_log_from_uuid(self, uuid): - # DBRepo._count += 1 - # cls_name = inspect.currentframe().f_code.co_name - # print("db call: ", DBRepo._count, " class name: ", cls_name) log = InferenceLog.objects.filter(uuid=uuid, is_disabled=False).first() if not log: return InternalResponse({}, 'invalid inference log uuid', False) @@ -612,26 +529,33 @@ def get_inference_log_from_uuid(self, uuid): return InternalResponse(payload, 'inference log fetched', True) - def get_all_inference_log_list(self, 
project_id=None): - # DBRepo._count += 1 - # cls_name = inspect.currentframe().f_code.co_name - # print("db call: ", DBRepo._count, " class name: ", cls_name) + def get_all_inference_log_list(self, project_id=None, page=1, data_per_page=5, status_list=None): if project_id: project = Project.objects.filter(uuid=project_id, is_disabled=False).first() - log_list = InferenceLog.objects.filter(project_id=project.id, is_disabled=False).all() + log_list = InferenceLog.objects.filter(project_id=project.id, is_disabled=False).order_by('-created_on').all() else: - log_list = InferenceLog.objects.filter(is_disabled=False).all() + log_list = InferenceLog.objects.filter(is_disabled=False).order_by('-created_on').all() + + if status_list: + log_list = log_list.filter(status__in=status_list) + + paginator = Paginator(log_list, data_per_page) + if page > paginator.num_pages or page < 1: + return InternalResponse({}, "invalid page number", False) payload = { - 'data': InferenceLogDto(log_list, many=True).data + "data_per_page": data_per_page, + "page": page, + "total_pages": paginator.num_pages, + "count": paginator.count, + "data": InferenceLogDto( + paginator.page(page), many=True + ).data, } - + return InternalResponse(payload, 'inference log list fetched', True) def create_inference_log(self, **kwargs): - # DBRepo._count += 1 - # cls_name = inspect.currentframe().f_code.co_name - # print("db call: ", DBRepo._count, " class name: ", cls_name) attributes = CreateInferenceLogDao(data=kwargs) if not attributes.is_valid(): return InternalResponse({}, attributes.errors, False) @@ -662,9 +586,6 @@ def create_inference_log(self, **kwargs): return InternalResponse(payload, 'inference log created successfully', True) def delete_inference_log_from_uuid(self, uuid): - # DBRepo._count += 1 - # cls_name = inspect.currentframe().f_code.co_name - # print("db call: ", DBRepo._count, " class name: ", cls_name) log = InferenceLog.objects.filter(uuid=uuid, is_disabled=False).first() if not log: 
return InternalResponse({}, 'invalid inference log uuid', False) @@ -692,9 +613,6 @@ def update_inference_log(self, uuid, **kwargs): # ai model param map # TODO: add DTO in the output def get_ai_model_param_map_from_uuid(self, uuid): - # DBRepo._count += 1 - # cls_name = inspect.currentframe().f_code.co_name - # print("db call: ", DBRepo._count, " class name: ", cls_name) map = AIModelParamMap.objects.filter(uuid=uuid, is_disabled=False).first() if not map: return InternalResponse({}, 'invalid ai model param map uuid', False) @@ -702,9 +620,6 @@ def get_ai_model_param_map_from_uuid(self, uuid): return InternalResponse(map, 'ai model param map fetched', True) def get_all_ai_model_param_map_list(self, model_id=None): - # DBRepo._count += 1 - # cls_name = inspect.currentframe().f_code.co_name - # print("db call: ", DBRepo._count, " class name: ", cls_name) if model_id: map_list = AIModelParamMap.objects.filter(model_id=model_id, is_disabled=False).all() else: @@ -713,9 +628,6 @@ def get_all_ai_model_param_map_list(self, model_id=None): return InternalResponse(map_list, 'ai model param map list fetched', True) def create_ai_model_param_map(self, **kwargs): - # DBRepo._count += 1 - # cls_name = inspect.currentframe().f_code.co_name - # print("db call: ", DBRepo._count, " class name: ", cls_name) attributes = CreateAIModelParamMapDao(data=kwargs) if not attributes.is_valid(): return InternalResponse({}, attributes.errors, False) @@ -734,9 +646,6 @@ def create_ai_model_param_map(self, **kwargs): return InternalResponse(map, 'ai model param map created successfully', True) def delete_ai_model(self, uuid): - # DBRepo._count += 1 - # cls_name = inspect.currentframe().f_code.co_name - # print("db call: ", DBRepo._count, " class name: ", cls_name) map = AIModelParamMap.objects.filter(uuid=uuid, is_disabled=False).first() if not map: return InternalResponse({}, 'invalid ai model param map uuid', False) @@ -748,9 +657,6 @@ def delete_ai_model(self, uuid): # timing def 
get_timing_from_uuid(self, uuid): - # DBRepo._count += 1 - # cls_name = inspect.currentframe().f_code.co_name - # print("db call: ", DBRepo._count, " class name: ", cls_name) timing = Timing.objects.filter(uuid=uuid, is_disabled=False).first() if not timing: return InternalResponse({'data': None}, 'invalid timing uuid', False) @@ -762,9 +668,6 @@ def get_timing_from_uuid(self, uuid): return InternalResponse(payload, 'timing fetched', True) def get_timing_from_frame_number(self, project_uuid, frame_number): - # DBRepo._count += 1 - # cls_name = inspect.currentframe().f_code.co_name - # print("db call: ", DBRepo._count, " class name: ", cls_name) project: Project = Project.objects.filter(uuid=project_uuid, is_disabled=False).first() if project: timing = Timing.objects.filter(aux_frame_index=frame_number, project_id=project.id, is_disabled=False).first() @@ -778,9 +681,6 @@ def get_timing_from_frame_number(self, project_uuid, frame_number): return InternalResponse({'data': None}, 'invalid timing frame number', False) def get_primary_variant_location(self, uuid): - # DBRepo._count += 1 - # cls_name = inspect.currentframe().f_code.co_name - # print("db call: ", DBRepo._count, " class name: ", cls_name) timing = Timing.objects.filter(uuid=uuid, is_disabled=False).first() if not timing: return InternalResponse({}, 'invalid timing uuid', False) @@ -793,9 +693,6 @@ def get_primary_variant_location(self, uuid): # this is based on the aux_frame_index and not the order in the db def get_next_timing(self, uuid): - # DBRepo._count += 1 - # cls_name = inspect.currentframe().f_code.co_name - # print("db call: ", DBRepo._count, " class name: ", cls_name) timing = Timing.objects.filter(uuid=uuid, is_disabled=False).first() if not timing: return InternalResponse({}, 'invalid timing uuid', False) @@ -809,9 +706,6 @@ def get_next_timing(self, uuid): return InternalResponse(payload, 'timing fetched', True) def get_prev_timing(self, uuid): - # DBRepo._count += 1 - # cls_name = 
inspect.currentframe().f_code.co_name - # print("db call: ", DBRepo._count, " class name: ", cls_name) timing = Timing.objects.filter(uuid=uuid, is_disabled=False).first() if not timing: return InternalResponse({}, 'invalid timing uuid', False) @@ -825,9 +719,6 @@ def get_prev_timing(self, uuid): return InternalResponse(payload, 'timing fetched', True) def get_alternative_image_list(self, uuid): - # DBRepo._count += 1 - # cls_name = inspect.currentframe().f_code.co_name - # print("db call: ", DBRepo._count, " class name: ", cls_name) timing = Timing.objects.filter(uuid=uuid, is_disabled=False).first() if not timing: return InternalResponse([], 'invalid timing uuid', False) @@ -835,9 +726,6 @@ def get_alternative_image_list(self, uuid): return timing.alternative_image_list def get_timing_list_from_project(self, project_uuid=None): - # DBRepo._count += 1 - # cls_name = inspect.currentframe().f_code.co_name - # print("db call: ", DBRepo._count, " class name: ", cls_name) if project_uuid: project: Project = Project.objects.filter(uuid=project_uuid, is_disabled=False).first() if not project: @@ -854,9 +742,6 @@ def get_timing_list_from_project(self, project_uuid=None): return InternalResponse(payload, 'timing list fetched', True) def create_timing(self, **kwargs): - # DBRepo._count += 1 - # cls_name = inspect.currentframe().f_code.co_name - # print("db call: ", DBRepo._count, " class name: ", cls_name) attributes = CreateTimingDao(data=kwargs) if not attributes.is_valid(): return InternalResponse({}, attributes.errors, False) @@ -955,9 +840,6 @@ def create_timing(self, **kwargs): return InternalResponse(payload, 'timing created successfully', True) def remove_existing_timing(self, project_uuid): - # DBRepo._count += 1 - # cls_name = inspect.currentframe().f_code.co_name - # print("db call: ", DBRepo._count, " class name: ", cls_name) if project_uuid: project: Project = Project.objects.filter(uuid=project_uuid, is_disabled=False).first() else: @@ -985,9 +867,6 @@ def 
add_interpolated_clip(self, uuid, **kwargs): # TODO: add dao in this method def update_specific_timing(self, uuid, **kwargs): - # DBRepo._count += 1 - # cls_name = inspect.currentframe().f_code.co_name - # print("db call: ", DBRepo._count, " class name: ", cls_name) timing = Timing.objects.filter(uuid=uuid, is_disabled=False).first() if not timing: return InternalResponse({}, 'invalid timing uuid', False) @@ -1082,9 +961,6 @@ def update_specific_timing(self, uuid, **kwargs): return InternalResponse(payload, 'timing updated successfully', True) def delete_timing_from_uuid(self, uuid): - # DBRepo._count += 1 - # cls_name = inspect.currentframe().f_code.co_name - # print("db call: ", DBRepo._count, " class name: ", cls_name) timing = Timing.objects.filter(uuid=uuid, is_disabled=False).first() if not timing: return InternalResponse({}, 'invalid timing uuid', False) @@ -1094,9 +970,6 @@ def delete_timing_from_uuid(self, uuid): return InternalResponse({}, 'timing deleted successfully', True) def remove_primary_frame(self, uuid): - # DBRepo._count += 1 - # cls_name = inspect.currentframe().f_code.co_name - # print("db call: ", DBRepo._count, " class name: ", cls_name) timing = Timing.objects.filter(uuid=uuid, is_disabled=False).first() if not timing: return InternalResponse({}, 'invalid timing uuid', False) @@ -1106,9 +979,6 @@ def remove_primary_frame(self, uuid): return InternalResponse({}, 'primay frame removed successfully', True) def remove_source_image(self, uuid): - # DBRepo._count += 1 - # cls_name = inspect.currentframe().f_code.co_name - # print("db call: ", DBRepo._count, " class name: ", cls_name) timing = Timing.objects.filter(uuid=uuid, is_disabled=False).first() if not timing: return InternalResponse({}, 'invalid timing uuid', False) @@ -1118,9 +988,6 @@ def remove_source_image(self, uuid): return InternalResponse({}, 'source image removed successfully', True) def move_frame_one_step_forward(self, project_uuid, index_of_frame): - # DBRepo._count += 1 - # 
cls_name = inspect.currentframe().f_code.co_name - # print("db call: ", DBRepo._count, " class name: ", cls_name) project: Project = Project.objects.filter(uuid=project_uuid, is_disabled=False).first() if not project: return InternalResponse({}, 'invalid project uuid', False) @@ -1135,9 +1002,6 @@ def move_frame_one_step_forward(self, project_uuid, index_of_frame): # app setting def get_app_setting_from_uuid(self, uuid=None): - # DBRepo._count += 1 - # cls_name = inspect.currentframe().f_code.co_name - # print("db call: ", DBRepo._count, " class name: ", cls_name) if uuid: app_setting = AppSetting.objects.filter(uuid=uuid, is_disabled=False).first() else: @@ -1150,9 +1014,6 @@ def get_app_setting_from_uuid(self, uuid=None): return InternalResponse(payload, 'app_setting fetched successfully', True) def update_app_setting(self, **kwargs): - # DBRepo._count += 1 - # cls_name = inspect.currentframe().f_code.co_name - # print("db call: ", DBRepo._count, " class name: ", cls_name) attributes = UpdateAppSettingDao(data=kwargs) if not attributes.is_valid(): return InternalResponse({}, attributes.errors, False) @@ -1180,9 +1041,6 @@ def update_app_setting(self, **kwargs): def get_app_secrets_from_user_uuid(self, user_uuid): - # DBRepo._count += 1 - # cls_name = inspect.currentframe().f_code.co_name - # print("db call: ", DBRepo._count, " class name: ", cls_name) if user_uuid: user: User = User.objects.filter(uuid=user_uuid, is_disabled=False).first() if not user: @@ -1204,9 +1062,6 @@ def get_app_secrets_from_user_uuid(self, user_uuid): return InternalResponse(payload, 'app_setting fetched successfully', True) def get_all_app_setting_list(self): - # DBRepo._count += 1 - # cls_name = inspect.currentframe().f_code.co_name - # print("db call: ", DBRepo._count, " class name: ", cls_name) app_setting_list = AppSetting.objects.filter(is_disabled=False).all() payload = { @@ -1216,9 +1071,6 @@ def get_all_app_setting_list(self): return InternalResponse(payload, 'app_setting list 
fetched successfully', True) def create_app_setting(self, **kwargs): - # DBRepo._count += 1 - # cls_name = inspect.currentframe().f_code.co_name - # print("db call: ", DBRepo._count, " class name: ", cls_name) attributes = CreateAppSettingDao(data=kwargs) if not attributes.is_valid(): return InternalResponse({}, attributes.errors, False) @@ -1242,9 +1094,6 @@ def create_app_setting(self, **kwargs): def delete_app_setting(self, user_id): - # DBRepo._count += 1 - # cls_name = inspect.currentframe().f_code.co_name - # print("db call: ", DBRepo._count, " class name: ", cls_name) if AppSetting.objects.filter(is_disabled=False).count() <= 1: return InternalResponse({}, 'cannot delete the last app setting', False) @@ -1259,9 +1108,6 @@ def delete_app_setting(self, user_id): # setting def get_project_setting(self, project_uuid): - # DBRepo._count += 1 - # cls_name = inspect.currentframe().f_code.co_name - # print("db call: ", DBRepo._count, " class name: ", cls_name) project = Project.objects.filter(uuid=project_uuid, is_disabled=False).first() if not project: return InternalResponse({}, 'invalid project_id', False) @@ -1278,9 +1124,6 @@ def get_project_setting(self, project_uuid): # TODO: add valid model_id check throughout dp_repo def create_project_setting(self, **kwargs): - # DBRepo._count += 1 - # cls_name = inspect.currentframe().f_code.co_name - # print("db call: ", DBRepo._count, " class name: ", cls_name) attributes = CreateSettingDao(data=kwargs) if not attributes.is_valid(): return InternalResponse({}, attributes.errors, False) @@ -1324,9 +1167,6 @@ def create_project_setting(self, **kwargs): return InternalResponse(payload, 'setting fetched', True) def update_project_setting(self, **kwargs): - # DBRepo._count += 1 - # cls_name = inspect.currentframe().f_code.co_name - # print("db call: ", DBRepo._count, " class name: ", cls_name) attributes = UpdateSettingDao(data=kwargs) if not attributes.is_valid(): return InternalResponse({}, attributes.errors, False) @@ 
-1381,9 +1221,6 @@ def update_project_setting(self, **kwargs): return InternalResponse(payload, 'setting fetched', True) def bulk_update_project_setting(self, **kwargs): - # DBRepo._count += 1 - # cls_name = inspect.currentframe().f_code.co_name - # print("db call: ", DBRepo._count, " class name: ", cls_name) attributes = UpdateSettingDao(data=kwargs) if not attributes.is_valid(): return InternalResponse({}, attributes.errors, False) @@ -1436,9 +1273,6 @@ def bulk_update_project_setting(self, **kwargs): # backup data def create_backup(self, project_uuid, backup_name): - # DBRepo._count += 1 - # cls_name = inspect.currentframe().f_code.co_name - # print("db call: ", DBRepo._count, " class name: ", cls_name) project: Project = Project.objects.filter(uuid=project_uuid, is_disabled=False).first() if not project: return InternalResponse({}, 'invalid project', False) @@ -1537,9 +1371,6 @@ def create_backup(self, project_uuid, backup_name): return InternalResponse(payload, 'backup created', True) def get_backup_from_uuid(self, backup_uuid): - # DBRepo._count += 1 - # cls_name = inspect.currentframe().f_code.co_name - # print("db call: ", DBRepo._count, " class name: ", cls_name) backup: BackupTiming = BackupTiming.objects.filter(uuid=backup_uuid, is_disabled=False).first() if not backup: return InternalResponse({}, 'invalid backup', False) @@ -1551,9 +1382,6 @@ def get_backup_from_uuid(self, backup_uuid): return InternalResponse(payload, 'backup fetched', True) def get_backup_list(self, project_uuid): - # DBRepo._count += 1 - # cls_name = inspect.currentframe().f_code.co_name - # print("db call: ", DBRepo._count, " class name: ", cls_name) project: Project = Project.objects.filter(uuid=project_uuid, is_disabled=False).first() if not project: return InternalResponse({}, 'invalid project', False) @@ -1567,9 +1395,6 @@ def get_backup_list(self, project_uuid): return InternalResponse(payload, 'backup list fetched', True) def delete_backup(self, backup_uuid): - # DBRepo._count 
+= 1 - # cls_name = inspect.currentframe().f_code.co_name - # print("db call: ", DBRepo._count, " class name: ", cls_name) backup: BackupTiming = BackupTiming.objects.filter(uuid=backup_uuid, is_disabled=False).first() if not backup: return InternalResponse({}, 'invalid backup', False) @@ -1580,9 +1405,6 @@ def delete_backup(self, backup_uuid): return InternalResponse({}, 'backup deleted', True) def restore_backup(self, backup_uuid: str): - # DBRepo._count += 1 - # cls_name = inspect.currentframe().f_code.co_name - # print("db call: ", DBRepo._count, " class name: ", cls_name) backup: BackupTiming = self.get_backup_from_uuid(backup_uuid) current_timing_list: List[Timing] = self.get_timing_list_from_project(backup.project.uuid) diff --git a/banodoco_runner.py b/banodoco_runner.py index 5158d3a7..dd462feb 100644 --- a/banodoco_runner.py +++ b/banodoco_runner.py @@ -43,12 +43,6 @@ def main(): time.sleep(REFRESH_FREQUENCY) check_and_update_db() - # test_data_repo() - -def test_data_repo(): - data_repo = DataRepo() - app_settings = data_repo.get_app_setting_from_uuid() - print(app_settings.replicate_username) def is_app_running(): url = 'http://localhost:5500/healthz' diff --git a/ui_components/widgets/frame_selector.py b/ui_components/widgets/frame_selector.py index e5b3b228..8f015414 100644 --- a/ui_components/widgets/frame_selector.py +++ b/ui_components/widgets/frame_selector.py @@ -18,22 +18,11 @@ def frame_selector_widget(): if 'prev_frame_index' not in st.session_state: st.session_state['prev_frame_index'] = 1 - # st.write(st.session_state['prev_frame_index']) - # st.write(st.session_state['current_frame_index']) st.session_state['current_frame_index'] = st.number_input(f"Key frame # (out of {len(timing_details)})", 1, len(timing_details), value=st.session_state['prev_frame_index'], step=1, key="which_image_selector") - st.session_state['current_frame_uuid'] = timing_details[st.session_state['current_frame_index'] - 1].uuid - - if 
st.session_state['prev_frame_index'] != st.session_state['current_frame_index']: - st.session_state['prev_frame_index'] = st.session_state['current_frame_index'] - st.session_state['current_frame_uuid'] = timing_details[st.session_state['current_frame_index'] - 1].uuid - st.session_state['reset_canvas'] = True - st.session_state['frame_styling_view_type_index'] = 0 - st.session_state['frame_styling_view_type'] = "Individual View" - - st.rerun() + update_current_frame_index(st.session_state['current_frame_index']) with time2: single_frame_time_selector(st.session_state['current_frame_uuid'], 'navbar', shift_frames=False) @@ -65,4 +54,20 @@ def frame_selector_widget(): if st.button("Delete key frame"): delete_frame(st.session_state['current_frame_uuid']) - st.rerun() \ No newline at end of file + st.rerun() + + +def update_current_frame_index(index): + data_repo = DataRepo() + timing_details = data_repo.get_timing_list_from_project(project_uuid=st.session_state["project_uuid"]) + + st.session_state['current_frame_uuid'] = timing_details[index - 1].uuid + + if st.session_state['prev_frame_index'] != index: + st.session_state['prev_frame_index'] = index + st.session_state['current_frame_uuid'] = timing_details[index - 1].uuid + st.session_state['reset_canvas'] = True + st.session_state['frame_styling_view_type_index'] = 0 + st.session_state['frame_styling_view_type'] = "Individual View" + + st.rerun() \ No newline at end of file diff --git a/ui_components/widgets/sidebar_logger.py b/ui_components/widgets/sidebar_logger.py index 78f332e4..140fbb30 100644 --- a/ui_components/widgets/sidebar_logger.py +++ b/ui_components/widgets/sidebar_logger.py @@ -4,6 +4,7 @@ import json import math +from ui_components.widgets.frame_selector import update_current_frame_index from utils.data_repo.data_repo import DataRepo @@ -12,8 +13,6 @@ def sidebar_logger(project_uuid): a1, _, a3 = st.columns([1, 0.2, 1]) - log_list = data_repo.get_all_inference_log_list(project_uuid) - 
log_list.reverse() refresh_disabled = False # not any(log.status in [InferenceStatus.QUEUED.value, InferenceStatus.IN_PROGRESS.value] for log in log_list) if a1.button("Refresh log", disabled=refresh_disabled): st.rerun() @@ -21,26 +20,30 @@ def sidebar_logger(project_uuid): status_option = st.radio("Statuses to display:", options=["All", "In Progress", "Succeeded", "Failed"], key="status_option", index=0, horizontal=True) + status_list = None if status_option == "In Progress": - log_list = [log for log in log_list if log.status in [InferenceStatus.QUEUED.value, InferenceStatus.IN_PROGRESS.value]] + status_list = [InferenceStatus.QUEUED.value, InferenceStatus.IN_PROGRESS.value] elif status_option == "Succeeded": - log_list = [log for log in log_list if log.status == InferenceStatus.COMPLETED.value] + status_list = [InferenceStatus.COMPLETED.value] elif status_option == "Failed": - log_list = [log for log in log_list if log.status == InferenceStatus.FAILED.value] + status_list = [InferenceStatus.FAILED.value] b1, b2 = st.columns([1, 1]) items_per_page = b2.slider("Items per page", min_value=1, max_value=20, value=5, step=1) + log_list = data_repo.get_all_inference_log_list(project_id=project_uuid, page=1, data_per_page=items_per_page, status_list=status_list) + log_list.reverse() + page_number = b1.number_input('Page number', min_value=1, max_value=math.ceil(len(log_list) / items_per_page), value=1, step=1) - display_list = log_list[(page_number - 1) * items_per_page : page_number * items_per_page] - file_list = data_repo.get_file_list_from_log_uuid_list([log.uuid for log in display_list]) + # display_list = log_list[(page_number - 1) * items_per_page : page_number * items_per_page] + file_list = data_repo.get_file_list_from_log_uuid_list([log.uuid for log in log_list]) log_file_dict = {} for file in file_list: log_file_dict[str(file.inference_log.uuid)] = file st.markdown("---") - for idx, log in enumerate(display_list): + for _, log in enumerate(log_list): 
origin_data = json.loads(log.input_params).get(InferenceParamType.ORIGIN_DATA.value, None) if not log.status or not origin_data: continue @@ -81,11 +84,10 @@ def sidebar_logger(project_uuid): st.warning("Canceled") if output_url: - if st.button(f"Jump to frame {idx}"): - st.info("Fix this.") + timing = data_repo.get_timing_from_uuid(origin_data['timing_uuid']) + if timing: + if st.button(f"Jump to frame {timing.aux_frame_index + 1}", key=str(log.uuid)): + update_current_frame_index(timing.aux_frame_index + 1) + - # if st.button("Delete", key=f"delete_{log.uuid}"): - # data_repo.update_inference_log(log.uuid, status="") - # st.rerun() - st.markdown("---") \ No newline at end of file diff --git a/utils/data_repo/data_repo.py b/utils/data_repo/data_repo.py index 069a2819..9abcbcf2 100644 --- a/utils/data_repo/data_repo.py +++ b/utils/data_repo/data_repo.py @@ -190,8 +190,8 @@ def get_inference_log_from_uuid(self, uuid): log = res.data['data'] if res else None return InferenceLogObject(**log) if log else None - def get_all_inference_log_list(self, project_id=None): - log_list = self.db_repo.get_all_inference_log_list(project_id).data['data'] + def get_all_inference_log_list(self, **kwargs): + log_list = self.db_repo.get_all_inference_log_list(**kwargs).data['data'] return [InferenceLogObject(**log) for log in log_list] if log_list else None From 4ac563ec76a3c6b28d227d70848d24e43ed1ec93 Mon Sep 17 00:00:00 2001 From: piyushK52 Date: Mon, 16 Oct 2023 18:33:38 +0530 Subject: [PATCH 086/164] log pagination fixed --- backend/db_repo.py | 2 ++ ui_components/constants.py | 1 + ui_components/widgets/frame_selector.py | 2 -- ui_components/widgets/sidebar_logger.py | 13 +++++++++---- utils/data_repo/data_repo.py | 7 +++++-- 5 files changed, 17 insertions(+), 8 deletions(-) diff --git a/backend/db_repo.py b/backend/db_repo.py index d4239bca..b557ddf9 100644 --- a/backend/db_repo.py +++ b/backend/db_repo.py @@ -538,6 +538,8 @@ def get_all_inference_log_list(self, project_id=None, 
page=1, data_per_page=5, s if status_list: log_list = log_list.filter(status__in=status_list) + else: + log_list = log_list.exclude(status="") paginator = Paginator(log_list, data_per_page) if page > paginator.num_pages or page < 1: diff --git a/ui_components/constants.py b/ui_components/constants.py index 5ec8c3b2..7be01430 100644 --- a/ui_components/constants.py +++ b/ui_components/constants.py @@ -50,6 +50,7 @@ class DefaultProjectSettingParams: batch_animation_tool = AnimationToolType.G_FILM.value batch_animation_style = AnimationStyleType.INTERPOLATION.value batch_model = None + total_log_pages = 1 # TODO: make proper paths for every file CROPPED_IMG_LOCAL_PATH = "videos/temp/cropped.png" diff --git a/ui_components/widgets/frame_selector.py b/ui_components/widgets/frame_selector.py index 8f015414..69239ba5 100644 --- a/ui_components/widgets/frame_selector.py +++ b/ui_components/widgets/frame_selector.py @@ -27,8 +27,6 @@ def frame_selector_widget(): with time2: single_frame_time_selector(st.session_state['current_frame_uuid'], 'navbar', shift_frames=False) - - with st.expander(f"🖼️ Frame #{st.session_state['current_frame_index']} Details"): a1, a2 = st.columns([1,1]) with a1: diff --git a/ui_components/widgets/sidebar_logger.py b/ui_components/widgets/sidebar_logger.py index 140fbb30..a5e614b9 100644 --- a/ui_components/widgets/sidebar_logger.py +++ b/ui_components/widgets/sidebar_logger.py @@ -29,12 +29,17 @@ def sidebar_logger(project_uuid): status_list = [InferenceStatus.FAILED.value] b1, b2 = st.columns([1, 1]) - items_per_page = b2.slider("Items per page", min_value=1, max_value=20, value=5, step=1) - log_list = data_repo.get_all_inference_log_list(project_id=project_uuid, page=1, data_per_page=items_per_page, status_list=status_list) - log_list.reverse() - page_number = b1.number_input('Page number', min_value=1, max_value=math.ceil(len(log_list) / items_per_page), value=1, step=1) + project_setting = data_repo.get_project_setting(project_uuid) + 
page_number = b1.number_input('Page number', min_value=1, max_value=project_setting.total_log_pages, value=1, step=1) + items_per_page = b2.slider("Items per page", min_value=1, max_value=20, value=5, step=1) + log_list, total_page_count = data_repo.get_all_inference_log_list(project_id=project_uuid, page=page_number, data_per_page=items_per_page, status_list=status_list) + + if project_setting.total_log_pages != total_page_count: + project_setting.total_log_pages = total_page_count + st.rerun() + st.write("Total page count: ", total_page_count) # display_list = log_list[(page_number - 1) * items_per_page : page_number * items_per_page] file_list = data_repo.get_file_list_from_log_uuid_list([log.uuid for log in log_list]) log_file_dict = {} diff --git a/utils/data_repo/data_repo.py b/utils/data_repo/data_repo.py index 9abcbcf2..bc1f33fa 100644 --- a/utils/data_repo/data_repo.py +++ b/utils/data_repo/data_repo.py @@ -191,8 +191,11 @@ def get_inference_log_from_uuid(self, uuid): return InferenceLogObject(**log) if log else None def get_all_inference_log_list(self, **kwargs): - log_list = self.db_repo.get_all_inference_log_list(**kwargs).data['data'] - return [InferenceLogObject(**log) for log in log_list] if log_list else None + res = self.db_repo.get_all_inference_log_list(**kwargs) + log_list = res.data['data'] if res.status else None + total_page_count = res.data['total_pages'] if res.status else None + + return ([InferenceLogObject(**log) for log in log_list] if log_list else None, total_page_count) def create_inference_log(self, **kwargs): From 0431bfa4af5348500e31aefb1bfdbca9d67031dd Mon Sep 17 00:00:00 2001 From: piyushk52 Date: Mon, 16 Oct 2023 22:10:01 +0530 Subject: [PATCH 087/164] minor merge fixes --- .../components/frame_styling_page.py | 143 ++++++------------ ui_components/methods/common_methods.py | 28 ++++ ui_components/methods/ml_methods.py | 22 ++- .../widgets/style_explorer_element.py | 95 +++++------- 4 files changed, 135 insertions(+), 153 
deletions(-) diff --git a/ui_components/components/frame_styling_page.py b/ui_components/components/frame_styling_page.py index 96adcd7b..22651957 100644 --- a/ui_components/components/frame_styling_page.py +++ b/ui_components/components/frame_styling_page.py @@ -1,37 +1,34 @@ + import streamlit as st from shared.constants import ViewType - -from ui_components.methods.common_methods import add_key_frame,compare_to_previous_and_next_frame,compare_to_source_frame,style_cloning_element from ui_components.methods.ml_methods import trigger_restyling_process from ui_components.widgets.cropping_element import cropping_selector_element -from ui_components.widgets.frame_clip_generation_elements import current_preview_video_element, update_animation_style_element +from ui_components.widgets.frame_clip_generation_elements import current_preview_video_element from ui_components.widgets.frame_selector import frame_selector_widget +from ui_components.widgets.frame_style_clone_element import style_cloning_element from ui_components.widgets.image_carousal import display_image from ui_components.widgets.prompt_finder import prompt_finder_element -from ui_components.widgets.add_key_frame_element import add_key_frame_element +from ui_components.widgets.add_key_frame_element import add_key_frame, add_key_frame_element from ui_components.widgets.styling_element import styling_element from ui_components.widgets.timeline_view import timeline_view -from ui_components.widgets.variant_comparison_element import variant_comparison_element +from ui_components.widgets.variant_comparison_element import compare_to_previous_and_next_frame, compare_to_source_frame, variant_comparison_element from ui_components.widgets.animation_style_element import animation_style_element from ui_components.widgets.inpainting_element import inpainting_element from ui_components.widgets.drawing_element import drawing_element -from ui_components.widgets.list_view import list_view_set_up, page_toggle, 
styling_list_view,motion_list_view +from ui_components.widgets.sidebar_logger import sidebar_logger +from ui_components.widgets.style_explorer_element import style_explorer_element +from ui_components.widgets.variant_comparison_grid import variant_comparison_grid from utils import st_memory - -import math from ui_components.constants import CreativeProcessType, WorkflowStageType -from utils.constants import ImageStage from utils.data_repo.data_repo import DataRepo def frame_styling_page(mainheader2, project_uuid: str): - data_repo = DataRepo() - + data_repo = DataRepo() timing_details = data_repo.get_timing_list_from_project(project_uuid) - project_settings = data_repo.get_project_setting(project_uuid) if "strength" not in st.session_state: @@ -49,20 +46,16 @@ def frame_styling_page(mainheader2, project_uuid: str): if "current_frame_uuid" not in st.session_state: timing = data_repo.get_timing_list_from_project(project_uuid)[0] st.session_state['current_frame_uuid'] = timing.uuid - - + st.session_state['current_frame_index'] = timing.aux_frame_index + 1 if 'frame_styling_view_type' not in st.session_state: - st.session_state['frame_styling_view_type'] = "Individual View" + st.session_state['frame_styling_view_type'] = "Individual" st.session_state['frame_styling_view_type_index'] = 0 - if st.session_state['change_view_type'] == True: st.session_state['change_view_type'] = False - # round down st.session_state['which_image']to nearest 10 - - if st.session_state['frame_styling_view_type'] == "List View": + if st.session_state['frame_styling_view_type'] == "Timeline" or st.session_state['frame_styling_view_type'] == "Explorer": st.markdown( f"#### :red[{st.session_state['main_view_type']}] > **:green[{st.session_state['frame_styling_view_type']}]** > :orange[{st.session_state['page']}]") else: @@ -71,14 +64,15 @@ def frame_styling_page(mainheader2, project_uuid: str): project_settings = data_repo.get_project_setting(project_uuid) - if 
st.session_state['frame_styling_view_type'] == "Individual View": + if st.session_state['frame_styling_view_type'] == "Explorer": + style_explorer_element(project_uuid) + + elif st.session_state['frame_styling_view_type'] == "Individual": with st.sidebar: frame_selector_widget() if st.session_state['page'] == CreativeProcessType.MOTION.value: - idx = st.session_state['current_frame_index'] - 1 - st.session_state['show_comparison'] = st_memory.radio("Show:", options=["Other Variants", "Preview Video in Context"], horizontal=True, key="show_comparison_radio_motion") if st.session_state['show_comparison'] == "Other Variants": @@ -88,40 +82,44 @@ def frame_styling_page(mainheader2, project_uuid: str): current_preview_video_element(st.session_state['current_frame_uuid']) st.markdown("***") - with st.expander("🎬 Choose Animation Style & Create Variants", expanded=True): - animation_style_element(st.session_state['current_frame_uuid'], project_uuid) - elif st.session_state['page'] == CreativeProcessType.STYLING.value: # carousal_of_images_element(project_uuid, stage=WorkflowStageType.STYLED.value) comparison_values = [ - "Other Variants", "Source Frame", "Previous & Next Frame", "None"] - + "Single Variants", + "All Other Variants", + "Source Frame", + "Previous & Next Frame", + "None" + ] st.session_state['show_comparison'] = st_memory.radio("Show comparison to:", options=comparison_values, horizontal=True, key="show_comparison_radio") - - - if st.session_state['show_comparison'] == "Other Variants": + if st.session_state['show_comparison'] == "Single Variants": variant_comparison_element(st.session_state['current_frame_uuid'], stage=CreativeProcessType.STYLING.value) + + elif st.session_state['show_comparison'] == "All Other Variants": + variant_comparison_grid(st.session_state['current_frame_uuid'], stage=CreativeProcessType.STYLING.value) elif st.session_state['show_comparison'] == "Source Frame": compare_to_source_frame(timing_details) elif 
st.session_state['show_comparison'] == "Previous & Next Frame": - compare_to_previous_and_next_frame(project_uuid,timing_details) elif st.session_state['show_comparison'] == "None": - display_image(timing_uuid=st.session_state['current_frame_uuid'], stage=WorkflowStageType.STYLED.value, clickable=False) st.markdown("***") - - st.session_state['styling_view'] = st_memory.menu('',["Generate Variants", "Crop, Move & Rotate Image", "Inpainting & BG Removal","Draw On Image"], icons=['magic', 'crop', "paint-bucket", 'pencil'], menu_icon="cast", default_index=st.session_state.get('styling_view_index', 0), key="styling_view_selector", orientation="horizontal", styles={"nav-link": {"font-size": "15px", "margin": "0px", "--hover-color": "#eee"}, "nav-link-selected": {"background-color": "#66A9BE"}}) - - if st.session_state['styling_view'] == "Generate Variants": + st.session_state['styling_view'] = st_memory.menu('',\ + ["Generate Variants", "Crop, Move & Rotate Image", "Inpainting & BG Removal","Draw On Image"], \ + icons=['magic', 'crop', "paint-bucket", 'pencil'], \ + menu_icon="cast", default_index=st.session_state.get('styling_view_index', 0), \ + key="styling_view_selector", orientation="horizontal", \ + styles={"nav-link": {"font-size": "15px", "margin": "0px", "--hover-color": "#eee"}, "nav-link-selected": {"background-color": "#66A9BE"}}) + + if st.session_state['styling_view'] == "Generate Variants": with st.expander("🛠️ Generate Variants + Prompt Settings", expanded=True): col1, col2 = st.columns([1, 1]) with col1: @@ -130,7 +128,9 @@ def frame_styling_page(mainheader2, project_uuid: str): detail1, detail2 = st.columns([1, 1]) with detail1: st.session_state['individual_number_of_variants'] = st.number_input( - f"How many variants?", min_value=1, max_value=100, key=f"number_of_variants_{st.session_state['current_frame_index']}") + f"How many variants?", min_value=1, max_value=100, \ + key=f"number_of_variants_{st.session_state['current_frame_index']}" + ) with 
detail2: # TODO: add custom model validation such for sd img2img the value of strength can only be 1 @@ -150,6 +150,7 @@ def frame_styling_page(mainheader2, project_uuid: str): custom_models=st.session_state['custom_models'], adapter_type=st.session_state['adapter_type'], update_inference_settings=True, + add_image_in_params=st.session_state['add_image_in_params'], low_threshold=st.session_state['low_threshold'], high_threshold=st.session_state['high_threshold'], canny_image=st.session_state['canny_image'] if 'canny_image' in st.session_state else None, @@ -160,12 +161,9 @@ def frame_styling_page(mainheader2, project_uuid: str): st.rerun() st.markdown("***") - st.info( - "You can restyle multiple frames at once in the List view.") - + "You can restyle multiple frames at once in the Timeline view.") st.markdown("***") - style_cloning_element(timing_details) with st.expander("🔍 Prompt Finder"): @@ -176,71 +174,30 @@ def frame_styling_page(mainheader2, project_uuid: str): cropping_selector_element(project_uuid) elif st.session_state['styling_view'] == "Inpainting & BG Removal": - with st.expander("🌌 Inpainting, Background Removal & More", expanded=True): - inpainting_element(st.session_state['current_frame_uuid']) elif st.session_state['styling_view'] == "Draw On Image": with st.expander("📝 Draw On Image", expanded=True): - drawing_element(timing_details,project_settings,project_uuid) with st.expander("➕ Add Key Frame", expanded=True): - selected_image, inherit_styling_settings, how_long_after, which_stage_for_starting_image = add_key_frame_element(timing_details, project_uuid) - if st.button(f"Add key frame",type="primary",use_container_width=True): - add_key_frame(selected_image, inherit_styling_settings, how_long_after, which_stage_for_starting_image) st.rerun() - elif st.session_state['frame_styling_view_type'] == "List View": - + elif st.session_state['frame_styling_view_type'] == "Timeline": st.markdown("---") - header_col_1, header_col_2, header_col_3, 
header_col_4, header_col_5 = st.columns([1.25,0.25,4, 1.5, 1.5]) - with header_col_1: - st.session_state['list_view_type'] = st_memory.radio("View type:", options=["Timeline View","Detailed View"], key="list_view_type_slider") - - with header_col_5: - shift_frames_setting = st.toggle("Shift Frames", help="If set to True, it will shift the frames after your adjustment forward by the amount of time you move.") - - if st.session_state['list_view_type'] == "Detailed View": - - with header_col_4: - num_pages, items_per_page = list_view_set_up(timing_details, project_uuid) - start_index, end_index = page_toggle(num_pages, items_per_page,project_uuid, position='top') - - st.markdown("***") - - if st.session_state['page'] == "Styling": - - with st.sidebar: + if st.session_state['page'] == "Key Frames": + with st.sidebar: + with st.expander("🌀 Batch Styling", expanded=False): styling_element(st.session_state['current_frame_uuid'], view_type=ViewType.LIST.value) - - styling_list_view(start_index, end_index, shift_frames_setting, project_uuid) - - st.markdown("***") - - # Update the current page in session state - elif st.session_state['page'] == "Motion": - - motion_list_view(start_index, end_index, shift_frames_setting, project_uuid) - - start_index, end_index = page_toggle(num_pages, items_per_page,project_uuid, position='bottom') - - elif st.session_state['list_view_type'] == "Timeline View": - - with st.sidebar: - styling_element(st.session_state['current_frame_uuid'], view_type=ViewType.LIST.value) - - - if st.session_state['page'] == "Styling": - timeline_view(shift_frames_setting, project_uuid, "Styling", header_col_3, header_col_4) - elif st.session_state['page'] == "Motion": - timeline_view(shift_frames_setting, project_uuid, "Motion", header_col_3, header_col_4) - - - - + timeline_view(project_uuid, "Key Frames") + elif st.session_state['page'] == "Videos": + timeline_view(project_uuid, "Videos") + + with st.sidebar: + with st.expander("🔍 Inference Logging", 
expanded=True): + sidebar_logger(project_uuid) diff --git a/ui_components/methods/common_methods.py b/ui_components/methods/common_methods.py index e45f930f..6eeac9bf 100644 --- a/ui_components/methods/common_methods.py +++ b/ui_components/methods/common_methods.py @@ -19,11 +19,13 @@ from ui_components.methods.file_methods import add_temp_file_to_project, convert_bytes_to_file, generate_pil_image, generate_temp_file, save_or_host_file, save_or_host_file_bytes from ui_components.methods.video_methods import calculate_desired_duration_of_individual_clip, update_speed_of_video_clip from ui_components.models import InternalAIModelObject, InternalFrameTimingObject, InternalSettingObject +from utils.constants import ImageStage from utils.data_repo.data_repo import DataRepo from shared.constants import AnimationStyleType from ui_components.models import InternalFileObject from typing import Union +import streamlit as st from utils.media_processor.video import VideoProcessor @@ -351,6 +353,32 @@ def delete_frame(timing_uuid): st.session_state['prev_frame_index'] = st.session_state['current_frame_index'] st.session_state['current_frame_uuid'] = timing_details[st.session_state['current_frame_index'] - 1].uuid +def save_uploaded_image(image, project_uuid, frame_uuid, save_type): + data_repo = DataRepo() + + try: + saved_image = save_new_image(image, project_uuid) + + # Update records based on save_type + if save_type == "source": + data_repo.update_specific_timing(frame_uuid, source_image_id=saved_image.uuid) + elif save_type == "styled": + number_of_image_variants = add_image_variant(saved_image.uuid, frame_uuid) + promote_image_variant(frame_uuid, number_of_image_variants - 1) + + return saved_image + except Exception as e: + print(f"Failed to save image file due to: {str(e)}") + return None + +def jump_to_single_frame_view_button(display_number, timing_details): + if st.button(f"Jump to #{display_number}"): + st.session_state['prev_frame_index'] = display_number + 
st.session_state['current_frame_uuid'] = timing_details[st.session_state['current_frame_index'] - 1].uuid + st.session_state['frame_styling_view_type'] = "Individual View" + st.session_state['change_view_type'] = True + st.rerun() + def replace_image_widget(timing_uuid, stage, options=["Other Frame", "Uploaded Frame"]): data_repo = DataRepo() timing = data_repo.get_timing_from_uuid(timing_uuid) diff --git a/ui_components/methods/ml_methods.py b/ui_components/methods/ml_methods.py index b2214765..214df22e 100644 --- a/ui_components/methods/ml_methods.py +++ b/ui_components/methods/ml_methods.py @@ -303,4 +303,24 @@ def dynamic_prompting(prompt, source_image): source_image, "the person is looking") prompt = prompt.replace("[looking]", "looking " + str(prompt_looking)) - return prompt \ No newline at end of file + return prompt + +def query_llama2(user_instructions, system_instructions): + prompt = system_instructions + "\n" + user_instructions + "|" + output = replicate.run( + "meta/llama-2-7b:527827021d8756c7ab79fde0abbfaac885c37a3ed5fe23c7465093f0878d55ef", + input={ + "debug": False, + "top_k": 250, + "top_p": 0.95, + "prompt": prompt, + "temperature": 0.73, + "max_new_tokens": 30, + "min_new_tokens": -1, + "stop_sequences": "\n" + } + ) + result = "" + for item in output: + result += item + return result \ No newline at end of file diff --git a/ui_components/widgets/style_explorer_element.py b/ui_components/widgets/style_explorer_element.py index cb0071bd..e65a12a1 100644 --- a/ui_components/widgets/style_explorer_element.py +++ b/ui_components/widgets/style_explorer_element.py @@ -1,84 +1,36 @@ import streamlit as st +from ui_components.methods.common_methods import promote_image_variant +from ui_components.methods.ml_methods import query_llama2 from utils.data_repo.data_repo import DataRepo from shared.constants import AIModelType import replicate def style_explorer_element(project_uuid): - - def query_llama2(user_instructions, system_instructions): - prompt = 
system_instructions + "\n" + user_instructions + "|" - output = replicate.run( - "meta/llama-2-7b:527827021d8756c7ab79fde0abbfaac885c37a3ed5fe23c7465093f0878d55ef", - input={ - "debug": False, - "top_k": 250, - "top_p": 0.95, - "prompt": prompt, - "temperature": 0.73, - "max_new_tokens": 30, - "min_new_tokens": -1, - "stop_sequences": "\n" - } - ) - result = "" - for item in output: - result += item - return result - - def create_prompt(styling_instructions="", character_instructions="", action_instructions="", scene_instructions=""): - text_list = [] - - def process_instructions(instructions, system_instructions_template): - if instructions: - result = query_llama2(instructions, system_instructions_template) - text_list.append(result) - - process_instructions(character_instructions, "Input|Character Descriptions:\nSickly old man|Francois Leger,old Russian man, beaten-down look, wearing suit\nPretty young woman|Jules van Cohen,beautiful young woman, floral dress,vibrant\nIrish boy|James McCarthy,10 year old Irish boy,red hair,pink shirt,wheezing in a small voice\nYoung thug|Hughie Banks,23 y/o English football hooligan with skinned head") - process_instructions(styling_instructions, "Input|Style Description:\nmoody and emotion|watercolour style, dark colours and pastel tones.\nchildren's adventure|simple children's book illustration style with light colours\ngritty and realistic|Sin City style,black and white,realistic,strong lines.\nhighly abstract|abstract art style, vibrant colours and thick linework.") - process_instructions(action_instructions, "Input|Action Description:\ngoing on an adventure|exploring old ruins,with a flashlight\nbusy day in the city|walking through downtown at rushour\nfamily time|making dinner with the family\nbeing creepy|hiding in bushes,looking in window\nworking hard|finishing homework,late at night") - process_instructions(scene_instructions, "Input|Scene Description:\nForest|Misty woods with towering trees and glowing 
plants.\nFuturistic city|Skyscrapers, flying cars, neon lights in a futuristic metropolis.\nMedieval|Castle courtyard with knights, cobblestones, and a fountain.\nBeach|Golden sands, rolling waves, and a vibrant sunset.\nApocalypse|Ruined buildings and desolation in a bleak wasteland.") - - return ", ".join(text_list) - - def create_variate_option(column, key): - label = key.replace('_', ' ').capitalize() - variate_option = column.checkbox(f"Vary {label.lower()}", key=f"{key}_checkbox") - if variate_option: - instructions = column.text_area(f"How would you like to vary the {label.lower()}?", key=f"{key}_textarea", help=f"It'll write a custom {label.lower()} prompt based on your instructions.") - else: - instructions = "" - return instructions - st.markdown("***") data_repo = DataRepo() - a1, a2, a3 = st.columns([0.5, 1, 0.5]) + _, a2, _ = st.columns([0.5, 1, 0.5]) prompt = a2.text_area("What's your base prompt?", key="prompt", help="This will be included at the beginning of each prompt") - b1, b2, b3, b4,b5,b6 = st.columns([0.5, 1, 1, 1, 1, 0.5]) + _, b2, b3, b4, b5, _ = st.columns([0.5, 1, 1, 1, 1, 0.5]) character_instructions = create_variate_option(b2, "character") styling_instructions = create_variate_option(b3, "styling") action_instructions = create_variate_option(b4, "action") scene_instructions = create_variate_option(b5, "scene") - model_name_list = list(set([m.name for m in data_repo.get_all_ai_model_list(model_type_list=[AIModelType.TXT2IMG.value], custom_trained=False)])) - c1, c2, c3 = st.columns([0.25, 1, 0.25]) - + _, c2, _ = st.columns([0.25, 1, 0.25]) with c2: models_to_use = st.multiselect("Which models would you like to use?", model_name_list, key="models_to_use", default=model_name_list, help="It'll rotate through the models you select.") - d1, d2, d3 = st.columns([0.75, 1, 0.75]) - + _, d2, _ = st.columns([0.75, 1, 0.75]) with d2: number_to_generate = st.slider("How many images would you like to generate?", min_value=1, max_value=100, value=10, 
step=1, key="number_to_generate", help="It'll generate 4 from each variation.") - e1, e2, e3 = st.columns([0.5, 1, 0.5]) - + _, e2, _ = st.columns([0.5, 1, 0.5]) if e2.button("Generate images", key="generate_images", use_container_width=True, type="primary"): - counter = 0 varied_text = "" num_models = len(models_to_use) @@ -92,12 +44,10 @@ def create_variate_option(column, key): st.write(f"Model: {model_name}") counter += 1 - timing = data_repo.get_timing_from_uuid("c414f700-680b-4712-a9c5-22c9935d7855") - + timing = data_repo.get_timing_from_uuid(st.session_state['current_frame_uuid']) variants = timing.alternative_images_list st.markdown("***") - num_columns = st.slider('Number of columns', min_value=1, max_value=10, value=4) num_items_per_page = 30 @@ -122,4 +72,31 @@ def create_variate_option(column, key): if st.button(f"Add to timeline", key=f"Promote Variant #{i + j + 1} for {st.session_state['current_frame_index']}", help="Promote this variant to the primary image", use_container_width=True): promote_image_variant(timing.uuid, i + j) st.rerun() - st.markdown("***") \ No newline at end of file + st.markdown("***") + + +def create_variate_option(column, key): + label = key.replace('_', ' ').capitalize() + variate_option = column.checkbox(f"Vary {label.lower()}", key=f"{key}_checkbox") + if variate_option: + instructions = column.text_area(f"How would you like to vary the {label.lower()}?", key=f"{key}_textarea", help=f"It'll write a custom {label.lower()} prompt based on your instructions.") + else: + instructions = "" + return instructions + +def create_prompt(**kwargs): + text_list = [] + + system_instruction_template_list = { + "character_instructions": "Input|Character Descriptions:\nSickly old man|Francois Leger,old Russian man, beaten-down look, wearing suit\nPretty young woman|Jules van Cohen,beautiful young woman, floral dress,vibrant\nIrish boy|James McCarthy,10 year old Irish boy,red hair,pink shirt,wheezing in a small voice\nYoung thug|Hughie 
Banks,23 y/o English football hooligan with skinned head", + "styling_instructions": "Input|Style Description:\nmoody and emotion|watercolour style, dark colours and pastel tones.\nchildren's adventure|simple children's book illustration style with light colours\ngritty and realistic|Sin City style,black and white,realistic,strong lines.\nhighly abstract|abstract art style, vibrant colours and thick linework.", + "action_instructions": "Input|Action Description:\ngoing on an adventure|exploring old ruins,with a flashlight\nbusy day in the city|walking through downtown at rushour\nfamily time|making dinner with the family\nbeing creepy|hiding in bushes,looking in window\nworking hard|finishing homework,late at night", + "scene_instructions": "Input|Scene Description:\nForest|Misty woods with towering trees and glowing plants.\nFuturistic city|Skyscrapers, flying cars, neon lights in a futuristic metropolis.\nMedieval|Castle courtyard with knights, cobblestones, and a fountain.\nBeach|Golden sands, rolling waves, and a vibrant sunset.\nApocalypse|Ruined buildings and desolation in a bleak wasteland.", + } + + for instruction_type, user_instruction in kwargs: + if instruction_type in system_instruction_template_list and user_instruction: + result = query_llama2(user_instruction, system_instruction_template_list[instruction_type]) + text_list.append(result) + + return ", ".join(text_list) \ No newline at end of file From 34cb35a649ee16a8d294d683b5b3b1fdc1df87a0 Mon Sep 17 00:00:00 2001 From: piyushk52 Date: Tue, 17 Oct 2023 13:03:43 +0530 Subject: [PATCH 088/164] wip: explorer view changes --- backend/db_repo.py | 46 +++++- banodoco_runner.py | 34 ++++- shared/constants.py | 7 + .../components/video_rendering_page.py | 6 +- ui_components/constants.py | 1 + ui_components/methods/common_methods.py | 59 +++++++- ui_components/setup.py | 18 +-- .../widgets/style_explorer_element.py | 133 +++++++++++++----- utils/data_repo/data_repo.py | 14 +- 9 files changed, 246 
insertions(+), 72 deletions(-) diff --git a/backend/db_repo.py b/backend/db_repo.py index b557ddf9..0728027b 100644 --- a/backend/db_repo.py +++ b/backend/db_repo.py @@ -14,7 +14,7 @@ import subprocess from typing import List import uuid -from shared.constants import Colors, InternalFileType +from shared.constants import Colors, InternalFileType, SortOrder from backend.serializers.dto import AIModelDto, AppSettingDto, BackupDto, BackupListDto, InferenceLogDto, InternalFileDto, ProjectDto, SettingDto, TimingDto, UserDto from shared.constants import AUTOMATIC_FILE_HOSTING, LOCAL_DATABASE_NAME, SERVER, ServerType @@ -180,7 +180,8 @@ def get_file_from_uuid(self, uuid): return InternalResponse(payload, 'file found', True) - # TODO: create a dao for this + # TODO: right now if page is passed then paginated result will be provided + # or else entire list will be fetched. will standardise this later def get_all_file_list(self, **kwargs): kwargs['is_disabled'] = False @@ -191,11 +192,42 @@ def get_all_file_list(self, **kwargs): kwargs['project_id'] = project.id - file_list = InternalFileObject.objects.filter(**kwargs).all() - - payload = { - 'data': InternalFileDto(file_list, many=True).data - } + if 'page' in kwargs and kwargs['page']: + page = kwargs['page'] + del kwargs['page'] + data_per_page = kwargs['data_per_page'] + del kwargs['data_per_page'] + sort_order = kwargs['sort_order'] if 'sort_order' in kwargs else None + del kwargs['sort_order'] + + file_list = InternalFileObject.objects.filter(**kwargs).all() + if sort_order: + if sort_order == SortOrder.DESCENDING.value: + file_list = file_list.order_by('-created_on') + + paginator = Paginator(file_list, data_per_page) + if page > paginator.num_pages or page < 1: + return InternalResponse({}, "invalid page number", False) + + payload = { + "data_per_page": data_per_page, + "page": page, + "total_pages": paginator.num_pages, + "count": paginator.count, + "data": InferenceLogDto( + paginator.page(page), many=True + 
).data, + } + else: + file_list = InternalFileObject.objects.filter(**kwargs).all() + + if 'sort_order' in kwargs: + if kwargs['sort_order'] == SortOrder.DESCENDING.value: + file_list = file_list.order_by('-created_on') + + payload = { + 'data': InternalFileDto(file_list, many=True).data + } return InternalResponse(payload, 'file found', True) diff --git a/banodoco_runner.py b/banodoco_runner.py index dd462feb..48e61279 100644 --- a/banodoco_runner.py +++ b/banodoco_runner.py @@ -5,7 +5,7 @@ import setproctitle from dotenv import load_dotenv import django -from shared.constants import InferenceParamType, InferenceStatus, ProjectMetaData +from shared.constants import InferenceParamType, InferenceStatus, InferenceType, ProjectMetaData from shared.logging.constants import LoggingType from shared.logging.logging import AppLogger from utils.common_utils import acquire_lock, release_lock @@ -68,6 +68,7 @@ def check_and_update_db(): is_disabled=False).all() timing_update_list = {} # {project_id: [timing_uuids]} + gallery_update_list = {} # {project_id: True/False} for log in log_list: input_params = json.loads(log.input_params) replicate_data = input_params.get(InferenceParamType.REPLICATE_INFERENCE.value, None) @@ -99,10 +100,20 @@ def check_and_update_db(): origin_data['log_uuid'] = log.uuid print("processing inference output") process_inference_output(**origin_data) - if str(log.project.uuid) not in timing_update_list: - timing_update_list[str(log.project.uuid)] = [] - - timing_update_list[str(log.project.uuid)].append(origin_data['timing_uuid']) + + if origin_data['inference_type'] in [InferenceType.FRAME_INTERPOLATION.value, \ + InferenceType.FRAME_TIMING_IMAGE_INFERENCE.value, \ + InferenceType.SINGLE_PREVIEW_VIDEO.value]: + if str(log.project.uuid) not in timing_update_list: + timing_update_list[str(log.project.uuid)] = [] + + timing_update_list[str(log.project.uuid)].append(origin_data['timing_uuid']) + + elif origin_data['inference_type'] == 
InferenceType.GALLERY_IMAGE_GENERATION.value: + if str(log.project.uuid) not in gallery_update_list: + gallery_update_list[str(log.project.uuid)] = False + + gallery_update_list[str(log.project.uuid)] = True else: app_logger.log(LoggingType.DEBUG, f"Error: {response.content}") @@ -113,11 +124,22 @@ def check_and_update_db(): # adding update_data in the project from backend.models import Project + final_res = {} for project_uuid, val in timing_update_list.items(): + final_res[project_uuid] = {ProjectMetaData.DATA_UPDATE.value: list(set(val))} + + for project_uuid, val in gallery_update_list.items(): + if project_uuid in final_res: + final_res[project_uuid][ProjectMetaData.GALLERY_UPDATE.value] = val + else: + final_res[project_uuid] = {ProjectMetaData.GALLERY_UPDATE.value: val} + + + for project_uuid, val in final_res.items(): key = str(project_uuid) if acquire_lock(key): val = list(set(val)) - _ = Project.objects.filter(uuid=project_uuid).update(meta_data=json.dumps({ProjectMetaData.DATA_UPDATE.value: val})) + _ = Project.objects.filter(uuid=project_uuid).update(meta_data=json.dumps(val)) release_lock(key) if not len(log_list): diff --git a/shared/constants.py b/shared/constants.py index 4adae1e1..9b898311 100644 --- a/shared/constants.py +++ b/shared/constants.py @@ -59,6 +59,7 @@ class InternalFileTag(ExtendedEnum): COMPLETE_GENERATED_VIDEO = 'complete_generated_video' INPUT_VIDEO = 'input_video' TEMP_IMAGE = 'temp' + GALLERY_IMAGE = 'gallery_image' class AnimationStyleType(ExtendedEnum): INTERPOLATION = "Interpolate to next" @@ -78,6 +79,7 @@ class InferenceType(ExtendedEnum): FRAME_TIMING_VIDEO_INFERENCE = "frame_timing_video_inference" # for generating variants of a video SINGLE_PREVIEW_VIDEO = "single_preview_video" # for generating a single preview video FRAME_INTERPOLATION = "frame_interpolation" # for generating single/multiple interpolated videos + GALLERY_IMAGE_GENERATION = "gallery_image_generation" # for generating gallery images class 
InferenceStatus(ExtendedEnum): QUEUED = "queued" @@ -93,6 +95,11 @@ class InferenceParamType(ExtendedEnum): class ProjectMetaData(ExtendedEnum): DATA_UPDATE = "data_update" # info regarding cache/data update when runner updates the db + GALLERY_UPDATE = "gallery_update" + +class SortOrder(ExtendedEnum): + ASCENDING = "asc" + DESCENDING = "desc" ##################### global constants ##################### SERVER = os.getenv('SERVER', ServerType.PRODUCTION.value) diff --git a/ui_components/components/video_rendering_page.py b/ui_components/components/video_rendering_page.py index d6a23147..04242564 100644 --- a/ui_components/components/video_rendering_page.py +++ b/ui_components/components/video_rendering_page.py @@ -55,7 +55,11 @@ def video_rendering_page(mainheader2, project_uuid): st.markdown("***") # TODO: only show completed videos - video_list: List[InternalFileObject] = data_repo.get_all_file_list(InternalFileType.VIDEO.value, tag=InternalFileTag.COMPLETE_GENERATED_VIDEO.value, project_id=project_uuid) + video_list, _ = data_repo.get_all_file_list( + file_type=InternalFileType.VIDEO.value, + tag=InternalFileTag.COMPLETE_GENERATED_VIDEO.value, + project_id=project_uuid + ) video_list = sorted(video_list, key=lambda x: x.created_on, reverse=True) for video in video_list: diff --git a/ui_components/constants.py b/ui_components/constants.py index c8902936..c13192e3 100644 --- a/ui_components/constants.py +++ b/ui_components/constants.py @@ -51,6 +51,7 @@ class DefaultProjectSettingParams: batch_animation_style = AnimationStyleType.INTERPOLATION.value batch_model = None total_log_pages = 1 + total_gallery_pages = 1 # TODO: make proper paths for every file CROPPED_IMG_LOCAL_PATH = "videos/temp/cropped.png" diff --git a/ui_components/methods/common_methods.py b/ui_components/methods/common_methods.py index 6eeac9bf..86234852 100644 --- a/ui_components/methods/common_methods.py +++ b/ui_components/methods/common_methods.py @@ -12,16 +12,18 @@ from io import BytesIO 
import numpy as np import urllib3 -from shared.constants import SERVER, InferenceType, InternalFileType, ServerType +from shared.constants import SERVER, InferenceType, InternalFileTag, InternalFileType, ProjectMetaData, ServerType from pydub import AudioSegment from backend.models import InternalFileObject from ui_components.constants import SECOND_MASK_FILE, SECOND_MASK_FILE_PATH, WorkflowStageType from ui_components.methods.file_methods import add_temp_file_to_project, convert_bytes_to_file, generate_pil_image, generate_temp_file, save_or_host_file, save_or_host_file_bytes from ui_components.methods.video_methods import calculate_desired_duration_of_individual_clip, update_speed_of_video_clip from ui_components.models import InternalAIModelObject, InternalFrameTimingObject, InternalSettingObject +from utils.common_utils import acquire_lock, release_lock from utils.constants import ImageStage from utils.data_repo.data_repo import DataRepo from shared.constants import AnimationStyleType +from utils.cache.cache import StCache from ui_components.models import InternalFileObject from typing import Union @@ -1075,4 +1077,57 @@ def process_inference_output(**kwargs): del kwargs['log_uuid'] data_repo.update_inference_log_origin_data(log_uuid, **kwargs) - return True \ No newline at end of file + # --------------------- GALLERY IMAGE GENERATION ------------------------ + elif inference_type == InferenceType.GALLERY_IMAGE_GENERATION.value: + output = kwargs.get('output') + + if output: + if isinstance(output, str) and output.startswith("http"): + temp_output_file = generate_temp_file(output, '.mp4') + output = None + with open(temp_output_file.name, 'rb') as f: + output = f.read() + + os.remove(temp_output_file.name) + + log_uuid = kwargs.get('log_uuid') + project_uuid = kwargs.get('project_uuid') + log = data_repo.get_inference_log_from_uuid(log_uuid) + output_file = data_repo.create_file( + name=filename, + type=InternalFileType.IMAGE.value, + hosted_url=output[0], + 
inference_log_id=log.uuid, + project_id=project_uuid, + tag=InternalFileTag.GALLERY_IMAGE.value + ) + else: + del kwargs['log_uuid'] + data_repo.update_inference_log_origin_data(log_uuid, **kwargs) + + return True + + +def check_project_meta_data(project_uuid): + # checking for project metadata (like cache updates) + # project_update_data is of the format {"data_update": [timing_uuid], "gallery_update": True/False} + data_repo = DataRepo() + + key = project_uuid + if acquire_lock(key): + project = data_repo.get_project_from_uuid(project_uuid) + timing_update_data = json.loads(project.meta_data).\ + get(ProjectMetaData.DATA_UPDATE.value, None) if project.meta_data else None + if timing_update_data: + for timing_uuid in timing_update_data: + _ = data_repo.get_timing_from_uuid(timing_uuid, invalidate_cache=True) + + # removing the metadata after processing + data_repo.update_project(uuid=project.uuid, meta_data=json.dumps({ProjectMetaData.DATA_UPDATE.value: []})) + + gallery_update_data = json.loads(project.meta_data).\ + get(ProjectMetaData.GALLERY_UPDATE.value, False) if project.meta_data else False + if gallery_update_data: + pass + + release_lock(key) \ No newline at end of file diff --git a/ui_components/setup.py b/ui_components/setup.py index c830c31a..5b7b250c 100644 --- a/ui_components/setup.py +++ b/ui_components/setup.py @@ -15,6 +15,7 @@ from ui_components.components.mood_board_page import mood_board_page from streamlit_option_menu import option_menu from ui_components.constants import CreativeProcessType +from ui_components.methods.common_methods import check_project_meta_data from ui_components.models import InternalAppSettingObject from utils.common_utils import acquire_lock, create_working_assets, get_current_user, get_current_user_uuid, release_lock, reset_project_state from utils import st_memory @@ -85,22 +86,7 @@ def setup_app_ui(): reset_project_state() st.session_state["project_uuid"] = project_list[selected_index].uuid - - # checking for project 
metadata (like cache updates) - # project_update_data is of the format {"data_update": [{"timing_uuid": timing_uuid}]} - key = st.session_state["project_uuid"] - if acquire_lock(key): - project = data_repo.get_project_from_uuid(st.session_state["project_uuid"]) - project_update_data = json.loads(project.meta_data).\ - get(ProjectMetaData.DATA_UPDATE.value, None) if project.meta_data else None - if project_update_data: - for timing_uuid in project_update_data: - _ = data_repo.get_timing_from_uuid(timing_uuid, invalidate_cache=True) - - # removing the metadata after processing - data_repo.update_project(uuid=project.uuid, meta_data=json.dumps({ProjectMetaData.DATA_UPDATE.value: []})) - - release_lock(key) + check_project_meta_data(st.session_state["project_uuid"]) if "current_frame_index" not in st.session_state: st.session_state['current_frame_index'] = 1 diff --git a/ui_components/widgets/style_explorer_element.py b/ui_components/widgets/style_explorer_element.py index e65a12a1..f2124429 100644 --- a/ui_components/widgets/style_explorer_element.py +++ b/ui_components/widgets/style_explorer_element.py @@ -1,14 +1,20 @@ +import json import streamlit as st -from ui_components.methods.common_methods import promote_image_variant +from ui_components.methods.common_methods import process_inference_output, promote_image_variant from ui_components.methods.ml_methods import query_llama2 +from utils.constants import MLQueryObject from utils.data_repo.data_repo import DataRepo -from shared.constants import AIModelType +from shared.constants import AIModelType, InferenceType, InternalFileTag, InternalFileType, SortOrder import replicate +from utils.ml_processor.ml_interface import get_ml_client +from utils.ml_processor.replicate.constants import REPLICATE_MODEL + def style_explorer_element(project_uuid): st.markdown("***") data_repo = DataRepo() + project_settings = data_repo.get_project_setting(project_uuid) _, a2, _ = st.columns([0.5, 1, 0.5]) prompt = a2.text_area("What's 
your base prompt?", key="prompt", help="This will be included at the beginning of each prompt") @@ -19,7 +25,12 @@ def style_explorer_element(project_uuid): action_instructions = create_variate_option(b4, "action") scene_instructions = create_variate_option(b5, "scene") - model_name_list = list(set([m.name for m in data_repo.get_all_ai_model_list(model_type_list=[AIModelType.TXT2IMG.value], custom_trained=False)])) + model_list = data_repo.get_all_ai_model_list(model_type_list=[AIModelType.TXT2IMG.value], custom_trained=False) + model_dict = {} + for m in model_list: + model_dict[m.name] = m + + model_name_list = list(model_dict.keys()) _, c2, _ = st.columns([0.25, 1, 0.25]) with c2: @@ -31,48 +42,106 @@ def style_explorer_element(project_uuid): _, e2, _ = st.columns([0.5, 1, 0.5]) if e2.button("Generate images", key="generate_images", use_container_width=True, type="primary"): + ml_client = get_ml_client() counter = 0 varied_text = "" num_models = len(models_to_use) num_images_per_model = number_to_generate // num_models + varied_prompt = create_prompt( + styling_instructions=styling_instructions, + character_instructions=character_instructions, + action_instructions=action_instructions, + scene_instructions=scene_instructions + ) for _ in range(num_images_per_model): for model_name in models_to_use: if counter % 4 == 0 and (styling_instructions or character_instructions or action_instructions or scene_instructions): - varied_text = create_prompt(styling_instructions, character_instructions, action_instructions, scene_instructions) + varied_text = varied_prompt prompt_with_variations = f"{prompt}, {varied_text}" if prompt else varied_text st.write(f"Prompt: '{prompt_with_variations}'") - st.write(f"Model: {model_name}") + st.write(f"Model: {model_name}") counter += 1 - - timing = data_repo.get_timing_from_uuid(st.session_state['current_frame_uuid']) - variants = timing.alternative_images_list - - st.markdown("***") - num_columns = st.slider('Number of columns', 
min_value=1, max_value=10, value=4) + query_obj = MLQueryObject( + timing_uuid=None, + model_uuid=None, + guidance_scale=7.5, + seed=-1, + num_inference_steps=30, + strength=1, + adapter_type=None, + prompt=prompt_with_variations, + negative_prompt="bad image, worst image, bad anatomy, washed out colors", + height=project_settings.height, + width=project_settings.width, + ) + + replicate_model = REPLICATE_MODEL.get_model_by_db_obj(model_dict[model_name]) + output, log = ml_client.predict_model_output_standardized(replicate_model, query_obj, queue_inference=True) + + inference_data = { + "inference_type": InferenceType.GALLERY_IMAGE_GENERATION.value, + "output": output, + "log_uuid": log.uuid, + "project_uuid": project_uuid + } + process_inference_output(**inference_data) + + project_setting = data_repo.get_project_setting(project_uuid) + page_number = st.radio("Select page", options=range(1, project_setting.total_gallery_pages + 1), horizontal=True) num_items_per_page = 30 - num_pages = len(variants) // num_items_per_page - if len(variants) % num_items_per_page > 0: - num_pages += 1 # Add extra page if there are remaining items - - page_number = st.radio("Select page", options=range(1, num_pages + 1)) - - start_index = (page_number - 1) * num_items_per_page - end_index = start_index + num_items_per_page - - for i in range(start_index, min(end_index, len(variants)), num_columns): - cols = st.columns(num_columns) - for j in range(num_columns): - if i + j < len(variants): - with cols[j]: - st.image(variants[i + j].location, use_column_width=True) - with st.expander(f'Variant #{i + j + 1}', False): - st.info("Instructions: PLACEHOLDER") - - if st.button(f"Add to timeline", key=f"Promote Variant #{i + j + 1} for {st.session_state['current_frame_index']}", help="Promote this variant to the primary image", use_container_width=True): - promote_image_variant(timing.uuid, i + j) - st.rerun() + + gallery_image_list, res_payload = data_repo.get_all_file_list( + 
file_type=InternalFileType.IMAGE.value, + tag=InternalFileTag.GALLERY_IMAGE.value, + project_id=project_uuid, + page=page_number, + data_per_page=num_items_per_page, + sort_order=SortOrder.DESCENDING.value # newly created images appear first + ) + + if project_setting.total_gallery_pages != res_payload['total_pages']: + project_setting.total_gallery_pages = res_payload['total_pages'] + st.rerun() + + if gallery_image_list and len(gallery_image_list): st.markdown("***") + num_columns = st.slider('Number of columns', min_value=1, max_value=10, value=4) + + if len(gallery_image_list) % num_items_per_page > 0: + num_pages += 1 # Add extra page if there are remaining items + + + start_index = (page_number - 1) * num_items_per_page + end_index = start_index + num_items_per_page + + for i in range(start_index, min(end_index, len(gallery_image_list)), num_columns): + cols = st.columns(num_columns) + for j in range(num_columns): + if i + j < len(gallery_image_list): + with cols[j]: + st.image(gallery_image_list[i + j].location, use_column_width=True) + with st.expander(f'Variant #{i + j + 1}', False): + if gallery_image_list[i + j].inference_log: + log = data_repo.get_inference_log_from_uuid(gallery_image_list[i + j].inference_log.uuid) + if log: + input_params = json.loads(log.input_params) + prompt = input_params.get('prompt', 'No prompt found') + model = json.loads(log.output_details)['model_name'].split('/')[-1] + st.info( + f"Prompt: '{prompt}' -- Model: {model}" + ) + else: + st.warning("No data found") + else: + st.warning("No data found") + + if st.button(f"Add to timeline", key=f"Promote Variant #{i + j + 1} for {st.session_state['current_frame_index']}", help="Promote this variant to the primary image", use_container_width=True): + # TODO: add method to create a new frame with this as the main image + st.rerun() + st.markdown("***") + else: + st.warning("No images present") def create_variate_option(column, key): diff --git a/utils/data_repo/data_repo.py 
b/utils/data_repo/data_repo.py index bc1f33fa..a5df9424 100644 --- a/utils/data_repo/data_repo.py +++ b/utils/data_repo/data_repo.py @@ -79,17 +79,15 @@ def get_file_list_from_log_uuid_list(self, log_uuid_list): file_list = res.data['data'] if res.status else [] return [InternalFileObject(**file) for file in file_list] - def get_all_file_list(self, file_type: InternalFileType, tag = None, project_id = None): - filter_data = {"type": file_type} - if tag: - filter_data['tag'] = tag - if project_id: - filter_data['project_id'] = project_id + # kwargs - file_type: InternalFileType, tag = None, project_id = None, page=None, data_per_page=None, sort_order=None + def get_all_file_list(self, **kwargs): + kwargs["type"] = kwargs['file_type'] + del kwargs['file_type'] - res = self.db_repo.get_all_file_list(**filter_data) + res = self.db_repo.get_all_file_list(**kwargs) file_list = res.data['data'] if res.status else None - return [InternalFileObject(**file) for file in file_list] if file_list else [] + return ([InternalFileObject(**file) for file in file_list] if file_list else [], res.data) def create_or_update_file(self, uuid, type=InternalFileType.IMAGE.value, **kwargs): file = self.db_repo.create_or_update_file(uuid, type, **kwargs).data['data'] From 9829c2f2a2df49e739679720d92393f700b1ff6b Mon Sep 17 00:00:00 2001 From: piyushk52 Date: Tue, 17 Oct 2023 14:38:17 +0530 Subject: [PATCH 089/164] wip: gallery inference fixes --- backend/db_repo.py | 2 +- backend/models.py | 2 +- banodoco_runner.py | 12 +++++++---- ui_components/methods/common_methods.py | 4 +++- ui_components/setup.py | 21 ++----------------- ui_components/widgets/frame_selector.py | 6 ------ ui_components/widgets/sidebar_logger.py | 17 +++++++++++---- .../widgets/style_explorer_element.py | 6 +----- 8 files changed, 29 insertions(+), 41 deletions(-) diff --git a/backend/db_repo.py b/backend/db_repo.py index 0728027b..7057950c 100644 --- a/backend/db_repo.py +++ b/backend/db_repo.py @@ -214,7 +214,7 @@ def 
get_all_file_list(self, **kwargs): "page": page, "total_pages": paginator.num_pages, "count": paginator.count, - "data": InferenceLogDto( + "data": InternalFileDto( paginator.page(page), many=True ).data, } diff --git a/backend/models.py b/backend/models.py index 2a1c8ac8..a5e5113f 100644 --- a/backend/models.py +++ b/backend/models.py @@ -109,7 +109,7 @@ def save(self, *args, **kwargs): if self.project: video = self.project.uuid - file_location = "videos/" + video + "/assets/videos/0_raw/" + str(uuid.uuid4()) + ".png" + file_location = "videos/" + str(video) + "/assets/videos/0_raw/" + str(uuid.uuid4()) + ".png" try: urllib.request.urlretrieve(self.hosted_url, file_location) self.local_path = file_location diff --git a/banodoco_runner.py b/banodoco_runner.py index 48e61279..c4739084 100644 --- a/banodoco_runner.py +++ b/banodoco_runner.py @@ -96,25 +96,29 @@ def check_and_update_db(): if origin_data and log_status == InferenceStatus.COMPLETED.value: from ui_components.methods.common_methods import process_inference_output + # try: origin_data['output'] = output_details['output'] origin_data['log_uuid'] = log.uuid print("processing inference output") process_inference_output(**origin_data) if origin_data['inference_type'] in [InferenceType.FRAME_INTERPOLATION.value, \ - InferenceType.FRAME_TIMING_IMAGE_INFERENCE.value, \ - InferenceType.SINGLE_PREVIEW_VIDEO.value]: + InferenceType.FRAME_TIMING_IMAGE_INFERENCE.value, \ + InferenceType.SINGLE_PREVIEW_VIDEO.value]: if str(log.project.uuid) not in timing_update_list: timing_update_list[str(log.project.uuid)] = [] - timing_update_list[str(log.project.uuid)].append(origin_data['timing_uuid']) elif origin_data['inference_type'] == InferenceType.GALLERY_IMAGE_GENERATION.value: if str(log.project.uuid) not in gallery_update_list: gallery_update_list[str(log.project.uuid)] = False - gallery_update_list[str(log.project.uuid)] = True + # except Exception as e: + # app_logger.log(LoggingType.ERROR, f"Error: {e}") + # 
output_details['error'] = str(e) + # InferenceLog.objects.filter(id=log.id).update(status=InferenceStatus.FAILED.value, output_details=json.dumps(output_details)) + else: app_logger.log(LoggingType.DEBUG, f"Error: {response.content}") else: diff --git a/ui_components/methods/common_methods.py b/ui_components/methods/common_methods.py index 86234852..5d45c44c 100644 --- a/ui_components/methods/common_methods.py +++ b/ui_components/methods/common_methods.py @@ -1083,7 +1083,7 @@ def process_inference_output(**kwargs): if output: if isinstance(output, str) and output.startswith("http"): - temp_output_file = generate_temp_file(output, '.mp4') + temp_output_file = generate_temp_file(output, '.png') output = None with open(temp_output_file.name, 'rb') as f: output = f.read() @@ -1093,6 +1093,7 @@ def process_inference_output(**kwargs): log_uuid = kwargs.get('log_uuid') project_uuid = kwargs.get('project_uuid') log = data_repo.get_inference_log_from_uuid(log_uuid) + filename = str(uuid.uuid4()) + ".png" output_file = data_repo.create_file( name=filename, type=InternalFileType.IMAGE.value, @@ -1102,6 +1103,7 @@ def process_inference_output(**kwargs): tag=InternalFileTag.GALLERY_IMAGE.value ) else: + log_uuid = kwargs.get('log_uuid') del kwargs['log_uuid'] data_repo.update_inference_log_origin_data(log_uuid, **kwargs) diff --git a/ui_components/setup.py b/ui_components/setup.py index 5b7b250c..49703869 100644 --- a/ui_components/setup.py +++ b/ui_components/setup.py @@ -116,17 +116,10 @@ def setup_app_ui(): st.session_state['main_view_type'] = st_memory.menu(None, main_view_types, icons=['search-heart', 'tools', "play-circle", 'stopwatch'], menu_icon="cast", default_index=0, key="main_view_type_name", orientation="horizontal", styles={ "nav-link": {"font-size": "15px", "margin": "0px", "--hover-color": "#eee"}, "nav-link-selected": {"background-color": "red"}}) - mainheader1, mainheader2 = st.columns([3, 2]) - # with mainheader1: - # st.header(st.session_state["page"]) - - 
- + _, mainheader2 = st.columns([3, 2]) if st.session_state["main_view_type"] == "Creative Process": - with st.sidebar: - view_types = ["Explorer","Timeline","Individual"] if 'frame_styling_view_type_index' not in st.session_state: @@ -143,7 +136,6 @@ def setup_app_ui(): else: st.session_state['frame_styling_view_type_index'] = None - # Option menu st.session_state['frame_styling_view_type'] = option_menu( None, @@ -173,12 +165,7 @@ def setup_app_ui(): st.session_state['page'] = option_menu(None, pages, icons=['palette', 'camera-reels', "hourglass", 'stopwatch'], menu_icon="cast", orientation="horizontal", key="secti2on_selector", styles={ "nav-link": {"font-size": "15px", "margin": "0px", "--hover-color": "#eee"}, "nav-link-selected": {"background-color": "orange"}}, manual_select=st.session_state["manual_select"]) - # TODO: CORRECT-CODE - - - - frame_styling_page( - mainheader2, st.session_state["project_uuid"]) + frame_styling_page(mainheader2, st.session_state["project_uuid"]) elif st.session_state["main_view_type"] == "Tools & Settings": @@ -199,12 +186,8 @@ def setup_app_ui(): project_settings_page(st.session_state["project_uuid"]) elif st.session_state["page"] == "Mood Board": mood_board_page(st.session_state["project_uuid"]) - - - elif st.session_state["main_view_type"] == "Video Rendering": - video_rendering_page( mainheader2, st.session_state["project_uuid"]) diff --git a/ui_components/widgets/frame_selector.py b/ui_components/widgets/frame_selector.py index 562fd30c..ae5e5fbb 100644 --- a/ui_components/widgets/frame_selector.py +++ b/ui_components/widgets/frame_selector.py @@ -27,12 +27,6 @@ def frame_selector_widget(): with time2: single_frame_time_selector(st.session_state['current_frame_uuid'], 'navbar', shift_frames=False) - with st.expander(f"🖼️ Frame #{st.session_state['current_frame_index']} Details"): - a1, a2 = st.columns([1,1]) - with a1: - st.warning(f"Guidance Image:") - display_image(st.session_state['current_frame_uuid'], 
stage=WorkflowStageType.SOURCE.value, clickable=False) - with st.expander(f"🖼️ Frame #{st.session_state['current_frame_index']} Details"): a1, a2 = st.columns([1,1]) with a1: diff --git a/ui_components/widgets/sidebar_logger.py b/ui_components/widgets/sidebar_logger.py index a5e614b9..877c8d64 100644 --- a/ui_components/widgets/sidebar_logger.py +++ b/ui_components/widgets/sidebar_logger.py @@ -89,10 +89,19 @@ def sidebar_logger(project_uuid): st.warning("Canceled") if output_url: - timing = data_repo.get_timing_from_uuid(origin_data['timing_uuid']) - if timing: - if st.button(f"Jump to frame {timing.aux_frame_index + 1}", key=str(log.uuid)): - update_current_frame_index(timing.aux_frame_index + 1) + if 'timing_uuid' in origin_data: + timing = data_repo.get_timing_from_uuid(origin_data['timing_uuid']) + if timing: + if st.button(f"Jump to frame {timing.aux_frame_index + 1}", key=str(log.uuid)): + update_current_frame_index(timing.aux_frame_index + 1) + else: + if st.button(f"Jump to explorer view", key=str(log.uuid)): + # TODO: fix this + st.session_state['main_view_type'] = "Creative Process" + st.session_state['frame_styling_view_type_index'] = 0 + st.session_state['frame_styling_view_type'] = "Explorer" + st.session_state['change_view_type'] = False + st.rerun() st.markdown("---") \ No newline at end of file diff --git a/ui_components/widgets/style_explorer_element.py b/ui_components/widgets/style_explorer_element.py index f2124429..eb3b23bb 100644 --- a/ui_components/widgets/style_explorer_element.py +++ b/ui_components/widgets/style_explorer_element.py @@ -107,10 +107,6 @@ def style_explorer_element(project_uuid): if gallery_image_list and len(gallery_image_list): st.markdown("***") num_columns = st.slider('Number of columns', min_value=1, max_value=10, value=4) - - if len(gallery_image_list) % num_items_per_page > 0: - num_pages += 1 # Add extra page if there are remaining items - start_index = (page_number - 1) * num_items_per_page end_index = start_index + 
num_items_per_page @@ -163,7 +159,7 @@ def create_prompt(**kwargs): "scene_instructions": "Input|Scene Description:\nForest|Misty woods with towering trees and glowing plants.\nFuturistic city|Skyscrapers, flying cars, neon lights in a futuristic metropolis.\nMedieval|Castle courtyard with knights, cobblestones, and a fountain.\nBeach|Golden sands, rolling waves, and a vibrant sunset.\nApocalypse|Ruined buildings and desolation in a bleak wasteland.", } - for instruction_type, user_instruction in kwargs: + for instruction_type, user_instruction in kwargs.items(): if instruction_type in system_instruction_template_list and user_instruction: result = query_llama2(user_instruction, system_instruction_template_list[instruction_type]) text_list.append(result) From 52f931fd1d5cb27143bc216dd87fdcc699cc6975 Mon Sep 17 00:00:00 2001 From: piyushk52 Date: Tue, 17 Oct 2023 15:58:33 +0530 Subject: [PATCH 090/164] db lock fixed + runner fixed --- backend/db_repo.py | 5 +- backend/migrations/0011_lock_added.py | 7 ++- backend/models.py | 3 +- banodoco_runner.py | 53 ++++++++++----------- ui_components/components/mood_board_page.py | 1 + utils/common_utils.py | 7 +-- 6 files changed, 41 insertions(+), 35 deletions(-) diff --git a/backend/db_repo.py b/backend/db_repo.py index 7057950c..66797e01 100644 --- a/backend/db_repo.py +++ b/backend/db_repo.py @@ -1,3 +1,4 @@ +import datetime import inspect import json import os @@ -1492,7 +1493,9 @@ def generate_payment_link(self, amount): # lock def acquire_lock(self, key): with transaction.atomic(): - _, created = Lock.objects.get_or_create(row_key=key) + lock, created = Lock.objects.get_or_create(row_key=key) + if lock.created_on + datetime.timedelta(minutes=1) < datetime.datetime.now(): + created = True # after 1 min, we will assume this to be a fresh lock return InternalResponse({'data': True if created else False}, 'success', True) def release_lock(self, key): diff --git a/backend/migrations/0011_lock_added.py 
b/backend/migrations/0011_lock_added.py index 11d1abb9..ae04b35e 100644 --- a/backend/migrations/0011_lock_added.py +++ b/backend/migrations/0011_lock_added.py @@ -1,6 +1,7 @@ -# Generated by Django 4.2.1 on 2023-10-14 12:35 +# Generated by Django 4.2.1 on 2023-10-17 05:06 from django.db import migrations, models +import uuid class Migration(migrations.Migration): @@ -14,6 +15,10 @@ class Migration(migrations.Migration): name='Lock', fields=[ ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), + ('uuid', models.UUIDField(default=uuid.uuid4)), + ('created_on', models.DateTimeField(auto_now_add=True)), + ('updated_on', models.DateTimeField(auto_now=True)), + ('is_disabled', models.BooleanField(default=False)), ('row_key', models.CharField(max_length=255, unique=True)), ], options={ diff --git a/backend/models.py b/backend/models.py index a5e5113f..e258870a 100644 --- a/backend/models.py +++ b/backend/models.py @@ -1,3 +1,4 @@ +from django.utils import timezone from django.db import models import uuid import json @@ -18,7 +19,7 @@ class Meta: abstract = True -class Lock(models.Model): +class Lock(BaseModel): row_key = models.CharField(max_length=255, unique=True) class Meta: diff --git a/banodoco_runner.py b/banodoco_runner.py index c4739084..fa503a9b 100644 --- a/banodoco_runner.py +++ b/banodoco_runner.py @@ -96,28 +96,28 @@ def check_and_update_db(): if origin_data and log_status == InferenceStatus.COMPLETED.value: from ui_components.methods.common_methods import process_inference_output - # try: - origin_data['output'] = output_details['output'] - origin_data['log_uuid'] = log.uuid - print("processing inference output") - process_inference_output(**origin_data) - - if origin_data['inference_type'] in [InferenceType.FRAME_INTERPOLATION.value, \ - InferenceType.FRAME_TIMING_IMAGE_INFERENCE.value, \ - InferenceType.SINGLE_PREVIEW_VIDEO.value]: - if str(log.project.uuid) not in timing_update_list: - 
timing_update_list[str(log.project.uuid)] = [] - timing_update_list[str(log.project.uuid)].append(origin_data['timing_uuid']) - - elif origin_data['inference_type'] == InferenceType.GALLERY_IMAGE_GENERATION.value: - if str(log.project.uuid) not in gallery_update_list: - gallery_update_list[str(log.project.uuid)] = False - gallery_update_list[str(log.project.uuid)] = True - - # except Exception as e: - # app_logger.log(LoggingType.ERROR, f"Error: {e}") - # output_details['error'] = str(e) - # InferenceLog.objects.filter(id=log.id).update(status=InferenceStatus.FAILED.value, output_details=json.dumps(output_details)) + try: + origin_data['output'] = output_details['output'] + origin_data['log_uuid'] = log.uuid + print("processing inference output") + process_inference_output(**origin_data) + + if origin_data['inference_type'] in [InferenceType.FRAME_INTERPOLATION.value, \ + InferenceType.FRAME_TIMING_IMAGE_INFERENCE.value, \ + InferenceType.SINGLE_PREVIEW_VIDEO.value]: + if str(log.project.uuid) not in timing_update_list: + timing_update_list[str(log.project.uuid)] = [] + timing_update_list[str(log.project.uuid)].append(origin_data['timing_uuid']) + + elif origin_data['inference_type'] == InferenceType.GALLERY_IMAGE_GENERATION.value: + if str(log.project.uuid) not in gallery_update_list: + gallery_update_list[str(log.project.uuid)] = False + gallery_update_list[str(log.project.uuid)] = True + + except Exception as e: + app_logger.log(LoggingType.ERROR, f"Error: {e}") + output_details['error'] = str(e) + InferenceLog.objects.filter(id=log.id).update(status=InferenceStatus.FAILED.value, output_details=json.dumps(output_details)) else: app_logger.log(LoggingType.DEBUG, f"Error: {response.content}") @@ -133,16 +133,15 @@ def check_and_update_db(): final_res[project_uuid] = {ProjectMetaData.DATA_UPDATE.value: list(set(val))} for project_uuid, val in gallery_update_list.items(): - if project_uuid in final_res: - final_res[project_uuid][ProjectMetaData.GALLERY_UPDATE.value] 
= val - else: - final_res[project_uuid] = {ProjectMetaData.GALLERY_UPDATE.value: val} + if project_uuid not in final_res: + final_res[project_uuid] = {} + + final_res[project_uuid].update({f"{ProjectMetaData.GALLERY_UPDATE.value}": val}) for project_uuid, val in final_res.items(): key = str(project_uuid) if acquire_lock(key): - val = list(set(val)) _ = Project.objects.filter(uuid=project_uuid).update(meta_data=json.dumps(val)) release_lock(key) diff --git a/ui_components/components/mood_board_page.py b/ui_components/components/mood_board_page.py index 19905967..2657224b 100644 --- a/ui_components/components/mood_board_page.py +++ b/ui_components/components/mood_board_page.py @@ -1,4 +1,5 @@ import streamlit as st +from ui_components.methods.common_methods import promote_image_variant from utils.data_repo.data_repo import DataRepo from shared.constants import AIModelType diff --git a/utils/common_utils.py b/utils/common_utils.py index 3436d2ee..b42915a3 100644 --- a/utils/common_utils.py +++ b/utils/common_utils.py @@ -189,15 +189,12 @@ def is_process_active(custom_process_name): def acquire_lock(key): data_repo = DataRepo() retries = 0 - while retries < 6: + while retries < 1: lock_status = data_repo.acquire_lock(key) - if lock_status: return lock_status - retries += 1 - time.sleep(0.3) - + time.sleep(0.2) return False def release_lock(key): From 8328046bc89710b52c63b24a4df05449e94a5a18 Mon Sep 17 00:00:00 2001 From: piyushk52 Date: Tue, 17 Oct 2023 16:22:16 +0530 Subject: [PATCH 091/164] varied prompt gen fixed --- backend/db_repo.py | 2 +- ui_components/methods/ml_methods.py | 13 ++++++------- utils/ml_processor/replicate/constants.py | 3 +++ 3 files changed, 10 insertions(+), 8 deletions(-) diff --git a/backend/db_repo.py b/backend/db_repo.py index 66797e01..460a71d6 100644 --- a/backend/db_repo.py +++ b/backend/db_repo.py @@ -31,7 +31,7 @@ logger = AppLogger() -# @measure_execution_time +@measure_execution_time class DBRepo: _instance = None _count = 0 diff 
--git a/ui_components/methods/ml_methods.py b/ui_components/methods/ml_methods.py index 214df22e..08974d94 100644 --- a/ui_components/methods/ml_methods.py +++ b/ui_components/methods/ml_methods.py @@ -305,21 +305,20 @@ def dynamic_prompting(prompt, source_image): return prompt -def query_llama2(user_instructions, system_instructions): - prompt = system_instructions + "\n" + user_instructions + "|" - output = replicate.run( - "meta/llama-2-7b:527827021d8756c7ab79fde0abbfaac885c37a3ed5fe23c7465093f0878d55ef", - input={ +def query_llama2(user_instructions, system_instructions): + ml_client = get_ml_client() + input={ "debug": False, "top_k": 250, "top_p": 0.95, - "prompt": prompt, + "prompt": system_instructions + "\n" + user_instructions + "|", "temperature": 0.73, "max_new_tokens": 30, "min_new_tokens": -1, "stop_sequences": "\n" } - ) + + output, log = ml_client.predict_model_output(REPLICATE_MODEL.llama_2_7b, **input) result = "" for item in output: result += item diff --git a/utils/ml_processor/replicate/constants.py b/utils/ml_processor/replicate/constants.py index 8849aa69..7b7dd924 100644 --- a/utils/ml_processor/replicate/constants.py +++ b/utils/ml_processor/replicate/constants.py @@ -47,6 +47,9 @@ class REPLICATE_MODEL: realistic_vision_v5_img2img = ReplicateModel("lucataco/realistic-vision-v5-img2img", "82bbb4595458d6be142450fc6d8c4d79c936b92bd184dd2d6dd71d0796159819") ad_interpolation = ReplicateModel("piyushk52/ad_interpolation", "4a478c659d96673b81992b866f1072fc62f297b7ad9945632cda027a6a07c624") + # addition 17/10/2023 + llama_2_7b = ReplicateModel("meta/llama-2-7b", "527827021d8756c7ab79fde0abbfaac885c37a3ed5fe23c7465093f0878d55ef") + @staticmethod def get_model_by_db_obj(model_db_obj): for model in REPLICATE_MODEL.__dict__.values(): From 4f449082cb80bec45147d65facee1f4ebbefe25e Mon Sep 17 00:00:00 2001 From: piyushk52 Date: Tue, 17 Oct 2023 17:40:08 +0530 Subject: [PATCH 092/164] pagination fix --- backend/db_repo.py | 2 +- 
ui_components/widgets/sidebar_logger.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/backend/db_repo.py b/backend/db_repo.py index 460a71d6..86dda9e6 100644 --- a/backend/db_repo.py +++ b/backend/db_repo.py @@ -572,7 +572,7 @@ def get_all_inference_log_list(self, project_id=None, page=1, data_per_page=5, s if status_list: log_list = log_list.filter(status__in=status_list) else: - log_list = log_list.exclude(status="") + log_list = log_list.exclude(status__in=["", None]) paginator = Paginator(log_list, data_per_page) if page > paginator.num_pages or page < 1: diff --git a/ui_components/widgets/sidebar_logger.py b/ui_components/widgets/sidebar_logger.py index 877c8d64..73794424 100644 --- a/ui_components/widgets/sidebar_logger.py +++ b/ui_components/widgets/sidebar_logger.py @@ -50,7 +50,7 @@ def sidebar_logger(project_uuid): for _, log in enumerate(log_list): origin_data = json.loads(log.input_params).get(InferenceParamType.ORIGIN_DATA.value, None) - if not log.status or not origin_data: + if not log.status: continue output_url = None From 4a3cb51f7b5930615df1f4a3cde44ba345099ffb Mon Sep 17 00:00:00 2001 From: piyushk52 Date: Tue, 17 Oct 2023 20:00:01 +0530 Subject: [PATCH 093/164] wip: add gallery img to timeline --- backend/db_repo.py | 2 +- .../components/frame_styling_page.py | 4 +- ui_components/components/mood_board_page.py | 127 ------------------ ui_components/methods/common_methods.py | 2 +- ui_components/setup.py | 5 +- .../widgets/add_key_frame_element.py | 3 +- .../widgets/style_explorer_element.py | 25 ++-- 7 files changed, 20 insertions(+), 148 deletions(-) delete mode 100644 ui_components/components/mood_board_page.py diff --git a/backend/db_repo.py b/backend/db_repo.py index 86dda9e6..b3ed0c06 100644 --- a/backend/db_repo.py +++ b/backend/db_repo.py @@ -31,7 +31,7 @@ logger = AppLogger() -@measure_execution_time +# @measure_execution_time class DBRepo: _instance = None _count = 0 diff --git 
a/ui_components/components/frame_styling_page.py b/ui_components/components/frame_styling_page.py index 22651957..133bac29 100644 --- a/ui_components/components/frame_styling_page.py +++ b/ui_components/components/frame_styling_page.py @@ -182,9 +182,9 @@ def frame_styling_page(mainheader2, project_uuid: str): drawing_element(timing_details,project_settings,project_uuid) with st.expander("➕ Add Key Frame", expanded=True): - selected_image, inherit_styling_settings, how_long_after, which_stage_for_starting_image = add_key_frame_element(timing_details, project_uuid) + selected_image, inherit_styling_settings, how_long_after, _ = add_key_frame_element(timing_details, project_uuid) if st.button(f"Add key frame",type="primary",use_container_width=True): - add_key_frame(selected_image, inherit_styling_settings, how_long_after, which_stage_for_starting_image) + add_key_frame(selected_image, inherit_styling_settings, how_long_after) st.rerun() elif st.session_state['frame_styling_view_type'] == "Timeline": diff --git a/ui_components/components/mood_board_page.py b/ui_components/components/mood_board_page.py deleted file mode 100644 index 2657224b..00000000 --- a/ui_components/components/mood_board_page.py +++ /dev/null @@ -1,127 +0,0 @@ -import streamlit as st -from ui_components.methods.common_methods import promote_image_variant -from utils.data_repo.data_repo import DataRepo -from shared.constants import AIModelType - - -def mood_board_page(project_uuid): - - def get_varied_text(styling_instructions="", character_instructions="", action_instructions="", scene_instructions=""): - text_list = [] - - if styling_instructions: - system_instructions = "PLACEHOLDER_STYLING" - # result = query_model(styling_instructions, system_instructions) - result = "Styling instructions" - text_list.append(result) - - if character_instructions: - system_instructions = "PLACEHOLDER_CHARACTER" - # result = query_model(character_instructions, system_instructions) - result = "Character 
instructions" - text_list.append(result) - - if action_instructions: - system_instructions = "PLACEHOLDER_ACTION" - # result = query_model(action_instructions, system_instructions) - result = "Action instructions" - text_list.append(result) - - if scene_instructions: - system_instructions = "PLACEHOLDER_SCENE" - # result = query_model(scene_instructions, system_instructions) - result = "Scene instructions" - text_list.append(result) - - return ", ".join(text_list) - - data_repo = DataRepo() - st.subheader("Mood Board") - a1, a2, a3 = st.columns([0.5, 1, 0.5]) - with a2: - prompt = st.text_area("What's your prompt?", key="prompt") - - - b1, b2, b3, b4 = st.columns([1, 1, 1, 1]) - with b1: - variate_styling = st.checkbox("Variate styling", key="variate_styling") - if variate_styling: - styling_instructions = st.text_area("How would you like to variate styling?", key="variate_styling_textarea") - else: - styling_instructions = "" - - with b2: - variate_character = st.checkbox("Variate character", key="variate_character") - if variate_character: - character_instructions = st.text_area("How would you like to variate character?", key="variate_character_textarea") - else: - character_instructions = "" - - with b3: - variate_action = st.checkbox("Variate action", key="variate_action") - if variate_action: - action_instructions = st.text_area("How would you like to variate action?", key="variate_action_textarea") - else: - action_instructions = "" - - with b4: - variate_scene = st.checkbox("Variate scene", key="variate_scene") - if variate_scene: - scene_instructions = st.text_area("How would you like to variate the scene?", key="variate_scene_textarea") - else: - scene_instructions = "" - - model_list = data_repo.get_all_ai_model_list(model_type_list=[AIModelType.TXT2IMG.value], custom_trained=False) - model_name_list = list(set([m.name for m in model_list])) - - c1, c2, c3 = st.columns([0.25, 1, 0.25]) - with c2: - models_to_use = st.multiselect("Which models would you 
like to use?", model_name_list, key="models_to_use", default=model_name_list) - - d1, d2, d3 = st.columns([0.5, 1, 0.5]) - with d2: - number_to_generate = st.slider("How many images would you like to generate?", min_value=1, max_value=100, value=10, step=1, key="number_to_generate") - - if st.button("Generate images", key="generate_images", use_container_width=True, type="primary"): - st.info("Generating images...") - counter = 0 - varied_text = "" - for _ in range(number_to_generate): - for model_name in models_to_use: - if counter % 4 == 0 and (styling_instructions or character_instructions or action_instructions or scene_instructions): - varied_text = get_varied_text(styling_instructions, character_instructions, action_instructions, scene_instructions) - prompt_with_variations = f"{prompt}, {varied_text}" if prompt else varied_text - st.write(f"Prompt: '{prompt_with_variations}'") - st.write(f"Model: {model_name}") - counter += 1 - - timing = data_repo.get_timing_from_uuid("c414f700-680b-4712-a9c5-22c9935d7855") - - variants = timing.alternative_images_list - - st.markdown("***") - num_columns = st.slider('Number of columns', min_value=1, max_value=10, value=4) - - num_items_per_page = 30 - num_pages = len(variants) // num_items_per_page - if len(variants) % num_items_per_page > 0: - num_pages += 1 # Add extra page if there are remaining items - - page_number = st.radio("Select page", options=range(1, num_pages + 1)) - - start_index = (page_number - 1) * num_items_per_page - end_index = start_index + num_items_per_page - - for i in range(start_index, min(end_index, len(variants)), num_columns): - cols = st.columns(num_columns) - for j in range(num_columns): - if i + j < len(variants): - with cols[j]: - st.image(variants[i + j].location, use_column_width=True) - with st.expander(f'Variant #{i + j + 1}', False): - st.info("Instructions: PLACEHOLDER") - - if st.button(f"Add to timeline", key=f"Promote Variant #{i + j + 1} for 
{st.session_state['current_frame_index']}", help="Promote this variant to the primary image", use_container_width=True): - promote_image_variant(timing.uuid, i + j) - st.rerun() - st.markdown("***") \ No newline at end of file diff --git a/ui_components/methods/common_methods.py b/ui_components/methods/common_methods.py index 5d45c44c..f015c161 100644 --- a/ui_components/methods/common_methods.py +++ b/ui_components/methods/common_methods.py @@ -191,7 +191,7 @@ def zoom_image(image, zoom_factor, fill_with=None): return cropped_image # image here is a PIL object -def apply_image_transformations(image, zoom_level, rotation_angle, x_shift, y_shift): +def apply_image_transformations(image: Image, zoom_level, rotation_angle, x_shift, y_shift) -> Image: width, height = image.size # Calculate the diagonal for the rotation diff --git a/ui_components/setup.py b/ui_components/setup.py index 49703869..8b50dee1 100644 --- a/ui_components/setup.py +++ b/ui_components/setup.py @@ -12,7 +12,6 @@ from ui_components.components.new_project_page import new_project_page from ui_components.components.project_settings_page import project_settings_page from ui_components.components.video_rendering_page import video_rendering_page -from ui_components.components.mood_board_page import mood_board_page from streamlit_option_menu import option_menu from ui_components.constants import CreativeProcessType from ui_components.methods.common_methods import check_project_meta_data @@ -170,7 +169,7 @@ def setup_app_ui(): elif st.session_state["main_view_type"] == "Tools & Settings": with st.sidebar: - tool_pages = ["Query Logger", "Mood Board", "Custom Models", "Project Settings"] + tool_pages = ["Query Logger", "Custom Models", "Project Settings"] if st.session_state["page"] not in tool_pages: st.session_state["page"] = tool_pages[0] @@ -184,8 +183,6 @@ def setup_app_ui(): custom_models_page(st.session_state["project_uuid"]) elif st.session_state["page"] == "Project Settings": 
project_settings_page(st.session_state["project_uuid"]) - elif st.session_state["page"] == "Mood Board": - mood_board_page(st.session_state["project_uuid"]) elif st.session_state["main_view_type"] == "Video Rendering": video_rendering_page( diff --git a/ui_components/widgets/add_key_frame_element.py b/ui_components/widgets/add_key_frame_element.py index a2a77079..9095fe1d 100644 --- a/ui_components/widgets/add_key_frame_element.py +++ b/ui_components/widgets/add_key_frame_element.py @@ -86,7 +86,6 @@ def add_key_frame_element(timing_details, project_uuid): if apply_zoom_effects == "Yes": image_preview = generate_pil_image(selected_image_location) selected_image = apply_image_transformations(image_preview, st.session_state['zoom_level_input'], st.session_state['rotation_angle_input'], st.session_state['x_shift'], st.session_state['y_shift']) - else: selected_image = generate_pil_image(selected_image_location) st.info("Starting Image:") @@ -96,7 +95,7 @@ def add_key_frame_element(timing_details, project_uuid): return selected_image, inherit_styling_settings, how_long_after, transformation_stage -def add_key_frame(selected_image, inherit_styling_settings, how_long_after, which_stage_for_starting_image): +def add_key_frame(selected_image, inherit_styling_settings, how_long_after): data_repo = DataRepo() project_uuid = st.session_state['project_uuid'] timing_details = data_repo.get_timing_list_from_project(project_uuid) diff --git a/ui_components/widgets/style_explorer_element.py b/ui_components/widgets/style_explorer_element.py index eb3b23bb..eb59f837 100644 --- a/ui_components/widgets/style_explorer_element.py +++ b/ui_components/widgets/style_explorer_element.py @@ -1,7 +1,9 @@ import json import streamlit as st from ui_components.methods.common_methods import process_inference_output, promote_image_variant +from ui_components.methods.file_methods import generate_pil_image from ui_components.methods.ml_methods import query_llama2 +from 
ui_components.widgets.add_key_frame_element import add_key_frame from utils.constants import MLQueryObject from utils.data_repo.data_repo import DataRepo from shared.constants import AIModelType, InferenceType, InternalFileTag, InternalFileType, SortOrder @@ -89,7 +91,7 @@ def style_explorer_element(project_uuid): project_setting = data_repo.get_project_setting(project_uuid) page_number = st.radio("Select page", options=range(1, project_setting.total_gallery_pages + 1), horizontal=True) - num_items_per_page = 30 + num_items_per_page = 10 gallery_image_list, res_payload = data_repo.get_all_file_list( file_type=InternalFileType.IMAGE.value, @@ -103,37 +105,38 @@ def style_explorer_element(project_uuid): if project_setting.total_gallery_pages != res_payload['total_pages']: project_setting.total_gallery_pages = res_payload['total_pages'] st.rerun() + + total_image_count = res_payload['count'] if gallery_image_list and len(gallery_image_list): st.markdown("***") num_columns = st.slider('Number of columns', min_value=1, max_value=10, value=4) + start_index = 0 + end_index = min(start_index + num_items_per_page, total_image_count) - start_index = (page_number - 1) * num_items_per_page - end_index = start_index + num_items_per_page - - for i in range(start_index, min(end_index, len(gallery_image_list)), num_columns): + for i in range(start_index, end_index, num_columns): cols = st.columns(num_columns) for j in range(num_columns): if i + j < len(gallery_image_list): with cols[j]: st.image(gallery_image_list[i + j].location, use_column_width=True) - with st.expander(f'Variant #{i + j + 1}', False): + with st.expander(f'Variant #{(page_number - 1) * num_items_per_page + i + j + 1}', False): if gallery_image_list[i + j].inference_log: log = data_repo.get_inference_log_from_uuid(gallery_image_list[i + j].inference_log.uuid) if log: input_params = json.loads(log.input_params) prompt = input_params.get('prompt', 'No prompt found') model = 
json.loads(log.output_details)['model_name'].split('/')[-1] - st.info( - f"Prompt: '{prompt}' -- Model: {model}" - ) + st.info(f"Prompt: {prompt}") + st.info(f"Model: {model}") else: st.warning("No data found") else: st.warning("No data found") - if st.button(f"Add to timeline", key=f"Promote Variant #{i + j + 1} for {st.session_state['current_frame_index']}", help="Promote this variant to the primary image", use_container_width=True): - # TODO: add method to create a new frame with this as the main image + if st.button(f"Add to timeline", key=f"Promote Variant #{(page_number - 1) * num_items_per_page + i + j + 1} for {st.session_state['current_frame_index']}", help="Promote this variant to the primary image", use_container_width=True): + pil_image = generate_pil_image(gallery_image_list[i + j].location) + add_key_frame(pil_image, False, 2.5) st.rerun() st.markdown("***") else: From 05a4c56b292d619bf3ed34798c2e4d2146504d3a Mon Sep 17 00:00:00 2001 From: piyushk52 Date: Tue, 17 Oct 2023 21:38:55 +0530 Subject: [PATCH 094/164] img addition fixed + cache update fixed --- ui_components/methods/common_methods.py | 162 +----------------- ui_components/methods/video_methods.py | 9 +- .../widgets/add_key_frame_element.py | 25 ++- .../widgets/frame_movement_widgets.py | 16 +- ui_components/widgets/timeline_view.py | 11 +- utils/cache/cache.py | 18 +- utils/cache/cache_methods.py | 7 +- 7 files changed, 51 insertions(+), 197 deletions(-) diff --git a/ui_components/methods/common_methods.py b/ui_components/methods/common_methods.py index f015c161..67f44db8 100644 --- a/ui_components/methods/common_methods.py +++ b/ui_components/methods/common_methods.py @@ -20,14 +20,11 @@ from ui_components.methods.video_methods import calculate_desired_duration_of_individual_clip, update_speed_of_video_clip from ui_components.models import InternalAIModelObject, InternalFrameTimingObject, InternalSettingObject from utils.common_utils import acquire_lock, release_lock -from utils.constants 
import ImageStage from utils.data_repo.data_repo import DataRepo from shared.constants import AnimationStyleType -from utils.cache.cache import StCache from ui_components.models import InternalFileObject from typing import Union -import streamlit as st from utils.media_processor.video import VideoProcessor @@ -264,97 +261,6 @@ def update_timings_in_order(project_uuid): data_repo.update_specific_timing(timing.uuid, frame_time=float(i)) -def change_position_input(timing_uuid, src): - data_repo = DataRepo() - timing: InternalFrameTimingObject = data_repo.get_timing_from_uuid(timing_uuid) - timing_list = data_repo.get_timing_list_from_project(project_uuid=timing.project.uuid) - - min_value = 1 - max_value = len(timing_list) - - new_position = st.number_input("Move to new position:", min_value=min_value, max_value=max_value, - value=timing.aux_frame_index + 1, step=1, key=f"new_position_{timing.uuid}_{src}") - - if st.button('Update Position',key=f"change_frame_position_{timing.uuid}_{src}"): - data_repo.update_specific_timing(timing.uuid, aux_frame_index=new_position - 1) - st.rerun() - - -def move_frame(direction, timing_uuid): - data_repo = DataRepo() - timing: InternalFrameTimingObject = data_repo.get_timing_from_uuid( - timing_uuid) - - if direction == "Up": - if timing.aux_frame_index == 0: - st.error("This is the first frame") - time.sleep(1) - return - - data_repo.update_specific_timing(timing.uuid, aux_frame_index=timing.aux_frame_index - 1) - elif direction == "Down": - timing_list = data_repo.get_timing_list_from_project(project_uuid=timing.project.uuid) - if timing.aux_frame_index == len(timing_list) - 1: - st.error("This is the last frame") - time.sleep(1) - return - - data_repo.update_specific_timing(timing.uuid, aux_frame_index=timing.aux_frame_index + 1) - -def move_frame_back_button(timing_uuid, orientation): - direction = "Up" - if orientation == "side-to-side": - arrow = "⬅️" - else: # up-down - arrow = "⬆️" - if st.button(arrow, 
key=f"move_frame_back_{timing_uuid}", help="Move frame back"): - move_frame(direction, timing_uuid) - st.rerun() - - -def move_frame_forward_button(timing_uuid, orientation): - direction = "Down" - if orientation == "side-to-side": - arrow = "➡️" - else: # up-down - arrow = "⬇️" - - if st.button(arrow, key=f"move_frame_forward_{timing_uuid}", help="Move frame forward"): - move_frame(direction, timing_uuid) - st.rerun() - - -def delete_frame_button(timing_uuid, show_label=False): - if show_label: - label = "Delete Frame 🗑️" - else: - label = "🗑️" - - if st.button(label, key=f"delete_frame_{timing_uuid}", help="Delete frame"): - delete_frame(timing_uuid) - st.rerun() - -def delete_frame(timing_uuid): - data_repo = DataRepo() - timing: InternalFrameTimingObject = data_repo.get_timing_from_uuid(timing_uuid) - next_timing = data_repo.get_next_timing(timing_uuid) - timing_details = data_repo.get_timing_list_from_project(project_uuid=timing.project.uuid) - - if next_timing: - data_repo.update_specific_timing( - next_timing.uuid, - interpolated_clip_list=None, - preview_video_id=None, - timed_clip_id=None - ) - - data_repo.delete_timing_from_uuid(timing.uuid) - - if timing.aux_frame_index == len(timing_details) - 1: - st.session_state['current_frame_index'] = max(1, st.session_state['current_frame_index'] - 1) - st.session_state['prev_frame_index'] = st.session_state['current_frame_index'] - st.session_state['current_frame_uuid'] = timing_details[st.session_state['current_frame_index'] - 1].uuid - def save_uploaded_image(image, project_uuid, frame_uuid, save_type): data_repo = DataRepo() @@ -372,72 +278,6 @@ def save_uploaded_image(image, project_uuid, frame_uuid, save_type): except Exception as e: print(f"Failed to save image file due to: {str(e)}") return None - -def jump_to_single_frame_view_button(display_number, timing_details): - if st.button(f"Jump to #{display_number}"): - st.session_state['prev_frame_index'] = display_number - 
st.session_state['current_frame_uuid'] = timing_details[st.session_state['current_frame_index'] - 1].uuid - st.session_state['frame_styling_view_type'] = "Individual View" - st.session_state['change_view_type'] = True - st.rerun() - -def replace_image_widget(timing_uuid, stage, options=["Other Frame", "Uploaded Frame"]): - data_repo = DataRepo() - timing = data_repo.get_timing_from_uuid(timing_uuid) - timing_details = data_repo.get_timing_list_from_project(timing.project.uuid) - - replace_with = options[0] if len(options) == 1 else st.radio("Replace with:", options, horizontal=True, key=f"replace_with_what_{stage}_{timing_uuid}") - - if replace_with == "Other Frame": - which_stage_to_use_for_replacement = st.radio("Select stage to use:", [ - ImageStage.MAIN_VARIANT.value, ImageStage.SOURCE_IMAGE.value], key=f"which_stage_to_use_for_replacement_{stage}_{timing_uuid}", horizontal=True) - which_image_to_use_for_replacement = st.number_input("Select image to use:", min_value=0, max_value=len( - timing_details)-1, value=0, key=f"which_image_to_use_for_replacement_{stage}") - - if which_stage_to_use_for_replacement == ImageStage.SOURCE_IMAGE.value: - selected_image = timing_details[which_image_to_use_for_replacement].source_image - elif which_stage_to_use_for_replacement == ImageStage.MAIN_VARIANT.value: - selected_image = timing_details[which_image_to_use_for_replacement].primary_image - - st.image(selected_image.local_path, use_column_width=True) - - if st.button("Replace with selected frame", disabled=False,key=f"replace_with_selected_frame_{stage}_{timing_uuid}"): - if stage == "source": - data_repo.update_specific_timing(timing.uuid, source_image_id=selected_image.uuid) - st.success("Replaced") - time.sleep(1) - st.rerun() - else: - number_of_image_variants = add_image_variant( - selected_image.uuid, timing.uuid) - promote_image_variant( - timing.uuid, number_of_image_variants - 1) - st.success("Replaced") - time.sleep(1) - st.rerun() - - elif replace_with == 
"Uploaded Frame": - if stage == "source": - uploaded_file = st.file_uploader("Upload Source Image", type=[ - "png", "jpeg"], accept_multiple_files=False) - if uploaded_file != None: - if st.button("Upload Source Image"): - if uploaded_file: - timing = data_repo.get_timing_from_uuid(timing.uuid) - if save_and_promote_image(uploaded_file, timing.project.uuid, timing.uuid, "source"): - time.sleep(1.5) - st.rerun() - else: - replacement_frame = st.file_uploader("Upload Styled Image", type=[ - "png", "jpeg"], accept_multiple_files=False, key=f"replacement_frame_upload_{stage}_{timing_uuid}") - if replacement_frame != None: - if st.button("Replace frame", disabled=False): - timing = data_repo.get_timing_from_uuid(timing.uuid) - if replacement_frame: - save_and_promote_image(replacement_frame, timing.project.uuid, timing.uuid, "styled") - st.success("Replaced") - time.sleep(1) - st.rerun() def promote_image_variant(timing_uuid, variant_to_promote_frame_number: str): data_repo = DataRepo() @@ -472,6 +312,8 @@ def promote_image_variant(timing_uuid, variant_to_promote_frame_number: str): if frame_idx < len(timing_details): data_repo.update_specific_timing(timing.uuid, timed_clip_id=None) + _ = data_repo.get_timing_list_from_project(timing.project.uuid) + # updates the clip duration of the variant_to_promote and sets it as the timed_clip def promote_video_variant(timing_uuid, variant_to_promote_frame_number: str): data_repo = DataRepo() diff --git a/ui_components/methods/video_methods.py b/ui_components/methods/video_methods.py index 8cc74c02..18bfcb16 100644 --- a/ui_components/methods/video_methods.py +++ b/ui_components/methods/video_methods.py @@ -299,10 +299,8 @@ def update_speed_of_video_clip(video_file: InternalFileObject, timing_uuid) -> I def calculate_desired_duration_of_individual_clip(timing_uuid): data_repo = DataRepo() - timing: InternalFrameTimingObject = data_repo.get_timing_from_uuid( - timing_uuid) - timing_details = data_repo.get_timing_list_from_project( - 
timing.project.uuid) + timing: InternalFrameTimingObject = data_repo.get_timing_from_uuid(timing_uuid) + timing_details = data_repo.get_timing_list_from_project(timing.project.uuid) length_of_list = len(timing_details) # last frame @@ -312,8 +310,7 @@ def calculate_desired_duration_of_individual_clip(timing_uuid): else: time_of_frame = timing.frame_time time_of_next_frame = data_repo.get_next_timing(timing_uuid).frame_time - total_duration_of_frame = float( - time_of_next_frame) - float(time_of_frame) + total_duration_of_frame = float(time_of_next_frame) - float(time_of_frame) return total_duration_of_frame diff --git a/ui_components/widgets/add_key_frame_element.py b/ui_components/widgets/add_key_frame_element.py index 9095fe1d..4cc8460f 100644 --- a/ui_components/widgets/add_key_frame_element.py +++ b/ui_components/widgets/add_key_frame_element.py @@ -1,5 +1,5 @@ import streamlit as st -from ui_components.constants import CreativeProcessType +from ui_components.constants import CreativeProcessType, WorkflowStageType from ui_components.methods.video_methods import calculate_desired_duration_of_individual_clip from ui_components.widgets.image_zoom_widgets import zoom_inputs @@ -9,7 +9,7 @@ from utils.constants import ImageStage from ui_components.methods.file_methods import generate_pil_image,save_or_host_file -from ui_components.methods.common_methods import apply_image_transformations, clone_styling_settings, create_timings_row_at_frame_number, save_uploaded_image +from ui_components.methods.common_methods import apply_image_transformations, clone_styling_settings, create_timings_row_at_frame_number, save_uploaded_image, update_clip_duration_of_all_timing_frames from PIL import Image @@ -95,17 +95,17 @@ def add_key_frame_element(timing_details, project_uuid): return selected_image, inherit_styling_settings, how_long_after, transformation_stage -def add_key_frame(selected_image, inherit_styling_settings, how_long_after): +def add_key_frame(selected_image, 
inherit_styling_settings, how_long_after, cur_frame_index=None): data_repo = DataRepo() project_uuid = st.session_state['project_uuid'] timing_details = data_repo.get_timing_list_from_project(project_uuid) project_settings = data_repo.get_project_setting(project_uuid) - if len(timing_details) == 0: index_of_current_item = 1 else: - index_of_current_item = min(len(timing_details), st.session_state['current_frame_index']) + cur_frame_index = st.session_state['current_frame_index'] if cur_frame_index is None else cur_frame_index + index_of_current_item = min(len(timing_details), cur_frame_index) timing_details = data_repo.get_timing_list_from_project(project_uuid) @@ -121,14 +121,12 @@ def add_key_frame(selected_image, inherit_styling_settings, how_long_after): new_timing = create_timings_row_at_frame_number(project_uuid, 0) else: new_timing = create_timings_row_at_frame_number(project_uuid, index_of_current_item, frame_time=key_frame_time) - - clip_duration = calculate_desired_duration_of_individual_clip(new_timing.uuid) - data_repo.update_specific_timing(new_timing.uuid, clip_duration=clip_duration) + update_clip_duration_of_all_timing_frames(project_uuid) timing_details = data_repo.get_timing_list_from_project(project_uuid) if selected_image: - save_uploaded_image(selected_image, project_uuid, timing_details[index_of_current_item].uuid, "source") - save_uploaded_image(selected_image, project_uuid, timing_details[index_of_current_item].uuid, "styled") + save_uploaded_image(selected_image, project_uuid, timing_details[index_of_current_item].uuid, WorkflowStageType.SOURCE.value) + save_uploaded_image(selected_image, project_uuid, timing_details[index_of_current_item].uuid, WorkflowStageType.STYLED.value) if inherit_styling_settings == "Yes": clone_styling_settings(index_of_current_item - 1, timing_details[index_of_current_item].uuid) @@ -138,9 +136,10 @@ def add_key_frame(selected_image, inherit_styling_settings, how_long_after): if len(timing_details) == 1: 
st.session_state['current_frame_index'] = 1 st.session_state['current_frame_uuid'] = timing_details[0].uuid - else: - st.session_state['prev_frame_index'] = min(len(timing_details), st.session_state['current_frame_index']+1) - st.session_state['current_frame_uuid'] = timing_details[st.session_state['current_frame_index']].uuid + else: + st.session_state['prev_frame_index'] = min(len(timing_details), index_of_current_item + 1) + st.session_state['current_frame_index'] = min(len(timing_details), index_of_current_item + 1) + st.session_state['current_frame_uuid'] = timing_details[st.session_state['current_frame_index'] - 1].uuid st.session_state['page'] = CreativeProcessType.STYLING.value st.session_state['section_index'] = 0 diff --git a/ui_components/widgets/frame_movement_widgets.py b/ui_components/widgets/frame_movement_widgets.py index bc410d3e..a89cbd3f 100644 --- a/ui_components/widgets/frame_movement_widgets.py +++ b/ui_components/widgets/frame_movement_widgets.py @@ -166,7 +166,6 @@ def replace_image_widget(timing_uuid, stage): time.sleep(1) st.rerun() - def jump_to_single_frame_view_button(display_number, timing_details): if st.button(f"Jump to #{display_number}"): st.session_state['prev_frame_index'] = display_number @@ -174,3 +173,18 @@ def jump_to_single_frame_view_button(display_number, timing_details): st.session_state['frame_styling_view_type'] = "Individual View" st.session_state['change_view_type'] = True st.rerun() + +def change_position_input(timing_uuid, src): + data_repo = DataRepo() + timing: InternalFrameTimingObject = data_repo.get_timing_from_uuid(timing_uuid) + timing_list = data_repo.get_timing_list_from_project(project_uuid=timing.project.uuid) + + min_value = 1 + max_value = len(timing_list) + + new_position = st.number_input("Move to new position:", min_value=min_value, max_value=max_value, + value=timing.aux_frame_index + 1, step=1, key=f"new_position_{timing.uuid}_{src}") + + if st.button('Update 
Position',key=f"change_frame_position_{timing.uuid}_{src}"): + data_repo.update_specific_timing(timing.uuid, aux_frame_index=new_position - 1) + st.rerun() \ No newline at end of file diff --git a/ui_components/widgets/timeline_view.py b/ui_components/widgets/timeline_view.py index a4f61d7c..8f57abc2 100644 --- a/ui_components/widgets/timeline_view.py +++ b/ui_components/widgets/timeline_view.py @@ -1,15 +1,12 @@ import streamlit as st -from ui_components.methods.common_methods import delete_frame, jump_to_single_frame_view_button, move_frame,delete_frame_button,move_frame_back_button,move_frame_forward_button,change_position_input,update_clip_duration_of_all_timing_frames,replace_image_widget +from ui_components.widgets.frame_movement_widgets import change_position_input, delete_frame_button, jump_to_single_frame_view_button, move_frame_back_button, move_frame_forward_button, replace_image_widget from ui_components.widgets.frame_time_selector import single_frame_time_selector, single_frame_time_duration_setter from ui_components.widgets.image_carousal import display_image from utils.data_repo.data_repo import DataRepo -from ui_components.widgets.frame_clip_generation_elements import update_animation_style_element from ui_components.constants import WorkflowStageType from utils import st_memory def timeline_view_buttons(i, j, timing_details, shift_frames_setting, time_setter_toggle, replace_image_widget_toggle, duration_setter_toggle, move_frames_toggle, delete_frames_toggle, change_position_toggle): - - if time_setter_toggle: single_frame_time_selector(timing_details[i + j].uuid, 'motion', shift_frames=shift_frames_setting) if duration_setter_toggle: @@ -32,13 +29,13 @@ def timeline_view_buttons(i, j, timing_details, shift_frames_setting, time_sette if change_position_toggle: change_position_input(timing_details[i + j].uuid, "side-to-side") - if time_setter_toggle or duration_setter_toggle or replace_image_widget_toggle or move_frames_toggle or 
delete_frames_toggle or change_frame_position_toggle: + if time_setter_toggle or duration_setter_toggle or replace_image_widget_toggle or move_frames_toggle or delete_frames_toggle: st.caption("--") jump_to_single_frame_view_button(i + j + 1, timing_details) + def timeline_view(project_uuid, stage): - data_repo = DataRepo() timing_details = data_repo.get_timing_list_from_project(project_uuid) @@ -46,8 +43,6 @@ def timeline_view(project_uuid, stage): with header_col_1: shift_frames_setting = st.toggle("Shift Frames", help="If set to True, it will shift the frames after your adjustment forward by the amount of time you move.") - - with header_col_2: col1, col2, col3 = st.columns(3) diff --git a/utils/cache/cache.py b/utils/cache/cache.py index 6786a86f..ec47b69c 100644 --- a/utils/cache/cache.py +++ b/utils/cache/cache.py @@ -15,9 +15,11 @@ class CacheKey(ExtendedEnum): class StCache: @staticmethod def get(uuid, data_type): + uuid = str(uuid) if data_type in st.session_state: for ele in st.session_state[data_type]: - if ele.uuid == uuid: + ele_uuid = ele['uuid'] if type(ele) is dict else str(ele.uuid) + if ele_uuid == uuid: return ele return None @@ -26,14 +28,15 @@ def get(uuid, data_type): def update(data, data_type) -> bool: object_found = False uuid = data['uuid'] if type(data) is dict else data.uuid + uuid = str(uuid) if data_type in st.session_state: object_list = st.session_state[data_type] - for ele in object_list: - if ele.uuid == uuid: - ele = data + for idx, ele in enumerate(object_list): + ele_uuid = ele['uuid'] if type(ele) is dict else str(ele.uuid) + if ele_uuid == uuid: + object_list[idx] = data object_found = True - break st.session_state[data_type] = object_list @@ -42,6 +45,7 @@ def update(data, data_type) -> bool: @staticmethod def add(data, data_type) -> bool: uuid = data['uuid'] if type(data) is dict else data.uuid + uuid = str(uuid) obj = StCache.get(uuid, data_type) if obj: StCache.update(data, data_type) @@ -59,10 +63,12 @@ def add(data, 
data_type) -> bool: @staticmethod def delete(uuid, data_type) -> bool: object_found = False + uuid = str(uuid) if data_type in st.session_state: object_list = st.session_state[data_type] for ele in object_list: - if ele.uuid == uuid: + ele_uuid = ele['uuid'] if type(ele) is dict else str(ele.uuid) + if ele_uuid == uuid: object_list.remove(ele) object_found = True break diff --git a/utils/cache/cache_methods.py b/utils/cache/cache_methods.py index 95701dee..c7380129 100644 --- a/utils/cache/cache_methods.py +++ b/utils/cache/cache_methods.py @@ -269,7 +269,8 @@ def _cache_get_timing_list_from_project(self, *args, **kwargs): # if there are any timings for the project, return them if len(project_specific_list): - return project_specific_list + sorted_objects = sorted(project_specific_list, key=lambda x: x.aux_frame_index) + return sorted_objects original_func = getattr(cls, '_original_get_timing_list_from_project') timing_list = original_func(self, *args, **kwargs) @@ -313,8 +314,8 @@ def _cache_get_timing_from_uuid(self, *args, **kwargs): original_func = getattr(cls, '_original_get_timing_from_uuid') timing = original_func(self, *args, **kwargs) - StCache.delete(timing.uuid, CacheKey.TIMING_DETAILS.value) - StCache.add(timing, CacheKey.TIMING_DETAILS.value) + if timing: + StCache.add(timing, CacheKey.TIMING_DETAILS.value) return timing From 8c4b07c1914fd687831a0699e3c1a0a59039fe9b Mon Sep 17 00:00:00 2001 From: piyushk52 Date: Tue, 17 Oct 2023 21:57:16 +0530 Subject: [PATCH 095/164] frame deletion fixed --- ui_components/methods/common_methods.py | 2 ++ .../widgets/frame_movement_widgets.py | 18 ++++++++++++++++-- ui_components/widgets/styling_element.py | 3 +-- 3 files changed, 19 insertions(+), 4 deletions(-) diff --git a/ui_components/methods/common_methods.py b/ui_components/methods/common_methods.py index 67f44db8..0ab17bc8 100644 --- a/ui_components/methods/common_methods.py +++ b/ui_components/methods/common_methods.py @@ -581,6 +581,8 @@ def 
update_clip_duration_of_all_timing_frames(project_uuid): total_duration_of_frame = round(total_duration_of_frame, 2) data_repo.update_specific_timing(timing_item.uuid, clip_duration=total_duration_of_frame) + _ = data_repo.get_timing_list_from_project(project_uuid) + def create_timings_row_at_frame_number(project_uuid, index_of_frame, frame_time=0.0): data_repo = DataRepo() diff --git a/ui_components/widgets/frame_movement_widgets.py b/ui_components/widgets/frame_movement_widgets.py index a89cbd3f..dc432f93 100644 --- a/ui_components/widgets/frame_movement_widgets.py +++ b/ui_components/widgets/frame_movement_widgets.py @@ -1,6 +1,6 @@ import time import streamlit as st -from ui_components.methods.common_methods import add_image_variant, promote_image_variant, save_uploaded_image +from ui_components.methods.common_methods import add_image_variant, promote_image_variant, save_uploaded_image, update_clip_duration_of_all_timing_frames from ui_components.models import InternalFrameTimingObject from utils.constants import ImageStage @@ -79,9 +79,14 @@ def delete_frame_button(timing_uuid, show_label=False): def delete_frame(timing_uuid): data_repo = DataRepo() timing: InternalFrameTimingObject = data_repo.get_timing_from_uuid(timing_uuid) + project_uuid = timing.project.uuid next_timing = data_repo.get_next_timing(timing_uuid) timing_details = data_repo.get_timing_list_from_project(project_uuid=timing.project.uuid) + if len(timing_details) == 1: + st.error("can't delete the only image present in the project") + return + if next_timing: data_repo.update_specific_timing( next_timing.uuid, @@ -91,11 +96,20 @@ def delete_frame(timing_uuid): ) data_repo.delete_timing_from_uuid(timing.uuid) + timing_details = data_repo.get_timing_list_from_project(project_uuid=project_uuid) - if timing.aux_frame_index == len(timing_details) - 1: + # this is the last frame + if not next_timing: st.session_state['current_frame_index'] = max(1, st.session_state['current_frame_index'] - 1) 
st.session_state['prev_frame_index'] = st.session_state['current_frame_index'] st.session_state['current_frame_uuid'] = timing_details[st.session_state['current_frame_index'] - 1].uuid + # this is the first frame or something in the middle + else: + st.session_state['current_frame_index'] = min(len(timing_details) - 1, st.session_state['current_frame_index'] + 1) + st.session_state['prev_frame_index'] = st.session_state['current_frame_index'] + st.session_state['current_frame_uuid'] = timing_details[st.session_state['current_frame_index'] - 1].uuid + + update_clip_duration_of_all_timing_frames(project_uuid) def replace_image_widget(timing_uuid, stage): data_repo = DataRepo() diff --git a/ui_components/widgets/styling_element.py b/ui_components/widgets/styling_element.py index 287c3b5c..555d4390 100644 --- a/ui_components/widgets/styling_element.py +++ b/ui_components/widgets/styling_element.py @@ -12,8 +12,7 @@ def styling_element(timing_uuid, view_type=ViewType.SINGLE.value): data_repo = DataRepo() - timing: InternalFrameTimingObject = data_repo.get_timing_from_uuid( - timing_uuid) + timing: InternalFrameTimingObject = data_repo.get_timing_from_uuid(timing_uuid) timing_details: List[InternalFrameTimingObject] = data_repo.get_timing_list_from_project(timing.project.uuid) project_settings: InternalSettingObject = data_repo.get_project_setting(timing.project.uuid) From 5e7e156317ecf2d62a42ff973751552c4f36a640 Mon Sep 17 00:00:00 2001 From: piyushk52 Date: Tue, 17 Oct 2023 23:46:20 +0530 Subject: [PATCH 096/164] frame position change fixed --- backend/models.py | 42 ++++++++++--------- .../widgets/frame_movement_widgets.py | 4 +- ui_components/widgets/timeline_view.py | 2 +- 3 files changed, 26 insertions(+), 22 deletions(-) diff --git a/backend/models.py b/backend/models.py index e258870a..0ed90f57 100644 --- a/backend/models.py +++ b/backend/models.py @@ -193,6 +193,7 @@ def save(self, *args, **kwargs): # TODO: updating details of every frame this way can be slow - 
implement a better strategy # ------ handling aux_frame_index ------ + update_frame_duration = False # if the frame is being deleted (disabled) if self.old_is_disabled != self.is_disabled and self.is_disabled: timing_list = Timing.objects.filter(project_id=self.project_id, \ @@ -248,26 +249,8 @@ def save(self, *args, **kwargs): self.interpolated_video_id = None self.timed_clip_id = None - - # updating clip_duration - timing_list = Timing.objects.filter(project_id=self.project_id, is_disabled=False).order_by('aux_frame_index') - length_of_list = len(timing_list) - - for idx, timing_item in enumerate(timing_list): - # last frame - if idx == (length_of_list - 1): - time_of_frame = timing_item.frame_time - duration_of_static_time = 0.0 - end_duration_of_frame = float(time_of_frame) + float(duration_of_static_time) - total_duration_of_frame = float(end_duration_of_frame) - float(time_of_frame) - else: - time_of_frame = timing_item.frame_time - next_timing = timing_list[idx + 1] - time_of_next_frame = next_timing.frame_time - total_duration_of_frame = float(time_of_next_frame) - float(time_of_frame) - - Timing.objects.filter(uuid=timing_item.uuid, is_disabled=False).update(clip_duration=total_duration_of_frame) + update_frame_duration = True # ------ handling timed_clip ------ # if timed_clip is deleted/changed then preview_video will be deleted @@ -276,6 +259,27 @@ def save(self, *args, **kwargs): super().save(*args, **kwargs) + if update_frame_duration: + # updating clip_duration + timing_list = Timing.objects.filter(project_id=self.project_id, is_disabled=False).order_by('aux_frame_index') + length_of_list = len(timing_list) + + for idx, timing_item in enumerate(timing_list): + # last frame + if idx == (length_of_list - 1): + time_of_frame = timing_item.frame_time + duration_of_static_time = 0.0 + end_duration_of_frame = float(time_of_frame) + float(duration_of_static_time) + total_duration_of_frame = float(end_duration_of_frame) - float(time_of_frame) + else: + 
time_of_frame = timing_item.frame_time + next_timing = timing_list[idx + 1] + time_of_next_frame = next_timing.frame_time + total_duration_of_frame = float(time_of_next_frame) - float(time_of_frame) + + total_duration_of_frame = round(total_duration_of_frame, 2) + Timing.objects.filter(uuid=timing_item.uuid, is_disabled=False).update(clip_duration=total_duration_of_frame) + def add_interpolated_clip_list(self, clip_uuid_list): cur_list = json.loads(self.interpolated_clip_list) if self.interpolated_clip_list else [] cur_list.extend(clip_uuid_list) diff --git a/ui_components/widgets/frame_movement_widgets.py b/ui_components/widgets/frame_movement_widgets.py index dc432f93..88bab76f 100644 --- a/ui_components/widgets/frame_movement_widgets.py +++ b/ui_components/widgets/frame_movement_widgets.py @@ -30,7 +30,7 @@ def move_frame(direction, timing_uuid): if direction == "Up": if timing.aux_frame_index == 0: st.error("This is the first frame") - time.sleep(1) + time.sleep(0.5) return data_repo.update_specific_timing(timing.uuid, aux_frame_index=timing.aux_frame_index - 1) @@ -38,7 +38,7 @@ def move_frame(direction, timing_uuid): timing_list = data_repo.get_timing_list_from_project(project_uuid=timing.project.uuid) if timing.aux_frame_index == len(timing_list) - 1: st.error("This is the last frame") - time.sleep(1) + time.sleep(0.5) return data_repo.update_specific_timing(timing.uuid, aux_frame_index=timing.aux_frame_index + 1) diff --git a/ui_components/widgets/timeline_view.py b/ui_components/widgets/timeline_view.py index 8f57abc2..704f6069 100644 --- a/ui_components/widgets/timeline_view.py +++ b/ui_components/widgets/timeline_view.py @@ -29,7 +29,7 @@ def timeline_view_buttons(i, j, timing_details, shift_frames_setting, time_sette if change_position_toggle: change_position_input(timing_details[i + j].uuid, "side-to-side") - if time_setter_toggle or duration_setter_toggle or replace_image_widget_toggle or move_frames_toggle or delete_frames_toggle: + if 
time_setter_toggle or duration_setter_toggle or replace_image_widget_toggle or move_frames_toggle or delete_frames_toggle or change_position_toggle: st.caption("--") jump_to_single_frame_view_button(i + j + 1, timing_details) From 2ff2245c3aaf556f05551e2dac7925c832fb3f52 Mon Sep 17 00:00:00 2001 From: peter942 Date: Wed, 18 Oct 2023 03:15:36 +0200 Subject: [PATCH 097/164] More small improvements --- .../widgets/add_key_frame_element.py | 8 +- .../widgets/frame_movement_widgets.py | 57 +++----- ui_components/widgets/sidebar_logger.py | 131 +++++++++--------- .../widgets/style_explorer_element.py | 11 +- ui_components/widgets/timeline_view.py | 4 +- 5 files changed, 104 insertions(+), 107 deletions(-) diff --git a/ui_components/widgets/add_key_frame_element.py b/ui_components/widgets/add_key_frame_element.py index 4cc8460f..7573bfa0 100644 --- a/ui_components/widgets/add_key_frame_element.py +++ b/ui_components/widgets/add_key_frame_element.py @@ -95,7 +95,7 @@ def add_key_frame_element(timing_details, project_uuid): return selected_image, inherit_styling_settings, how_long_after, transformation_stage -def add_key_frame(selected_image, inherit_styling_settings, how_long_after, cur_frame_index=None): +def add_key_frame(selected_image, inherit_styling_settings, how_long_after, target_frame_position=None): data_repo = DataRepo() project_uuid = st.session_state['project_uuid'] timing_details = data_repo.get_timing_list_from_project(project_uuid) @@ -104,13 +104,15 @@ def add_key_frame(selected_image, inherit_styling_settings, how_long_after, cur_ if len(timing_details) == 0: index_of_current_item = 1 else: - cur_frame_index = st.session_state['current_frame_index'] if cur_frame_index is None else cur_frame_index - index_of_current_item = min(len(timing_details), cur_frame_index) + target_frame_position = st.session_state['current_frame_index'] if target_frame_position is None else target_frame_position + index_of_current_item = min(len(timing_details), 
target_frame_position) timing_details = data_repo.get_timing_list_from_project(project_uuid) if len(timing_details) == 0: key_frame_time = 0.0 + elif target_frame_position is not None: + key_frame_time = float(timing_details[target_frame_position - 1].frame_time) + how_long_after elif index_of_current_item == len(timing_details): key_frame_time = float(timing_details[index_of_current_item - 1].frame_time) + how_long_after else: diff --git a/ui_components/widgets/frame_movement_widgets.py b/ui_components/widgets/frame_movement_widgets.py index 88bab76f..2ce6b6a2 100644 --- a/ui_components/widgets/frame_movement_widgets.py +++ b/ui_components/widgets/frame_movement_widgets.py @@ -111,40 +111,32 @@ def delete_frame(timing_uuid): update_clip_duration_of_all_timing_frames(project_uuid) -def replace_image_widget(timing_uuid, stage): +def replace_image_widget(timing_uuid, stage, options=["Uploaded Frame","Other Frame"]): data_repo = DataRepo() timing = data_repo.get_timing_from_uuid(timing_uuid) timing_details = data_repo.get_timing_list_from_project(timing.project.uuid) - - replace_with = st.radio("Replace with:", [ - "Uploaded Frame", "Other Frame"], horizontal=True, key=f"replace_with_what_{stage}") - + + replace_with = options[0] if len(options) == 1 else st.radio("Replace with:", options, horizontal=True, key=f"replace_with_what_{stage}_{timing_uuid}") if replace_with == "Other Frame": - which_stage_to_use_for_replacement = st.radio("Select stage to use:", [ - ImageStage.MAIN_VARIANT.value, ImageStage.SOURCE_IMAGE.value], key=f"which_stage_to_use_for_replacement_{stage}", horizontal=True) + ImageStage.MAIN_VARIANT.value, ImageStage.SOURCE_IMAGE.value], key=f"which_stage_to_use_for_replacement_{stage}_{timing_uuid}", horizontal=True) which_image_to_use_for_replacement = st.number_input("Select image to use:", min_value=0, max_value=len( timing_details)-1, value=0, key=f"which_image_to_use_for_replacement_{stage}") - + if which_stage_to_use_for_replacement == 
ImageStage.SOURCE_IMAGE.value: selected_image = timing_details[which_image_to_use_for_replacement].source_image - - elif which_stage_to_use_for_replacement == ImageStage.MAIN_VARIANT.value: selected_image = timing_details[which_image_to_use_for_replacement].primary_image - - + st.image(selected_image.local_path, use_column_width=True) - if st.button("Replace with selected frame", disabled=False,key=f"replace_with_selected_frame_{stage}"): + if st.button("Replace with selected frame", disabled=False,key=f"replace_with_selected_frame_{stage}_{timing_uuid}"): if stage == "source": - data_repo.update_specific_timing(timing.uuid, source_image_id=selected_image.uuid) st.success("Replaced") time.sleep(1) st.rerun() - else: number_of_image_variants = add_image_variant( selected_image.uuid, timing.uuid) @@ -153,29 +145,26 @@ def replace_image_widget(timing_uuid, stage): st.success("Replaced") time.sleep(1) st.rerun() - + elif replace_with == "Uploaded Frame": if stage == "source": uploaded_file = st.file_uploader("Upload Source Image", type=[ "png", "jpeg"], accept_multiple_files=False) - if st.button("Upload Source Image"): - if uploaded_file: - timing = data_repo.get_timing_from_uuid(timing.uuid) - if save_uploaded_image(uploaded_file, timing.project.uuid, timing.uuid, "source"): - time.sleep(1.5) - st.rerun() + if uploaded_file != None: + if st.button("Upload Source Image"): + if uploaded_file: + timing = data_repo.get_timing_from_uuid(timing.uuid) + if save_and_promote_image(uploaded_file, timing.project.uuid, timing.uuid, "source"): + time.sleep(1.5) + st.rerun() else: - replacement_frame = st.file_uploader("Upload a replacement frame here", type=[ - "png", "jpeg"], accept_multiple_files=False, key=f"replacement_frame_upload_{stage}") - if st.button("Replace frame", disabled=False): - images_for_model = [] - timing = data_repo.get_timing_from_uuid(timing.uuid) - if replacement_frame: - saved_file = save_uploaded_image(replacement_frame, timing.project.uuid, 
timing.uuid, "styled") - if saved_file: - number_of_image_variants = add_image_variant(saved_file.uuid, timing.uuid) - promote_image_variant( - timing.uuid, number_of_image_variants - 1) + replacement_frame = st.file_uploader("Upload Styled Image", type=[ + "png", "jpeg"], accept_multiple_files=False, key=f"replacement_frame_upload_{stage}_{timing_uuid}") + if replacement_frame != None: + if st.button("Replace frame", disabled=False): + timing = data_repo.get_timing_from_uuid(timing.uuid) + if replacement_frame: + save_and_promote_image(replacement_frame, timing.project.uuid, timing.uuid, "styled") st.success("Replaced") time.sleep(1) st.rerun() @@ -184,7 +173,7 @@ def jump_to_single_frame_view_button(display_number, timing_details): if st.button(f"Jump to #{display_number}"): st.session_state['prev_frame_index'] = display_number st.session_state['current_frame_uuid'] = timing_details[st.session_state['current_frame_index'] - 1].uuid - st.session_state['frame_styling_view_type'] = "Individual View" + st.session_state['frame_styling_view_type'] = "Individual" st.session_state['change_view_type'] = True st.rerun() diff --git a/ui_components/widgets/sidebar_logger.py b/ui_components/widgets/sidebar_logger.py index 73794424..6f20deef 100644 --- a/ui_components/widgets/sidebar_logger.py +++ b/ui_components/widgets/sidebar_logger.py @@ -1,7 +1,7 @@ import streamlit as st from shared.constants import InferenceParamType, InferenceStatus - +from ui_components.widgets.frame_movement_widgets import jump_to_single_frame_view_button import json import math from ui_components.widgets.frame_selector import update_current_frame_index @@ -11,6 +11,8 @@ def sidebar_logger(project_uuid): data_repo = DataRepo() + timing_details = data_repo.get_timing_list_from_project(project_uuid=project_uuid) + a1, _, a3 = st.columns([1, 0.2, 1]) refresh_disabled = False # not any(log.status in [InferenceStatus.QUEUED.value, InferenceStatus.IN_PROGRESS.value] for log in log_list) @@ -41,67 +43,72 @@ 
def sidebar_logger(project_uuid): st.write("Total page count: ", total_page_count) # display_list = log_list[(page_number - 1) * items_per_page : page_number * items_per_page] - file_list = data_repo.get_file_list_from_log_uuid_list([log.uuid for log in log_list]) - log_file_dict = {} - for file in file_list: - log_file_dict[str(file.inference_log.uuid)] = file - - st.markdown("---") - for _, log in enumerate(log_list): - origin_data = json.loads(log.input_params).get(InferenceParamType.ORIGIN_DATA.value, None) - if not log.status: - continue + if log_list is not None: - output_url = None - if log.uuid in log_file_dict: - output_url = log_file_dict[log.uuid].location - - c1, c2, c3 = st.columns([1, 1 if output_url else 0.01, 1]) - - with c1: - input_params = json.loads(log.input_params) - st.caption(f"Prompt:") - prompt = input_params.get('prompt', 'No prompt found') - st.write(f'"{prompt[:30]}..."' if len(prompt) > 30 else f'"{prompt}"') - st.caption(f"Model:") - st.write(json.loads(log.output_details)['model_name'].split('/')[-1]) - - with c2: - if output_url: - if output_url.endswith('png') or output_url.endswith('jpg') or output_url.endswith('jpeg') or output_url.endswith('gif'): - st.image(output_url) - elif output_url.endswith('mp4'): - st.video(output_url, format='mp4', start_time=0) - else: - st.info("No data to display") - - with c3: - if log.status == InferenceStatus.COMPLETED.value: - st.success("Completed") - elif log.status == InferenceStatus.FAILED.value: - st.warning("Failed") - elif log.status == InferenceStatus.QUEUED.value: - st.info("Queued") - elif log.status == InferenceStatus.IN_PROGRESS.value: - st.info("In progress") - elif log.status == InferenceStatus.CANCELED.value: - st.warning("Canceled") - - if output_url: - if 'timing_uuid' in origin_data: - timing = data_repo.get_timing_from_uuid(origin_data['timing_uuid']) - if timing: - if st.button(f"Jump to frame {timing.aux_frame_index + 1}", key=str(log.uuid)): - 
update_current_frame_index(timing.aux_frame_index + 1) - else: - if st.button(f"Jump to explorer view", key=str(log.uuid)): - # TODO: fix this - st.session_state['main_view_type'] = "Creative Process" - st.session_state['frame_styling_view_type_index'] = 0 - st.session_state['frame_styling_view_type'] = "Explorer" - st.session_state['change_view_type'] = False - st.rerun() - + file_list = data_repo.get_file_list_from_log_uuid_list([log.uuid for log in log_list]) + log_file_dict = {} + for file in file_list: + log_file_dict[str(file.inference_log.uuid)] = file + + st.markdown("---") + + for _, log in enumerate(log_list): + origin_data = json.loads(log.input_params).get(InferenceParamType.ORIGIN_DATA.value, None) + if not log.status: + continue - st.markdown("---") \ No newline at end of file + output_url = None + if log.uuid in log_file_dict: + output_url = log_file_dict[log.uuid].location + + c1, c2, c3 = st.columns([1, 1 if output_url else 0.01, 1]) + + with c1: + input_params = json.loads(log.input_params) + st.caption(f"Prompt:") + prompt = input_params.get('prompt', 'No prompt found') + st.write(f'"{prompt[:30]}..."' if len(prompt) > 30 else f'"{prompt}"') + st.caption(f"Model:") + st.write(json.loads(log.output_details)['model_name'].split('/')[-1]) + + with c2: + if output_url: + if output_url.endswith('png') or output_url.endswith('jpg') or output_url.endswith('jpeg') or output_url.endswith('gif'): + st.image(output_url) + elif output_url.endswith('mp4'): + st.video(output_url, format='mp4', start_time=0) + else: + st.info("No data to display") + + with c3: + if log.status == InferenceStatus.COMPLETED.value: + st.success("Completed") + elif log.status == InferenceStatus.FAILED.value: + st.warning("Failed") + elif log.status == InferenceStatus.QUEUED.value: + st.info("Queued") + elif log.status == InferenceStatus.IN_PROGRESS.value: + st.info("In progress") + elif log.status == InferenceStatus.CANCELED.value: + st.warning("Canceled") + + if output_url: + if 
'timing_uuid' in origin_data: + timing = data_repo.get_timing_from_uuid(origin_data['timing_uuid']) + if st.session_state['frame_styling_view_type'] != "Timeline": + if timing: + jump_to_single_frame_view_button(timing.aux_frame_index + 1, timing_details) + + else: + if st.session_state['frame_styling_view_type'] != "Explorer": + if st.button(f"Jump to explorer", key=str(log.uuid)): + # TODO: fix this + st.session_state['main_view_type'] = "Creative Process" + st.session_state['frame_styling_view_type_index'] = 0 + st.session_state['frame_styling_view_type'] = "Explorer" + st.session_state['change_view_type'] = False + st.rerun() + + + st.markdown("---") \ No newline at end of file diff --git a/ui_components/widgets/style_explorer_element.py b/ui_components/widgets/style_explorer_element.py index eb59f837..56cba4e4 100644 --- a/ui_components/widgets/style_explorer_element.py +++ b/ui_components/widgets/style_explorer_element.py @@ -1,13 +1,12 @@ import json import streamlit as st -from ui_components.methods.common_methods import process_inference_output, promote_image_variant +from ui_components.methods.common_methods import process_inference_output from ui_components.methods.file_methods import generate_pil_image from ui_components.methods.ml_methods import query_llama2 from ui_components.widgets.add_key_frame_element import add_key_frame from utils.constants import MLQueryObject from utils.data_repo.data_repo import DataRepo from shared.constants import AIModelType, InferenceType, InternalFileTag, InternalFileType, SortOrder -import replicate from utils.ml_processor.ml_interface import get_ml_client from utils.ml_processor.replicate.constants import REPLICATE_MODEL @@ -67,7 +66,7 @@ def style_explorer_element(project_uuid): query_obj = MLQueryObject( timing_uuid=None, model_uuid=None, - guidance_scale=7.5, + guidance_scale=5, seed=-1, num_inference_steps=30, strength=1, @@ -98,7 +97,7 @@ def style_explorer_element(project_uuid): 
tag=InternalFileTag.GALLERY_IMAGE.value, project_id=project_uuid, page=page_number, - data_per_page=num_items_per_page, + data_per_page=10, sort_order=SortOrder.DESCENDING.value # newly created images appear first ) @@ -118,7 +117,7 @@ def style_explorer_element(project_uuid): cols = st.columns(num_columns) for j in range(num_columns): if i + j < len(gallery_image_list): - with cols[j]: + with cols[j]: st.image(gallery_image_list[i + j].location, use_column_width=True) with st.expander(f'Variant #{(page_number - 1) * num_items_per_page + i + j + 1}', False): if gallery_image_list[i + j].inference_log: @@ -136,7 +135,7 @@ def style_explorer_element(project_uuid): if st.button(f"Add to timeline", key=f"Promote Variant #{(page_number - 1) * num_items_per_page + i + j + 1} for {st.session_state['current_frame_index']}", help="Promote this variant to the primary image", use_container_width=True): pil_image = generate_pil_image(gallery_image_list[i + j].location) - add_key_frame(pil_image, False, 2.5) + add_key_frame(pil_image, False, 2.5, len(data_repo.get_timing_list_from_project(project_uuid))) st.rerun() st.markdown("***") else: diff --git a/ui_components/widgets/timeline_view.py b/ui_components/widgets/timeline_view.py index 704f6069..d6e32e7a 100644 --- a/ui_components/widgets/timeline_view.py +++ b/ui_components/widgets/timeline_view.py @@ -56,11 +56,11 @@ def timeline_view(project_uuid, stage): else: with col2: time_setter_toggle = st_memory.toggle("Time Setter", value=True, key="time_setter_toggle") - replace_image_widget_toggle = st_memory.toggle("Replace Image", value=False, key="replace_image_widget_toggle") + delete_frames_toggle = st_memory.toggle("Delete Frames", value=True, key="delete_frames_toggle") duration_setter_toggle = st_memory.toggle("Duration Setter", value=False, key="duration_setter_toggle") with col3: move_frames_toggle = st_memory.toggle("Move Frames", value=True, key="move_frames_toggle") - delete_frames_toggle = st_memory.toggle("Delete 
Frames", value=False, key="delete_frames_toggle") + replace_image_widget_toggle = st_memory.toggle("Replace Image", value=False, key="replace_image_widget_toggle") change_position_toggle = st_memory.toggle("Change Position", value=False, key="change_position_toggle") with header_col_3: From c2ac1fb915660c4344a426556d78474c8c16d7b1 Mon Sep 17 00:00:00 2001 From: piyushk52 Date: Thu, 19 Oct 2023 14:52:35 +0530 Subject: [PATCH 098/164] data_per_page + realistic_vision inference fix --- ui_components/methods/common_methods.py | 12 ++---------- ui_components/widgets/frame_movement_widgets.py | 2 +- ui_components/widgets/style_explorer_element.py | 3 ++- 3 files changed, 5 insertions(+), 12 deletions(-) diff --git a/ui_components/methods/common_methods.py b/ui_components/methods/common_methods.py index 0ab17bc8..f1527be8 100644 --- a/ui_components/methods/common_methods.py +++ b/ui_components/methods/common_methods.py @@ -825,7 +825,7 @@ def process_inference_output(**kwargs): output_file = data_repo.create_file( name=filename, type=InternalFileType.IMAGE.value, - hosted_url=output[0], + hosted_url=output[0] if isinstance(output, list) else output, inference_log_id=log.uuid ) @@ -926,14 +926,6 @@ def process_inference_output(**kwargs): output = kwargs.get('output') if output: - if isinstance(output, str) and output.startswith("http"): - temp_output_file = generate_temp_file(output, '.png') - output = None - with open(temp_output_file.name, 'rb') as f: - output = f.read() - - os.remove(temp_output_file.name) - log_uuid = kwargs.get('log_uuid') project_uuid = kwargs.get('project_uuid') log = data_repo.get_inference_log_from_uuid(log_uuid) @@ -941,7 +933,7 @@ def process_inference_output(**kwargs): output_file = data_repo.create_file( name=filename, type=InternalFileType.IMAGE.value, - hosted_url=output[0], + hosted_url=output[0] if isinstance(output, list) else output, inference_log_id=log.uuid, project_id=project_uuid, tag=InternalFileTag.GALLERY_IMAGE.value diff --git 
a/ui_components/widgets/frame_movement_widgets.py b/ui_components/widgets/frame_movement_widgets.py index 2ce6b6a2..2a52f551 100644 --- a/ui_components/widgets/frame_movement_widgets.py +++ b/ui_components/widgets/frame_movement_widgets.py @@ -1,6 +1,6 @@ import time import streamlit as st -from ui_components.methods.common_methods import add_image_variant, promote_image_variant, save_uploaded_image, update_clip_duration_of_all_timing_frames +from ui_components.methods.common_methods import add_image_variant, promote_image_variant, save_and_promote_image, save_uploaded_image, update_clip_duration_of_all_timing_frames from ui_components.models import InternalFrameTimingObject from utils.constants import ImageStage diff --git a/ui_components/widgets/style_explorer_element.py b/ui_components/widgets/style_explorer_element.py index 56cba4e4..44af92d2 100644 --- a/ui_components/widgets/style_explorer_element.py +++ b/ui_components/widgets/style_explorer_element.py @@ -75,6 +75,7 @@ def style_explorer_element(project_uuid): negative_prompt="bad image, worst image, bad anatomy, washed out colors", height=project_settings.height, width=project_settings.width, + project_uuid=project_uuid ) replicate_model = REPLICATE_MODEL.get_model_by_db_obj(model_dict[model_name]) @@ -97,7 +98,7 @@ def style_explorer_element(project_uuid): tag=InternalFileTag.GALLERY_IMAGE.value, project_id=project_uuid, page=page_number, - data_per_page=10, + data_per_page=num_items_per_page, sort_order=SortOrder.DESCENDING.value # newly created images appear first ) From e94eab285c14c59b5852d84ae7ec8305c2d73b25 Mon Sep 17 00:00:00 2001 From: piyushk52 Date: Thu, 19 Oct 2023 15:23:40 +0530 Subject: [PATCH 099/164] gallery frame addition changed --- ui_components/widgets/add_key_frame_element.py | 6 ++++-- ui_components/widgets/style_explorer_element.py | 7 ++++++- 2 files changed, 10 insertions(+), 3 deletions(-) diff --git a/ui_components/widgets/add_key_frame_element.py 
b/ui_components/widgets/add_key_frame_element.py index 7573bfa0..a4bc5f40 100644 --- a/ui_components/widgets/add_key_frame_element.py +++ b/ui_components/widgets/add_key_frame_element.py @@ -95,7 +95,7 @@ def add_key_frame_element(timing_details, project_uuid): return selected_image, inherit_styling_settings, how_long_after, transformation_stage -def add_key_frame(selected_image, inherit_styling_settings, how_long_after, target_frame_position=None): +def add_key_frame(selected_image, inherit_styling_settings, how_long_after, target_frame_position=None, refresh_state=True): data_repo = DataRepo() project_uuid = st.session_state['project_uuid'] timing_details = data_repo.get_timing_list_from_project(project_uuid) @@ -145,4 +145,6 @@ def add_key_frame(selected_image, inherit_styling_settings, how_long_after, targ st.session_state['page'] = CreativeProcessType.STYLING.value st.session_state['section_index'] = 0 - st.rerun() \ No newline at end of file + + if refresh_state: + st.rerun() \ No newline at end of file diff --git a/ui_components/widgets/style_explorer_element.py b/ui_components/widgets/style_explorer_element.py index 44af92d2..007bbd72 100644 --- a/ui_components/widgets/style_explorer_element.py +++ b/ui_components/widgets/style_explorer_element.py @@ -136,8 +136,13 @@ def style_explorer_element(project_uuid): if st.button(f"Add to timeline", key=f"Promote Variant #{(page_number - 1) * num_items_per_page + i + j + 1} for {st.session_state['current_frame_index']}", help="Promote this variant to the primary image", use_container_width=True): pil_image = generate_pil_image(gallery_image_list[i + j].location) - add_key_frame(pil_image, False, 2.5, len(data_repo.get_timing_list_from_project(project_uuid))) + add_key_frame(pil_image, False, 2.5, len(data_repo.get_timing_list_from_project(project_uuid)), refresh_state=False) + + # removing this from the gallery view + data_repo.update_file(gallery_image_list[i + j].uuid, tag="") + st.rerun() + st.markdown("***") 
else: st.warning("No images present") From 674903f9e6f64b27ddfb701427a6f0f94767082c Mon Sep 17 00:00:00 2001 From: piyushk52 Date: Thu, 19 Oct 2023 15:59:42 +0530 Subject: [PATCH 100/164] llama removed from log list --- backend/db_repo.py | 4 +++- ui_components/widgets/sidebar_logger.py | 9 ++++++++- 2 files changed, 11 insertions(+), 2 deletions(-) diff --git a/backend/db_repo.py b/backend/db_repo.py index b3ed0c06..e024f44c 100644 --- a/backend/db_repo.py +++ b/backend/db_repo.py @@ -562,7 +562,7 @@ def get_inference_log_from_uuid(self, uuid): return InternalResponse(payload, 'inference log fetched', True) - def get_all_inference_log_list(self, project_id=None, page=1, data_per_page=5, status_list=None): + def get_all_inference_log_list(self, project_id=None, page=1, data_per_page=5, status_list=None, exclude_model_list=None): if project_id: project = Project.objects.filter(uuid=project_id, is_disabled=False).first() log_list = InferenceLog.objects.filter(project_id=project.id, is_disabled=False).order_by('-created_on').all() @@ -574,6 +574,8 @@ def get_all_inference_log_list(self, project_id=None, page=1, data_per_page=5, s else: log_list = log_list.exclude(status__in=["", None]) + log_list = log_list.exclude(model_id=None) # hackish sol to exclude non-image/video logs + paginator = Paginator(log_list, data_per_page) if page > paginator.num_pages or page < 1: return InternalResponse({}, "invalid page number", False) diff --git a/ui_components/widgets/sidebar_logger.py b/ui_components/widgets/sidebar_logger.py index 6f20deef..2f757259 100644 --- a/ui_components/widgets/sidebar_logger.py +++ b/ui_components/widgets/sidebar_logger.py @@ -7,6 +7,7 @@ from ui_components.widgets.frame_selector import update_current_frame_index from utils.data_repo.data_repo import DataRepo +from utils.ml_processor.replicate.constants import REPLICATE_MODEL def sidebar_logger(project_uuid): data_repo = DataRepo() @@ -33,9 +34,15 @@ def sidebar_logger(project_uuid): b1, b2 = 
st.columns([1, 1]) project_setting = data_repo.get_project_setting(project_uuid) + page_number = b1.number_input('Page number', min_value=1, max_value=project_setting.total_log_pages, value=1, step=1) items_per_page = b2.slider("Items per page", min_value=1, max_value=20, value=5, step=1) - log_list, total_page_count = data_repo.get_all_inference_log_list(project_id=project_uuid, page=page_number, data_per_page=items_per_page, status_list=status_list) + log_list, total_page_count = data_repo.get_all_inference_log_list( + project_id=project_uuid, + page=page_number, + data_per_page=items_per_page, + status_list=status_list + ) if project_setting.total_log_pages != total_page_count: project_setting.total_log_pages = total_page_count From a0786f40b7f6d5c47b076d7a8cf4d856f383531c Mon Sep 17 00:00:00 2001 From: piyushk52 Date: Thu, 19 Oct 2023 17:38:57 +0530 Subject: [PATCH 101/164] sdxl resize fix --- backend/serializers/dto.py | 3 ++- ui_components/methods/file_methods.py | 32 +++++++++++++++++++++++---- ui_components/models.py | 1 + utils/data_repo/data_repo.py | 8 ++++++- utils/ml_processor/replicate/utils.py | 4 ++-- 5 files changed, 40 insertions(+), 8 deletions(-) diff --git a/backend/serializers/dto.py b/backend/serializers/dto.py index 871917b7..a84dd699 100644 --- a/backend/serializers/dto.py +++ b/backend/serializers/dto.py @@ -67,10 +67,11 @@ class Meta: class InternalFileDto(serializers.ModelSerializer): + project = ProjectDto() # TODO: pass this as context to speed up the api inference_log = InferenceLogDto() class Meta: model = InternalFileObject - fields = ('uuid', 'name', 'local_path', 'type', 'hosted_url', 'created_on', 'inference_log') + fields = ('uuid', 'name', 'local_path', 'type', 'hosted_url', 'created_on', 'inference_log', 'project') class TimingDto(serializers.ModelSerializer): diff --git a/ui_components/methods/file_methods.py b/ui_components/methods/file_methods.py index 666d90a1..c76e0634 100644 --- a/ui_components/methods/file_methods.py 
+++ b/ui_components/methods/file_methods.py @@ -13,15 +13,21 @@ import requests import streamlit as st from shared.constants import SERVER, InternalFileType, ServerType +from ui_components.models import InternalFileObject from utils.data_repo.data_repo import DataRepo # depending on the environment it will either save or host the PIL image object -def save_or_host_file(file, path, mime_type='image/png'): +def save_or_host_file(file, path, mime_type='image/png', dim=None): data_repo = DataRepo() # TODO: fix session state management, remove direct access out side the main code - project_setting = data_repo.get_project_setting(st.session_state['project_uuid']) - if project_setting: - file = zoom_and_crop(file, project_setting.width, project_setting.height) + if dim: + width, height = dim[0], dim[1] + elif 'project_uuid' in st.session_state and st.session_state['project_uuid']: + project_setting = data_repo.get_project_setting(st.session_state['project_uuid']) + width, height = project_setting.width, project_setting.height + + if width and height: + file = zoom_and_crop(file, width, height) else: # new project file = zoom_and_crop(file, 512, 512) @@ -57,6 +63,24 @@ def zoom_and_crop(file, width, height): return file +# resizes file dimensions to current project_settings +def normalize_size_internal_file_obj(file_obj: InternalFileObject): + if not file_obj or file_obj.type != InternalFileType.IMAGE.value: + return file_obj + + data_repo = DataRepo() + project_setting = data_repo.get_project_setting(file_obj.project.uuid) + dim = (project_setting.width, project_setting.height) + + pil_file = generate_pil_image(file_obj.location) + uploaded_url = save_or_host_file(pil_file, file_obj.location, mime_type='image/png', dim=dim) + if uploaded_url: + data_repo = DataRepo() + data_repo.update_file(file_obj.uuid, hosted_url=uploaded_url) + + return file_obj + + def save_or_host_file_bytes(video_bytes, path, ext=".mp4"): uploaded_url = None if SERVER != 
ServerType.DEVELOPMENT.value: diff --git a/ui_components/models.py b/ui_components/models.py index e6ab31d3..ab505c7c 100644 --- a/ui_components/models.py +++ b/ui_components/models.py @@ -17,6 +17,7 @@ def __init__(self, **kwargs): self.tag = kwargs['tag'] if key_present('tag', kwargs) else None self.created_on = kwargs['created_on'] if key_present('created_on', kwargs) else None self.inference_log = InferenceLogObject(**kwargs['inference_log']) if key_present('inference_log', kwargs) else None + self.project = InternalProjectObject(**kwargs['project']) if key_present('project', kwargs) else None @property def location(self): diff --git a/utils/data_repo/data_repo.py b/utils/data_repo/data_repo.py index a5df9424..98ce2247 100644 --- a/utils/data_repo/data_repo.py +++ b/utils/data_repo/data_repo.py @@ -106,7 +106,13 @@ def create_file(self, **kwargs): res = self.db_repo.create_file(**kwargs) file = res.data['data'] if res.status else None - return InternalFileObject(**file) if file else None + file = InternalFileObject(**file) if file else None + + if file and file.type == InternalFileType.IMAGE.value: + from ui_components.methods.file_methods import normalize_size_internal_file_obj + file = normalize_size_internal_file_obj(file) + + return file def delete_file_from_uuid(self, uuid): res = self.db_repo.delete_file_from_uuid(uuid) diff --git a/utils/ml_processor/replicate/utils.py b/utils/ml_processor/replicate/utils.py index 5868fe46..ae4a752a 100644 --- a/utils/ml_processor/replicate/utils.py +++ b/utils/ml_processor/replicate/utils.py @@ -72,8 +72,8 @@ def get_model_params_from_query_obj(model, query_obj: MLQueryObject): data = { "prompt" : query_obj.prompt, "negative_prompt" : query_obj.negative_prompt, - "width" : query_obj.width, - "height" : query_obj.height, + "width" : max(768, query_obj.width), # 768 is the default for sdxl + "height" : max(768, query_obj.height), "mask": mask } From bbe9e7aac59ce166e9a58b433627c4f017583bbb Mon Sep 17 00:00:00 2001 From: 
peter942 Date: Fri, 20 Oct 2023 02:33:50 +0200 Subject: [PATCH 102/164] Little improvements --- ui_components/methods/ml_methods.py | 3 +- .../widgets/style_explorer_element.py | 42 ++++++++++++------- utils/ml_processor/replicate/constants.py | 2 +- 3 files changed, 29 insertions(+), 18 deletions(-) diff --git a/ui_components/methods/ml_methods.py b/ui_components/methods/ml_methods.py index 08974d94..642ae498 100644 --- a/ui_components/methods/ml_methods.py +++ b/ui_components/methods/ml_methods.py @@ -212,8 +212,7 @@ def inpainting(input_image: str, prompt, negative_prompt, timing_uuid, invert_ma input_image = open(input_image, "rb") ml_client = get_ml_client() - output, log = ml_client.predict_model_output(REPLICATE_MODEL.andreas_sd_inpainting, mask=mask, image=input_image, prompt=prompt, - invert_mask=invert_mask, negative_prompt=negative_prompt, num_inference_steps=25) + output, log = ml_client.predict_model_output(REPLICATE_MODEL.andreas_sd_inpainting, mask=mask, image=input_image, prompt=prompt, negative_prompt=negative_prompt, num_inference_steps=25, strength=1.0) file_name = str(uuid.uuid4()) + ".png" image_file = data_repo.create_file( diff --git a/ui_components/widgets/style_explorer_element.py b/ui_components/widgets/style_explorer_element.py index 007bbd72..4599b93e 100644 --- a/ui_components/widgets/style_explorer_element.py +++ b/ui_components/widgets/style_explorer_element.py @@ -17,8 +17,10 @@ def style_explorer_element(project_uuid): data_repo = DataRepo() project_settings = data_repo.get_project_setting(project_uuid) - _, a2, _ = st.columns([0.5, 1, 0.5]) + _, a2, a3,_= st.columns([0.5, 1, 0.5,0.5]) prompt = a2.text_area("What's your base prompt?", key="prompt", help="This will be included at the beginning of each prompt") + base_prompt_position = a3.radio("Where would you like to place the base prompt?", options=["Beginning", "End"], key="base_prompt_position", help="This will be included at the beginning of each prompt") + _, b2, b3, b4, b5, _ 
= st.columns([0.5, 1, 1, 1, 1, 0.5]) character_instructions = create_variate_option(b2, "character") @@ -45,22 +47,25 @@ def style_explorer_element(project_uuid): if e2.button("Generate images", key="generate_images", use_container_width=True, type="primary"): ml_client = get_ml_client() counter = 0 - varied_text = "" num_models = len(models_to_use) num_images_per_model = number_to_generate // num_models - varied_prompt = create_prompt( + varied_text = "" + for _ in range(num_images_per_model): + for model_name in models_to_use: + if counter % 4 == 0 and (styling_instructions or character_instructions or action_instructions or scene_instructions): + varied_prompt = create_prompt( styling_instructions=styling_instructions, character_instructions=character_instructions, action_instructions=action_instructions, scene_instructions=scene_instructions ) - for _ in range(num_images_per_model): - for model_name in models_to_use: - if counter % 4 == 0 and (styling_instructions or character_instructions or action_instructions or scene_instructions): varied_text = varied_prompt - prompt_with_variations = f"{prompt}, {varied_text}" if prompt else varied_text - st.write(f"Prompt: '{prompt_with_variations}'") - st.write(f"Model: {model_name}") + if base_prompt_position == "Beginning": + prompt_with_variations = f"{prompt}, {varied_text}" if prompt else varied_text + else: # base_prompt_position is "End" + prompt_with_variations = f"{varied_text}, {prompt}" if prompt else varied_text + # st.write(f"Prompt: '{prompt_with_variations}'") + # st.write(f"Model: {model_name}") counter += 1 query_obj = MLQueryObject( @@ -90,9 +95,13 @@ def style_explorer_element(project_uuid): process_inference_output(**inference_data) project_setting = data_repo.get_project_setting(project_uuid) + st.markdown("***") page_number = st.radio("Select page", options=range(1, project_setting.total_gallery_pages + 1), horizontal=True) - num_items_per_page = 10 - + + f1,f2 = st.columns([1, 1]) + num_columns = 
f1.slider('Number of columns:', min_value=1, max_value=10, value=4) + num_items_per_page = f2.slider('Items per page:', min_value=1, max_value=100, value=20) + st.markdown("***") gallery_image_list, res_payload = data_repo.get_all_file_list( file_type=InternalFileType.IMAGE.value, tag=InternalFileTag.GALLERY_IMAGE.value, @@ -109,8 +118,8 @@ def style_explorer_element(project_uuid): total_image_count = res_payload['count'] if gallery_image_list and len(gallery_image_list): - st.markdown("***") - num_columns = st.slider('Number of columns', min_value=1, max_value=10, value=4) + + start_index = 0 end_index = min(start_index + num_items_per_page, total_image_count) @@ -159,6 +168,8 @@ def create_variate_option(column, key): def create_prompt(**kwargs): text_list = [] + order = ["character_instructions", "styling_instructions", "action_instructions", "scene_instructions"] + system_instruction_template_list = { "character_instructions": "Input|Character Descriptions:\nSickly old man|Francois Leger,old Russian man, beaten-down look, wearing suit\nPretty young woman|Jules van Cohen,beautiful young woman, floral dress,vibrant\nIrish boy|James McCarthy,10 year old Irish boy,red hair,pink shirt,wheezing in a small voice\nYoung thug|Hughie Banks,23 y/o English football hooligan with skinned head", @@ -167,8 +178,9 @@ def create_prompt(**kwargs): "scene_instructions": "Input|Scene Description:\nForest|Misty woods with towering trees and glowing plants.\nFuturistic city|Skyscrapers, flying cars, neon lights in a futuristic metropolis.\nMedieval|Castle courtyard with knights, cobblestones, and a fountain.\nBeach|Golden sands, rolling waves, and a vibrant sunset.\nApocalypse|Ruined buildings and desolation in a bleak wasteland.", } - for instruction_type, user_instruction in kwargs.items(): - if instruction_type in system_instruction_template_list and user_instruction: + for instruction_type in order: + user_instruction = kwargs.get(instruction_type) + if user_instruction and 
instruction_type in system_instruction_template_list: result = query_llama2(user_instruction, system_instruction_template_list[instruction_type]) text_list.append(result) diff --git a/utils/ml_processor/replicate/constants.py b/utils/ml_processor/replicate/constants.py index 7b7dd924..4e1f5b22 100644 --- a/utils/ml_processor/replicate/constants.py +++ b/utils/ml_processor/replicate/constants.py @@ -9,7 +9,7 @@ class ReplicateModel: version: str class REPLICATE_MODEL: - andreas_sd_inpainting = ReplicateModel("andreasjansson/stable-diffusion-inpainting", "e490d072a34a94a11e9711ed5a6ba621c3fab884eda1665d9d3a282d65a21180") + andreas_sd_inpainting = ReplicateModel("lucataco/sdxl-inpainting", "f03c01943bacdee38d6a5d216586bf9bfbfd799350aed263aa32980efc173f0b") clones_lora_training = ReplicateModel("cloneofsimo/lora-training", "b2a308762e36ac48d16bfadc03a65493fe6e799f429f7941639a6acec5b276cc") clones_lora_training_2 = ReplicateModel("cloneofsimo/lora", "fce477182f407ffd66b94b08e761424cabd13b82b518754b83080bc75ad32466") google_frame_interpolation = ReplicateModel("google-research/frame-interpolation", "4f88a16a13673a8b589c18866e540556170a5bcb2ccdc12de556e800e9456d3d") From 099a63e1c2638828feaed8eca6d3a6599094ed85 Mon Sep 17 00:00:00 2001 From: piyushK52 Date: Fri, 20 Oct 2023 22:52:29 +0530 Subject: [PATCH 103/164] duplicate widget fix --- ui_components/widgets/frame_movement_widgets.py | 4 ++-- ui_components/widgets/list_view.py | 2 +- ui_components/widgets/sidebar_logger.py | 13 +++++-------- ui_components/widgets/timeline_view.py | 2 +- 4 files changed, 9 insertions(+), 12 deletions(-) diff --git a/ui_components/widgets/frame_movement_widgets.py b/ui_components/widgets/frame_movement_widgets.py index 2a52f551..d42b38d1 100644 --- a/ui_components/widgets/frame_movement_widgets.py +++ b/ui_components/widgets/frame_movement_widgets.py @@ -169,8 +169,8 @@ def replace_image_widget(timing_uuid, stage, options=["Uploaded Frame","Other Fr time.sleep(1) st.rerun() -def 
jump_to_single_frame_view_button(display_number, timing_details): - if st.button(f"Jump to #{display_number}"): +def jump_to_single_frame_view_button(display_number, timing_details, src): + if st.button(f"Jump to #{display_number}", key=src): st.session_state['prev_frame_index'] = display_number st.session_state['current_frame_uuid'] = timing_details[st.session_state['current_frame_index'] - 1].uuid st.session_state['frame_styling_view_type'] = "Individual" diff --git a/ui_components/widgets/list_view.py b/ui_components/widgets/list_view.py index c7e20e8d..e89492d3 100644 --- a/ui_components/widgets/list_view.py +++ b/ui_components/widgets/list_view.py @@ -66,7 +66,7 @@ def styling_list_view(start_index, end_index, shift_frames_setting, project_uuid st.write("") - jump_to_single_frame_view_button(display_number,timing_details) + jump_to_single_frame_view_button(display_number,timing_details, 'styling_list_view_'+str(timing_details[i].uuid)) st.markdown("---") btn1, btn2, btn3 = st.columns([2, 1, 1]) diff --git a/ui_components/widgets/sidebar_logger.py b/ui_components/widgets/sidebar_logger.py index 2f757259..ee2079d8 100644 --- a/ui_components/widgets/sidebar_logger.py +++ b/ui_components/widgets/sidebar_logger.py @@ -51,8 +51,7 @@ def sidebar_logger(project_uuid): st.write("Total page count: ", total_page_count) # display_list = log_list[(page_number - 1) * items_per_page : page_number * items_per_page] - if log_list is not None: - + if log_list and len(log_list): file_list = data_repo.get_file_list_from_log_uuid_list([log.uuid for log in log_list]) log_file_dict = {} for file in file_list: @@ -100,12 +99,10 @@ def sidebar_logger(project_uuid): elif log.status == InferenceStatus.CANCELED.value: st.warning("Canceled") - if output_url: - if 'timing_uuid' in origin_data: - timing = data_repo.get_timing_from_uuid(origin_data['timing_uuid']) - if st.session_state['frame_styling_view_type'] != "Timeline": - if timing: - 
jump_to_single_frame_view_button(timing.aux_frame_index + 1, timing_details) + if output_url and 'timing_uuid' in origin_data: + timing = data_repo.get_timing_from_uuid(origin_data['timing_uuid']) + if timing and st.session_state['frame_styling_view_type'] != "Timeline": + jump_to_single_frame_view_button(timing.aux_frame_index + 1, timing_details, 'sidebar_'+str(log.uuid)) else: if st.session_state['frame_styling_view_type'] != "Explorer": diff --git a/ui_components/widgets/timeline_view.py b/ui_components/widgets/timeline_view.py index d6e32e7a..5f714591 100644 --- a/ui_components/widgets/timeline_view.py +++ b/ui_components/widgets/timeline_view.py @@ -32,7 +32,7 @@ def timeline_view_buttons(i, j, timing_details, shift_frames_setting, time_sette if time_setter_toggle or duration_setter_toggle or replace_image_widget_toggle or move_frames_toggle or delete_frames_toggle or change_position_toggle: st.caption("--") - jump_to_single_frame_view_button(i + j + 1, timing_details) + jump_to_single_frame_view_button(i + j + 1, timing_details, 'timeline_btn_'+str(timing_details[i+j].uuid)) def timeline_view(project_uuid, stage): From def62b604906a4fa239bd4a647f519e8a0f5ce78 Mon Sep 17 00:00:00 2001 From: peter942 Date: Sat, 21 Oct 2023 05:09:49 +0200 Subject: [PATCH 104/164] Lil' fixes --- .../widgets/frame_movement_widgets.py | 11 ++++++++ .../widgets/style_explorer_element.py | 15 +++++++---- ui_components/widgets/timeline_view.py | 25 +++++++++++++++---- utils/st_memory.py | 16 +++++++++++- 4 files changed, 56 insertions(+), 11 deletions(-) diff --git a/ui_components/widgets/frame_movement_widgets.py b/ui_components/widgets/frame_movement_widgets.py index 2a52f551..3340d562 100644 --- a/ui_components/widgets/frame_movement_widgets.py +++ b/ui_components/widgets/frame_movement_widgets.py @@ -95,6 +95,17 @@ def delete_frame(timing_uuid): timed_clip_id=None ) + # If the frame being deleted is the first one + if timing.aux_frame_index == 0 and next_timing: + print("first 
frame") + print(next_timing.uuid) + print(timing.uuid) + data_repo.update_specific_timing( + next_timing.uuid, + start_time=0.0 + ) + + data_repo.delete_timing_from_uuid(timing.uuid) timing_details = data_repo.get_timing_list_from_project(project_uuid=project_uuid) diff --git a/ui_components/widgets/style_explorer_element.py b/ui_components/widgets/style_explorer_element.py index 4599b93e..43e1cbf0 100644 --- a/ui_components/widgets/style_explorer_element.py +++ b/ui_components/widgets/style_explorer_element.py @@ -7,6 +7,7 @@ from utils.constants import MLQueryObject from utils.data_repo.data_repo import DataRepo from shared.constants import AIModelType, InferenceType, InternalFileTag, InternalFileType, SortOrder +from utils import st_memory from utils.ml_processor.ml_interface import get_ml_client from utils.ml_processor.replicate.constants import REPLICATE_MODEL @@ -18,8 +19,11 @@ def style_explorer_element(project_uuid): project_settings = data_repo.get_project_setting(project_uuid) _, a2, a3,_= st.columns([0.5, 1, 0.5,0.5]) - prompt = a2.text_area("What's your base prompt?", key="prompt", help="This will be included at the beginning of each prompt") - base_prompt_position = a3.radio("Where would you like to place the base prompt?", options=["Beginning", "End"], key="base_prompt_position", help="This will be included at the beginning of each prompt") + with a2: + prompt = st_memory.text_area("What's your base prompt?", key="explorer_base_prompt", help="This will be included at the beginning of each prompt") + with a3: + st.write("") + base_prompt_position = st_memory.radio("Where would you like to place the base prompt?", options=["Beginning", "End"], key="base_prompt_position", help="This will be included at the beginning of each prompt") _, b2, b3, b4, b5, _ = st.columns([0.5, 1, 1, 1, 1, 0.5]) @@ -37,11 +41,11 @@ def style_explorer_element(project_uuid): _, c2, _ = st.columns([0.25, 1, 0.25]) with c2: - models_to_use = st.multiselect("Which models would you 
like to use?", model_name_list, key="models_to_use", default=model_name_list, help="It'll rotate through the models you select.") + models_to_use = st.multiselect("Which models would you like to use?", model_name_list, key="models_to_use", default=[model_name_list[0]], help="It'll rotate through the models you select.") _, d2, _ = st.columns([0.75, 1, 0.75]) with d2: - number_to_generate = st.slider("How many images would you like to generate?", min_value=1, max_value=100, value=10, step=1, key="number_to_generate", help="It'll generate 4 from each variation.") + number_to_generate = st.slider("How many images would you like to generate?", min_value=0, max_value=100, value=4, step=4, key="number_to_generate", help="It'll generate 4 from each variation.") _, e2, _ = st.columns([0.5, 1, 0.5]) if e2.button("Generate images", key="generate_images", use_container_width=True, type="primary"): @@ -161,7 +165,8 @@ def create_variate_option(column, key): label = key.replace('_', ' ').capitalize() variate_option = column.checkbox(f"Vary {label.lower()}", key=f"{key}_checkbox") if variate_option: - instructions = column.text_area(f"How would you like to vary the {label.lower()}?", key=f"{key}_textarea", help=f"It'll write a custom {label.lower()} prompt based on your instructions.") + with column: + instructions = st_memory.text_area(f"How would you like to vary the {label.lower()}?", key=f"{key}_textarea", help=f"It'll write a custom {label.lower()} prompt based on your instructions.") else: instructions = "" return instructions diff --git a/ui_components/widgets/timeline_view.py b/ui_components/widgets/timeline_view.py index d6e32e7a..fe6ba88b 100644 --- a/ui_components/widgets/timeline_view.py +++ b/ui_components/widgets/timeline_view.py @@ -5,8 +5,12 @@ from utils.data_repo.data_repo import DataRepo from ui_components.constants import WorkflowStageType from utils import st_memory +from ui_components.methods.file_methods import generate_pil_image +from 
ui_components.widgets.add_key_frame_element import add_key_frame -def timeline_view_buttons(i, j, timing_details, shift_frames_setting, time_setter_toggle, replace_image_widget_toggle, duration_setter_toggle, move_frames_toggle, delete_frames_toggle, change_position_toggle): + +def timeline_view_buttons(i, j, timing_details, shift_frames_setting, time_setter_toggle, replace_image_widget_toggle, duration_setter_toggle, copy_frame_toggle, move_frames_toggle, delete_frames_toggle, change_position_toggle, project_uuid): + data_repo = DataRepo() if time_setter_toggle: single_frame_time_selector(timing_details[i + j].uuid, 'motion', shift_frames=shift_frames_setting) if duration_setter_toggle: @@ -14,7 +18,7 @@ def timeline_view_buttons(i, j, timing_details, shift_frames_setting, time_sette if replace_image_widget_toggle: replace_image_widget(timing_details[i + j].uuid, stage=WorkflowStageType.STYLED.value,options=["Uploaded Frame"]) - btn1, btn2, btn3 = st.columns([1, 1, 1]) + btn1, btn2, btn3, btn4 = st.columns([1, 1, 1, 1]) if move_frames_toggle: with btn1: @@ -22,8 +26,17 @@ def timeline_view_buttons(i, j, timing_details, shift_frames_setting, time_sette with btn2: move_frame_forward_button(timing_details[i + j].uuid, "side-to-side") - if delete_frames_toggle: + if copy_frame_toggle: with btn3: + if st.button("🔁", key=f"copy_frame_{timing_details[i + j].uuid}"): + pil_image = generate_pil_image(timing_details[i + j].primary_image.location) + position_of_current_item = timing_details[i + j].aux_frame_index + add_key_frame(pil_image, False, 2.5, timing_details[i + j].aux_frame_index+1, refresh_state=False) + + st.rerun() + + if delete_frames_toggle: + with btn4: delete_frame_button(timing_details[i + j].uuid) if change_position_toggle: @@ -51,13 +64,14 @@ def timeline_view(project_uuid, stage): expand_all = st_memory.toggle("Expand All", key="expand_all",value=False) if expand_all: - time_setter_toggle = replace_image_widget_toggle = duration_setter_toggle = 
move_frames_toggle = delete_frames_toggle = change_position_toggle = True + time_setter_toggle = replace_image_widget_toggle = duration_setter_toggle = copy_frame_toggle = move_frames_toggle = delete_frames_toggle = change_position_toggle = True else: with col2: time_setter_toggle = st_memory.toggle("Time Setter", value=True, key="time_setter_toggle") delete_frames_toggle = st_memory.toggle("Delete Frames", value=True, key="delete_frames_toggle") duration_setter_toggle = st_memory.toggle("Duration Setter", value=False, key="duration_setter_toggle") + copy_frame_toggle = st_memory.toggle("Copy Frame", value=False, key="copy_frame_toggle") with col3: move_frames_toggle = st_memory.toggle("Move Frames", value=True, key="move_frames_toggle") replace_image_widget_toggle = st_memory.toggle("Replace Image", value=False, key="replace_image_widget_toggle") @@ -66,6 +80,7 @@ def timeline_view(project_uuid, stage): with header_col_3: items_per_row = st_memory.slider("How many frames per row?", min_value=1, max_value=10, value=5, step=1, key="items_per_row_slider") + st.markdown("***") total_count = len(timing_details) for i in range(0, total_count, items_per_row): # Step of items_per_row for grid grid = st.columns(items_per_row) # Create items_per_row columns for grid @@ -81,6 +96,6 @@ def timeline_view(project_uuid, stage): else: st.error("No video found for this frame.") with st.expander(f'Frame #{display_number}', True): - timeline_view_buttons(i, j, timing_details, shift_frames_setting, time_setter_toggle, replace_image_widget_toggle, duration_setter_toggle, move_frames_toggle, delete_frames_toggle, change_position_toggle) + timeline_view_buttons(i, j, timing_details, shift_frames_setting, time_setter_toggle, replace_image_widget_toggle, duration_setter_toggle, copy_frame_toggle, move_frames_toggle, delete_frames_toggle, change_position_toggle, project_uuid) diff --git a/utils/st_memory.py b/utils/st_memory.py index 41b5f622..c9171b24 100644 --- a/utils/st_memory.py +++ 
b/utils/st_memory.py @@ -104,4 +104,18 @@ def menu(menu_title,options, icons=None, menu_icon=None, default_index=0, key=No st.session_state[key] = options.index(selection) st.rerun() - return selection \ No newline at end of file + return selection + +def text_area(label, value='', height=None, max_chars=None, key=None, help=None, on_change=None, args=None, kwargs=None, *, disabled=False, label_visibility="visible"): + + if key not in st.session_state: + st.session_state[key] = value + + selection = st.text_area(label=label, value=st.session_state[key], height=height, max_chars=max_chars, help=help, on_change=on_change, disabled=disabled, label_visibility=label_visibility) + + if selection != st.session_state[key]: + st.session_state[key] = selection + st.rerun() + + return selection + From a177c1dfbec2b78331a7d0e670f8a46c3041c2c9 Mon Sep 17 00:00:00 2001 From: piyushK52 Date: Sat, 21 Oct 2023 11:56:18 +0530 Subject: [PATCH 105/164] add to timeline fix --- ui_components/widgets/style_explorer_element.py | 7 ++----- 1 file changed, 2 insertions(+), 5 deletions(-) diff --git a/ui_components/widgets/style_explorer_element.py b/ui_components/widgets/style_explorer_element.py index 4599b93e..aea1e452 100644 --- a/ui_components/widgets/style_explorer_element.py +++ b/ui_components/widgets/style_explorer_element.py @@ -116,10 +116,7 @@ def style_explorer_element(project_uuid): st.rerun() total_image_count = res_payload['count'] - if gallery_image_list and len(gallery_image_list): - - start_index = 0 end_index = min(start_index + num_items_per_page, total_image_count) @@ -142,8 +139,8 @@ def style_explorer_element(project_uuid): st.warning("No data found") else: st.warning("No data found") - - if st.button(f"Add to timeline", key=f"Promote Variant #{(page_number - 1) * num_items_per_page + i + j + 1} for {st.session_state['current_frame_index']}", help="Promote this variant to the primary image", use_container_width=True): + + if st.button(f"Add to timeline", 
key=f"{gallery_image_list[i + j].uuid}", help="Promote this variant to the primary image", use_container_width=True): pil_image = generate_pil_image(gallery_image_list[i + j].location) add_key_frame(pil_image, False, 2.5, len(data_repo.get_timing_list_from_project(project_uuid)), refresh_state=False) From 030eadf95cf2f9f6996364538415fc063a7a9bf7 Mon Sep 17 00:00:00 2001 From: piyushK52 Date: Sat, 21 Oct 2023 12:22:23 +0530 Subject: [PATCH 106/164] seed fix --- utils/ml_processor/replicate/utils.py | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/utils/ml_processor/replicate/utils.py b/utils/ml_processor/replicate/utils.py index ae4a752a..fa1bac54 100644 --- a/utils/ml_processor/replicate/utils.py +++ b/utils/ml_processor/replicate/utils.py @@ -129,7 +129,7 @@ def get_model_params_from_query_obj(model, query_obj: MLQueryObject): 'width': query_obj.width, 'height': query_obj.height, 'steps': query_obj.num_inference_steps, - 'seed': query_obj.seed + 'seed': query_obj.seed if query_obj.seed not in [-1, 0] else 0 } elif model == REPLICATE_MODEL.deliberate_v3 or model == REPLICATE_MODEL.dreamshaper_v7 or model == REPLICATE_MODEL.epicrealism_v5: data = { @@ -140,9 +140,13 @@ def get_model_params_from_query_obj(model, query_obj: MLQueryObject): 'prompt_strength': query_obj.strength, 'guidance_scale': query_obj.guidance_scale, 'num_inference_steps': query_obj.num_inference_steps, - 'safety_checker': False + 'safety_checker': False, + 'seed': query_obj.seed } + if query_obj.seed in [-1, 0]: + del data['seed'] + if input_image: data['image'] = input_image if mask: From ed8605ed602314718ef538a9f00657d1d14ce75d Mon Sep 17 00:00:00 2001 From: piyushK52 Date: Sat, 21 Oct 2023 14:39:54 +0530 Subject: [PATCH 107/164] inpainting fix --- ui_components/methods/common_methods.py | 52 +++++++++------------ ui_components/methods/file_methods.py | 15 ++++-- ui_components/methods/ml_methods.py | 15 +++--- ui_components/widgets/inpainting_element.py | 18 +++---- 
utils/cache/cache_methods.py | 11 ++++- utils/ml_processor/replicate/constants.py | 2 +- utils/ml_processor/replicate/replicate.py | 2 +- 7 files changed, 61 insertions(+), 54 deletions(-) diff --git a/ui_components/methods/common_methods.py b/ui_components/methods/common_methods.py index f1527be8..7f02d447 100644 --- a/ui_components/methods/common_methods.py +++ b/ui_components/methods/common_methods.py @@ -473,12 +473,12 @@ def replace_background(project_uuid, background_image) -> InternalFileObject: else: background_image = Image.open(f"{background_image}") - if SERVER == ServerType.DEVELOPMENT.value: - foreground_image = Image.open(SECOND_MASK_FILE_PATH) - else: - path = project.get_temp_mask_file(SECOND_MASK_FILE).location + path = project.get_temp_mask_file(SECOND_MASK_FILE).location + if path.startswith('http'): response = r.get(path) foreground_image = Image.open(BytesIO(response.content)) + else: + foreground_image = Image.open(path) background_image.paste(foreground_image, (0, 0), foreground_image) filename = str(uuid.uuid4()) + ".png" @@ -652,19 +652,18 @@ def execute_image_edit(type_of_mask_selection, type_of_mask_replacement, response = r.get(removed_background) img = Image.open(BytesIO(response.content)) hosted_url = save_or_host_file(img, SECOND_MASK_FILE_PATH) - if hosted_url: - add_temp_file_to_project(project.uuid, SECOND_MASK_FILE, hosted_url) + add_temp_file_to_project(project.uuid, SECOND_MASK_FILE, hosted_url or SECOND_MASK_FILE_PATH) if type_of_mask_replacement == "Replace With Image": edited_image = replace_background(project.uuid, background_image) elif type_of_mask_replacement == "Inpainting": - if SERVER == ServerType.DEVELOPMENT.value: - image = Image.open(SECOND_MASK_FILE_PATH) - else: - path = project.get_temp_mask_file(SECOND_MASK_FILE).location + path = project.get_temp_mask_file(SECOND_MASK_FILE).location + if path.startswith("http"): response = r.get(path) image = Image.open(BytesIO(response.content)) + else: + image = 
Image.open(path) converted_image = Image.new("RGB", image.size, (255, 255, 255)) for x in range(image.width): @@ -705,10 +704,9 @@ def execute_image_edit(type_of_mask_selection, type_of_mask_replacement, result_img.putpixel((x, y), bg_img.getpixel((x, y))) hosted_manual_bg_url = save_or_host_file(result_img, SECOND_MASK_FILE_PATH) - if hosted_manual_bg_url: - add_temp_file_to_project(project.uuid, SECOND_MASK_FILE, hosted_manual_bg_url) - edited_image = replace_background( - project.uuid, SECOND_MASK_FILE_PATH, background_image) + add_temp_file_to_project(project.uuid, SECOND_MASK_FILE, hosted_manual_bg_url or SECOND_MASK_FILE_PATH) + edited_image = replace_background(project.uuid, SECOND_MASK_FILE_PATH, background_image) + elif type_of_mask_replacement == "Inpainting": mask_location = timing.mask.location if mask_location.startswith("http"): @@ -739,10 +737,9 @@ def execute_image_edit(type_of_mask_selection, type_of_mask_replacement, masked_img = Image.composite(bg_img, Image.new( 'RGBA', bg_img.size, (0, 0, 0, 0)), mask) hosted_automated_bg_url = save_or_host_file(result_img, SECOND_MASK_FILE_PATH) - if hosted_automated_bg_url: - add_temp_file_to_project(project.uuid, SECOND_MASK_FILE, hosted_automated_bg_url) - edited_image = replace_background( - project.uuid, SECOND_MASK_FILE_PATH, background_image) + add_temp_file_to_project(project.uuid, SECOND_MASK_FILE, hosted_automated_bg_url or SECOND_MASK_FILE_PATH) + edited_image = replace_background(project.uuid, SECOND_MASK_FILE_PATH, background_image) + elif type_of_mask_replacement == "Inpainting": edited_image = inpainting( editing_image, prompt, negative_prompt, timing_uuid, True) @@ -760,14 +757,11 @@ def execute_image_edit(type_of_mask_selection, type_of_mask_replacement, bg_img = Image.open(BytesIO(response.content)).convert('RGBA') else: bg_img = Image.open(editing_image).convert('RGBA') - masked_img = Image.composite(bg_img, Image.new( - 'RGBA', bg_img.size, (0, 0, 0, 0)), mask) - hosted_image_replace_url = 
save_or_host_file(result_img, SECOND_MASK_FILE_PATH) - if hosted_image_replace_url: - add_temp_file_to_project(project.uuid, SECOND_MASK_FILE, hosted_image_replace_url) - edited_image = replace_background( - project.uuid, SECOND_MASK_FILE_PATH, background_image) + hosted_image_replace_url = save_or_host_file(result_img, SECOND_MASK_FILE_PATH) + add_temp_file_to_project(project.uuid, SECOND_MASK_FILE, hosted_image_replace_url or SECOND_MASK_FILE_PATH) + edited_image = replace_background(project.uuid, SECOND_MASK_FILE_PATH, background_image) + elif type_of_mask_replacement == "Inpainting": edited_image = inpainting( editing_image, prompt, negative_prompt, timing_uuid, True) @@ -790,11 +784,9 @@ def execute_image_edit(type_of_mask_selection, type_of_mask_replacement, 'RGBA', bg_img.size, (0, 0, 0, 0)), inverted_mask) # TODO: standardise temproray fixes hosted_prvious_invert_url = save_or_host_file(result_img, SECOND_MASK_FILE_PATH) - if hosted_prvious_invert_url: - add_temp_file_to_project(project.uuid, SECOND_MASK_FILE, hosted_prvious_invert_url) - - edited_image = replace_background( - project.uuid, SECOND_MASK_FILE_PATH, background_image) + add_temp_file_to_project(project.uuid, SECOND_MASK_FILE, hosted_prvious_invert_url or SECOND_MASK_FILE_PATH) + edited_image = replace_background(project.uuid, SECOND_MASK_FILE_PATH, background_image) + elif type_of_mask_replacement == "Inpainting": edited_image = inpainting( editing_image, prompt, negative_prompt, timing_uuid, False) diff --git a/ui_components/methods/file_methods.py b/ui_components/methods/file_methods.py index c76e0634..231b9177 100644 --- a/ui_components/methods/file_methods.py +++ b/ui_components/methods/file_methods.py @@ -46,6 +46,9 @@ def save_or_host_file(file, path, mime_type='image/png', dim=None): return uploaded_url def zoom_and_crop(file, width, height): + if file.width == width and file.height == height: + return file + # scaling s_x = width / file.width s_y = height / file.height @@ -65,7 +68,7 @@ 
def zoom_and_crop(file, width, height): # resizes file dimensions to current project_settings def normalize_size_internal_file_obj(file_obj: InternalFileObject): - if not file_obj or file_obj.type != InternalFileType.IMAGE.value: + if not file_obj or file_obj.type != InternalFileType.IMAGE.value or not file_obj.project: return file_obj data_repo = DataRepo() @@ -93,16 +96,20 @@ def save_or_host_file_bytes(video_bytes, path, ext=".mp4"): return uploaded_url -def add_temp_file_to_project(project_uuid, key, hosted_url): +def add_temp_file_to_project(project_uuid, key, file_path): data_repo = DataRepo() file_data = { "name": str(uuid.uuid4()) + ".png", "type": InternalFileType.IMAGE.value, - "project_id": project_uuid, - 'hosted_url': hosted_url + "project_id": project_uuid } + if file_path.startswith('http'): + file_data.update({'hosted_url': file_path}) + else: + file_data.update({'local_path': file_path}) + temp_file = data_repo.create_file(**file_data) project = data_repo.get_project_from_uuid(project_uuid) temp_file_list = project.project_temp_file_list diff --git a/ui_components/methods/ml_methods.py b/ui_components/methods/ml_methods.py index 642ae498..de1daf18 100644 --- a/ui_components/methods/ml_methods.py +++ b/ui_components/methods/ml_methods.py @@ -199,11 +199,7 @@ def inpainting(input_image: str, prompt, negative_prompt, timing_uuid, invert_ma if pass_mask == False: mask = timing.mask.location else: - # TODO: store the local temp files in the db too - if SERVER != ServerType.DEVELOPMENT.value: - mask = timing.project.get_temp_mask_file(TEMP_MASK_FILE).location - else: - mask = MASK_IMG_LOCAL_PATH + mask = timing.project.get_temp_mask_file(TEMP_MASK_FILE).location if not mask.startswith("http"): mask = open(mask, "rb") @@ -212,11 +208,16 @@ def inpainting(input_image: str, prompt, negative_prompt, timing_uuid, invert_ma input_image = open(input_image, "rb") ml_client = get_ml_client() - output, log = 
ml_client.predict_model_output(REPLICATE_MODEL.andreas_sd_inpainting, mask=mask, image=input_image, prompt=prompt, negative_prompt=negative_prompt, num_inference_steps=25, strength=1.0) + output, log = ml_client.predict_model_output(REPLICATE_MODEL.sdxl_inpainting, mask=mask, image=input_image, prompt=prompt, negative_prompt=negative_prompt, num_inference_steps=25, strength=1.0) file_name = str(uuid.uuid4()) + ".png" image_file = data_repo.create_file( - name=file_name, type=InternalFileType.IMAGE.value, hosted_url=output[0], inference_log_id=log.uuid) + name=file_name, + type=InternalFileType.IMAGE.value, + hosted_url=output[0] if isinstance(output, list) else output, + inference_log_id=log.uuid, + project_id=timing.project.uuid + ) return image_file diff --git a/ui_components/widgets/inpainting_element.py b/ui_components/widgets/inpainting_element.py index 1f1bef1f..13207a3d 100644 --- a/ui_components/widgets/inpainting_element.py +++ b/ui_components/widgets/inpainting_element.py @@ -4,7 +4,7 @@ from io import BytesIO from typing import List import requests as r -from PIL import Image +from PIL import Image, ImageOps import streamlit as st from streamlit_drawable_canvas import st_canvas from ui_components.constants import CROPPED_IMG_LOCAL_PATH, MASK_IMG_LOCAL_PATH, TEMP_MASK_FILE, WorkflowStageType @@ -127,9 +127,7 @@ def inpainting_element(timing_uuid): stroke_width = 3 with main_col_2: - realtime_update = True - canvas_result = st_canvas( fill_color="rgba(0, 0, 0)", stroke_width=stroke_width, @@ -149,10 +147,12 @@ def inpainting_element(timing_uuid): if canvas_result.image_data is not None: img_data = canvas_result.image_data - im = Image.fromarray( - img_data.astype("uint8"), mode="RGBA") - create_or_update_mask( - st.session_state['current_frame_uuid'], im) + im = Image.fromarray(img_data.astype("uint8"), mode="RGBA") + im_rgb = Image.new("RGB", im.size, (255, 255, 255)) + im_rgb.paste(im, mask=im.split()[3]) + im = im_rgb + im = ImageOps.invert(im) # 
inverting for sdxl inpainting + create_or_update_mask(st.session_state['current_frame_uuid'], im) else: image_file = data_repo.get_file_from_uuid(st.session_state['edited_image']) image_comparison( @@ -389,10 +389,10 @@ def inpaint_in_black_space_element(cropped_img, project_uuid, stage=WorkflowStag # Otherwise, make the pixel white in the new image else: mask.putpixel((x, y), (255, 255, 255)) # White + # Save the mask image hosted_url = save_or_host_file(mask, MASK_IMG_LOCAL_PATH) - if hosted_url: - add_temp_file_to_project(project_uuid, TEMP_MASK_FILE, hosted_url) + add_temp_file_to_project(project_uuid, TEMP_MASK_FILE, hosted_url or MASK_IMG_LOCAL_PATH) cropped_img_path = hosted_cropped_img_path if hosted_cropped_img_path else CROPPED_IMG_LOCAL_PATH inpainted_file = inpainting(cropped_img_path, inpaint_prompt, diff --git a/utils/cache/cache_methods.py b/utils/cache/cache_methods.py index c7380129..55891d64 100644 --- a/utils/cache/cache_methods.py +++ b/utils/cache/cache_methods.py @@ -1,7 +1,5 @@ from shared.logging.logging import AppLogger from utils.cache.cache import CacheKey, StCache -import streamlit as st - logger = AppLogger() @@ -299,6 +297,15 @@ def _cache_update_specific_timing(self, *args, **kwargs): if status: StCache.delete_all(CacheKey.TIMING_DETAILS.value) + + # updating the timing list + timing_func = getattr(cls, '_original_get_timing_from_uuid') + timing = timing_func(self, args[0]) + if timing and timing.project: + original_func = getattr(cls, '_original_get_timing_list_from_project') + timing_list = original_func(self, timing.project.uuid) + if timing_list and len(timing_list): + StCache.add_all(timing_list, CacheKey.TIMING_DETAILS.value) setattr(cls, '_original_update_specific_timing', cls.update_specific_timing) setattr(cls, "update_specific_timing", _cache_update_specific_timing) diff --git a/utils/ml_processor/replicate/constants.py b/utils/ml_processor/replicate/constants.py index 4e1f5b22..40fb1b58 100644 --- 
a/utils/ml_processor/replicate/constants.py +++ b/utils/ml_processor/replicate/constants.py @@ -9,7 +9,7 @@ class ReplicateModel: version: str class REPLICATE_MODEL: - andreas_sd_inpainting = ReplicateModel("lucataco/sdxl-inpainting", "f03c01943bacdee38d6a5d216586bf9bfbfd799350aed263aa32980efc173f0b") + sdxl_inpainting = ReplicateModel("lucataco/sdxl-inpainting", "f03c01943bacdee38d6a5d216586bf9bfbfd799350aed263aa32980efc173f0b") clones_lora_training = ReplicateModel("cloneofsimo/lora-training", "b2a308762e36ac48d16bfadc03a65493fe6e799f429f7941639a6acec5b276cc") clones_lora_training_2 = ReplicateModel("cloneofsimo/lora", "fce477182f407ffd66b94b08e761424cabd13b82b518754b83080bc75ad32466") google_frame_interpolation = ReplicateModel("google-research/frame-interpolation", "4f88a16a13673a8b589c18866e540556170a5bcb2ccdc12de556e800e9456d3d") diff --git a/utils/ml_processor/replicate/replicate.py b/utils/ml_processor/replicate/replicate.py index 353a2bab..43d7701c 100644 --- a/utils/ml_processor/replicate/replicate.py +++ b/utils/ml_processor/replicate/replicate.py @@ -184,7 +184,7 @@ async def _async_model_prediction(self, replicate_model: ReplicateModel, **kwarg @check_user_credits def inpainting(self, video_name, input_image, prompt, negative_prompt): - model = self.get_model(REPLICATE_MODEL.andreas_sd_inpainting) + model = self.get_model(REPLICATE_MODEL.sdxl_inpainting) mask = "mask.png" mask = upload_file("mask.png", self.app_settings['aws_access_key'], self.app_settings['aws_secret_key']) From 7961c47e7c7efd26adcaed81f73c7ff32292ac1f Mon Sep 17 00:00:00 2001 From: piyushK52 Date: Sat, 21 Oct 2023 16:45:39 +0530 Subject: [PATCH 108/164] async inpainting fix --- banodoco_runner.py | 3 +- shared/constants.py | 1 + ui_components/methods/common_methods.py | 85 +++++++++++---------- ui_components/methods/ml_methods.py | 27 ++++--- ui_components/widgets/inpainting_element.py | 63 ++++++++------- utils/data_repo/data_repo.py | 2 +- 6 files changed, 94 insertions(+), 87 
deletions(-) diff --git a/banodoco_runner.py b/banodoco_runner.py index fa503a9b..0ebc844e 100644 --- a/banodoco_runner.py +++ b/banodoco_runner.py @@ -104,7 +104,8 @@ def check_and_update_db(): if origin_data['inference_type'] in [InferenceType.FRAME_INTERPOLATION.value, \ InferenceType.FRAME_TIMING_IMAGE_INFERENCE.value, \ - InferenceType.SINGLE_PREVIEW_VIDEO.value]: + InferenceType.SINGLE_PREVIEW_VIDEO.value, \ + InferenceType.FRAME_INPAINTING.value]: if str(log.project.uuid) not in timing_update_list: timing_update_list[str(log.project.uuid)] = [] timing_update_list[str(log.project.uuid)].append(origin_data['timing_uuid']) diff --git a/shared/constants.py b/shared/constants.py index 9b898311..403e13e6 100644 --- a/shared/constants.py +++ b/shared/constants.py @@ -80,6 +80,7 @@ class InferenceType(ExtendedEnum): SINGLE_PREVIEW_VIDEO = "single_preview_video" # for generating a single preview video FRAME_INTERPOLATION = "frame_interpolation" # for generating single/multiple interpolated videos GALLERY_IMAGE_GENERATION = "gallery_image_generation" # for generating gallery images + FRAME_INPAINTING = "frame_inpainting" # for generating inpainted frames class InferenceStatus(ExtendedEnum): QUEUED = "queued" diff --git a/ui_components/methods/common_methods.py b/ui_components/methods/common_methods.py index 7f02d447..85f62b99 100644 --- a/ui_components/methods/common_methods.py +++ b/ui_components/methods/common_methods.py @@ -463,22 +463,13 @@ def convert_image_list_to_file_list(image_list): file_list.append(image_file) return file_list -def replace_background(project_uuid, background_image) -> InternalFileObject: +def replace_background(project_uuid, bg_img_loc) -> InternalFileObject: data_repo = DataRepo() project = data_repo.get_project_from_uuid(project_uuid) - - if background_image.startswith("http"): - response = r.get(background_image) - background_image = Image.open(BytesIO(response.content)) - else: - background_image = Image.open(f"{background_image}") + 
background_image = generate_pil_image(bg_img_loc) path = project.get_temp_mask_file(SECOND_MASK_FILE).location - if path.startswith('http'): - response = r.get(path) - foreground_image = Image.open(BytesIO(response.content)) - else: - foreground_image = Image.open(path) + foreground_image = generate_pil_image(path) background_image.paste(foreground_image, (0, 0), foreground_image) filename = str(uuid.uuid4()) + ".png" @@ -640,12 +631,13 @@ def save_audio_file(uploaded_file, project_uuid): def execute_image_edit(type_of_mask_selection, type_of_mask_replacement, background_image, editing_image, prompt, negative_prompt, - width, height, layer, timing_uuid) -> InternalFileObject: + width, height, layer, timing_uuid): from ui_components.methods.ml_methods import inpainting, remove_background, create_depth_mask_image data_repo = DataRepo() timing: InternalFrameTimingObject = data_repo.get_timing_from_uuid( timing_uuid) project = timing.project + inference_log = None if type_of_mask_selection == "Automated Background Selection": removed_background = remove_background(editing_image) @@ -679,18 +671,8 @@ def execute_image_edit(type_of_mask_selection, type_of_mask_replacement, elif type_of_mask_selection == "Manual Background Selection": if type_of_mask_replacement == "Replace With Image": - if editing_image.startswith("http"): - response = r.get(editing_image) - bg_img = Image.open(BytesIO(response.content)) - else: - bg_img = Image.open(editing_image) - - mask_location = timing.mask.location - if mask_location.startswith("http"): - response = r.get(mask_location) - mask_img = Image.open(BytesIO(response.content)) - else: - mask_img = Image.open(mask_location) + bg_img = generate_pil_image(editing_image) + mask_img = generate_pil_image(timing.mask.location) # TODO: fix this logic, if the uploaded image and the image to be editted are of different sizes then # this code will cause issues @@ -698,28 +680,19 @@ def execute_image_edit(type_of_mask_selection, 
type_of_mask_replacement, for x in range(bg_img.size[0]): for y in range(bg_img.size[1]): if x < mask_img.size[0] and y < mask_img.size[1]: - if mask_img.getpixel((x, y)) == (0, 0, 0, 255): + if mask_img.getpixel((x, y)) == (0, 0, 0) or mask_img.getpixel((x, y)) == (0, 0, 0, 255): result_img.putpixel((x, y), (255, 255, 255, 0)) else: result_img.putpixel((x, y), bg_img.getpixel((x, y))) hosted_manual_bg_url = save_or_host_file(result_img, SECOND_MASK_FILE_PATH) add_temp_file_to_project(project.uuid, SECOND_MASK_FILE, hosted_manual_bg_url or SECOND_MASK_FILE_PATH) - edited_image = replace_background(project.uuid, SECOND_MASK_FILE_PATH, background_image) + edited_image = replace_background(project.uuid, background_image) elif type_of_mask_replacement == "Inpainting": - mask_location = timing.mask.location - if mask_location.startswith("http"): - response = r.get(mask_location) - im = Image.open(BytesIO(response.content)) - else: - im = Image.open(mask_location) - if "A" in im.getbands(): - mask = Image.new('RGB', (width, height), color=(255, 255, 255)) - mask.paste(im, (0, 0), im) - create_or_update_mask(timing.uuid, mask) - edited_image = inpainting( - editing_image, prompt, negative_prompt, timing_uuid, True) + edited_image, log = inpainting(editing_image, prompt, negative_prompt, timing_uuid, False) + inference_log = log + elif type_of_mask_selection == "Automated Layer Selection": mask_location = create_depth_mask_image( editing_image, layer, timing.uuid) @@ -734,8 +707,7 @@ def execute_image_edit(type_of_mask_selection, type_of_mask_replacement, bg_img = Image.open(BytesIO(response.content)).convert('RGBA') else: bg_img = Image.open(editing_image).convert('RGBA') - masked_img = Image.composite(bg_img, Image.new( - 'RGBA', bg_img.size, (0, 0, 0, 0)), mask) + hosted_automated_bg_url = save_or_host_file(result_img, SECOND_MASK_FILE_PATH) add_temp_file_to_project(project.uuid, SECOND_MASK_FILE, hosted_automated_bg_url or SECOND_MASK_FILE_PATH) edited_image = 
replace_background(project.uuid, SECOND_MASK_FILE_PATH, background_image) @@ -791,7 +763,7 @@ def execute_image_edit(type_of_mask_selection, type_of_mask_replacement, edited_image = inpainting( editing_image, prompt, negative_prompt, timing_uuid, False) - return edited_image + return edited_image, inference_log # if the output is present it adds it to the respective place or else it updates the inference log @@ -935,6 +907,35 @@ def process_inference_output(**kwargs): del kwargs['log_uuid'] data_repo.update_inference_log_origin_data(log_uuid, **kwargs) + # --------------------- FRAME INPAINTING ------------------------ + elif inference_type == InferenceType.FRAME_INPAINTING.value: + output = kwargs.get('output') + log_uuid = kwargs.get('log_uuid') + + if output: + stage = kwargs.get('stage', WorkflowStageType.STYLED.value) + promote = kwargs.get('promote_generation', False) + current_frame_uuid = kwargs.get('timing_uuid') + timing = data_repo.get_timing_from_uuid(current_frame_uuid) + + file_name = str(uuid.uuid4()) + ".png" + output_file = data_repo.create_file( + name=file_name, + type=InternalFileType.IMAGE.value, + hosted_url=output[0] if isinstance(output, list) else output, + inference_log_id=str(log_uuid), + project_id=timing.project.uuid + ) + + if stage == WorkflowStageType.SOURCE.value: + data_repo.update_specific_timing(current_frame_uuid, source_image_id=output_file.uuid) + elif stage == WorkflowStageType.STYLED.value: + number_of_image_variants = add_image_variant(output_file.uuid, current_frame_uuid) + if promote: + promote_image_variant(current_frame_uuid, number_of_image_variants - 1) + else: + del kwargs['log_uuid'] + data_repo.update_inference_log_origin_data(log_uuid, **kwargs) return True diff --git a/ui_components/methods/ml_methods.py b/ui_components/methods/ml_methods.py index de1daf18..8b9424d5 100644 --- a/ui_components/methods/ml_methods.py +++ b/ui_components/methods/ml_methods.py @@ -191,12 +191,11 @@ def 
facial_expression_recognition(input_image): emotion = (f"neutral expression") return emotion -def inpainting(input_image: str, prompt, negative_prompt, timing_uuid, invert_mask, pass_mask=False) -> InternalFileObject: +def inpainting(input_image: str, prompt, negative_prompt, timing_uuid, mask_in_project=False) -> InternalFileObject: data_repo = DataRepo() - timing: InternalFrameTimingObject = data_repo.get_timing_from_uuid( - timing_uuid) + timing: InternalFrameTimingObject = data_repo.get_timing_from_uuid(timing_uuid) - if pass_mask == False: + if mask_in_project == False: mask = timing.mask.location else: mask = timing.project.get_temp_mask_file(TEMP_MASK_FILE).location @@ -208,18 +207,18 @@ def inpainting(input_image: str, prompt, negative_prompt, timing_uuid, invert_ma input_image = open(input_image, "rb") ml_client = get_ml_client() - output, log = ml_client.predict_model_output(REPLICATE_MODEL.sdxl_inpainting, mask=mask, image=input_image, prompt=prompt, negative_prompt=negative_prompt, num_inference_steps=25, strength=1.0) - - file_name = str(uuid.uuid4()) + ".png" - image_file = data_repo.create_file( - name=file_name, - type=InternalFileType.IMAGE.value, - hosted_url=output[0] if isinstance(output, list) else output, - inference_log_id=log.uuid, - project_id=timing.project.uuid + output, log = ml_client.predict_model_output( + REPLICATE_MODEL.sdxl_inpainting, + mask=mask, + image=input_image, + prompt=prompt, + negative_prompt=negative_prompt, + num_inference_steps=25, + strength=1.0, + queue_inference=True ) - return image_file + return output, log def remove_background(input_image): if not input_image.startswith("http"): diff --git a/ui_components/widgets/inpainting_element.py b/ui_components/widgets/inpainting_element.py index 13207a3d..7a505b77 100644 --- a/ui_components/widgets/inpainting_element.py +++ b/ui_components/widgets/inpainting_element.py @@ -7,6 +7,7 @@ from PIL import Image, ImageOps import streamlit as st from streamlit_drawable_canvas 
import st_canvas +from shared.constants import InferenceType from ui_components.constants import CROPPED_IMG_LOCAL_PATH, MASK_IMG_LOCAL_PATH, TEMP_MASK_FILE, WorkflowStageType from ui_components.methods.file_methods import add_temp_file_to_project, save_or_host_file from utils.data_repo.data_repo import DataRepo @@ -14,7 +15,7 @@ from utils import st_memory from utils.data_repo.data_repo import DataRepo from utils import st_memory -from ui_components.methods.common_methods import add_image_variant, execute_image_edit, create_or_update_mask, promote_image_variant +from ui_components.methods.common_methods import add_image_variant, execute_image_edit, create_or_update_mask, process_inference_output, promote_image_variant from ui_components.models import InternalFrameTimingObject, InternalSettingObject from streamlit_image_comparison import image_comparison @@ -237,17 +238,15 @@ def inpainting_element(timing_uuid): f.write(uploaded_file.getbuffer()) st.success( "Your backgrounds are uploaded file - they should appear in the dropdown.") - background_list.append( - uploaded_file.name) + background_list.append(uploaded_file.name) time.sleep(1.5) st.rerun() with btn2: - background_selection = st.selectbox( - "Range background", background_list) + background_selection = st.selectbox("Range background", background_list) background_image = f'videos/{timing.project.uuid}/assets/resources/backgrounds/{background_selection}' if background_list != []: - st.image(f"{background_image}", - use_column_width=True) + st.image(f"{background_image}", use_column_width=True) + elif source_of_image == "From Other Frame": btn1, btn2 = st.columns([1, 1]) with btn1: @@ -278,14 +277,23 @@ def inpainting_element(timing_uuid): with edit1: if st.button(f'Run Edit On Current Image'): if st.session_state["type_of_mask_replacement"] == "Inpainting": - edited_image = execute_image_edit(type_of_mask_selection, st.session_state["type_of_mask_replacement"], + edited_image, log = 
execute_image_edit(type_of_mask_selection, st.session_state["type_of_mask_replacement"], "", editing_image, prompt, negative_prompt, width, height, st.session_state['which_layer'], st.session_state['current_frame_uuid']) - st.session_state['edited_image'] = edited_image.uuid + elif st.session_state["type_of_mask_replacement"] == "Replace With Image": - edited_image = execute_image_edit(type_of_mask_selection, st.session_state["type_of_mask_replacement"], + edited_image, log = execute_image_edit(type_of_mask_selection, st.session_state["type_of_mask_replacement"], background_image, editing_image, "", "", width, height, st.session_state['which_layer'], st.session_state['current_frame_uuid']) - st.session_state['edited_image'] = edited_image.uuid - st.rerun() + + inference_data = { + "inference_type": InferenceType.FRAME_INPAINTING.value, + "output": edited_image, + "log_uuid": log.uuid, + "timing_uuid": st.session_state['current_frame_uuid'], + "promote_generation": False, + "stage": stage + } + + process_inference_output(**inference_data) with edit2: if st.session_state['edited_image'] != "": @@ -303,26 +311,23 @@ def inpainting_element(timing_uuid): else: if st.button("Run Edit & Promote"): if st.session_state["type_of_mask_replacement"] == "Inpainting": - edited_image = execute_image_edit(type_of_mask_selection, st.session_state["type_of_mask_replacement"], + edited_image, log = execute_image_edit(type_of_mask_selection, st.session_state["type_of_mask_replacement"], "", editing_image, prompt, negative_prompt, width, height, st.session_state['which_layer'], st.session_state['current_frame_uuid']) - st.session_state['edited_image'] = edited_image.uuid + elif st.session_state["type_of_mask_replacement"] == "Replace With Image": - edited_image = execute_image_edit(type_of_mask_selection, st.session_state["type_of_mask_replacement"], + edited_image, log = execute_image_edit(type_of_mask_selection, st.session_state["type_of_mask_replacement"], background_image, 
editing_image, "", "", width, height, st.session_state['which_layer'], st.session_state['current_frame_uuid']) - st.session_state['edited_image'] = edited_image.uuid - - if stage == WorkflowStageType.SOURCE.value: - data_repo.update_specific_timing( - st.session_state['current_frame_uuid'], source_image_id=st.session_state['edited_image']) - elif stage == WorkflowStageType.STYLED.value: - number_of_image_variants = add_image_variant( - edited_image.uuid, st.session_state['current_frame_uuid']) - promote_image_variant( - st.session_state['current_frame_uuid'], number_of_image_variants - 1) - - st.session_state['edited_image'] = "" - st.success("Image promoted!") - st.rerun() + + inference_data = { + "inference_type": InferenceType.FRAME_INPAINTING.value, + "output": edited_image, + "log_uuid": log.uuid, + "timing_uuid": st.session_state['current_frame_uuid'], + "promote_generation": True, + "stage": stage + } + + process_inference_output(**inference_data) # cropped_img here is a PIL image object diff --git a/utils/data_repo/data_repo.py b/utils/data_repo/data_repo.py index 98ce2247..3d5c41eb 100644 --- a/utils/data_repo/data_repo.py +++ b/utils/data_repo/data_repo.py @@ -243,7 +243,7 @@ def delete_ai_model(self, uuid): # timing - def get_timing_from_uuid(self, uuid, invalidate_cache=False): + def get_timing_from_uuid(self, uuid, **kwargs): timing = self.db_repo.get_timing_from_uuid(uuid).data['data'] return InternalFrameTimingObject(**timing) if timing else None From 514fea17563625c1134cebe2ecd0ee79435e4f1e Mon Sep 17 00:00:00 2001 From: piyushK52 Date: Sat, 21 Oct 2023 17:04:42 +0530 Subject: [PATCH 109/164] inapainting log fixed --- ui_components/methods/data_logger.py | 6 +++++- ui_components/widgets/inpainting_element.py | 10 +++++++++- 2 files changed, 14 insertions(+), 2 deletions(-) diff --git a/ui_components/methods/data_logger.py b/ui_components/methods/data_logger.py index 72f4ffcb..8c54c070 100644 --- a/ui_components/methods/data_logger.py +++ 
b/ui_components/methods/data_logger.py @@ -6,7 +6,7 @@ from shared.logging.logging import AppLogger from utils.data_repo.data_repo import DataRepo -from utils.ml_processor.replicate.constants import ReplicateModel +from utils.ml_processor.replicate.constants import REPLICATE_MODEL, ReplicateModel def log_model_inference(model: ReplicateModel, time_taken, **kwargs): @@ -38,6 +38,10 @@ def log_model_inference(model: ReplicateModel, time_taken, **kwargs): data_repo = DataRepo() ai_model = data_repo.get_ai_model_from_name(model.name) + # hackish sol for insuring that inpainting logs don't have an empty model field + if ai_model is None and model.name == REPLICATE_MODEL.sdxl_inpainting.name: + ai_model = data_repo.get_ai_model_from_name(REPLICATE_MODEL.sdxl.name) + log_data = { "project_id" : st.session_state["project_uuid"], "model_id" : ai_model.uuid if ai_model else None, diff --git a/ui_components/widgets/inpainting_element.py b/ui_components/widgets/inpainting_element.py index 7a505b77..120409cb 100644 --- a/ui_components/widgets/inpainting_element.py +++ b/ui_components/widgets/inpainting_element.py @@ -3,6 +3,7 @@ import time from io import BytesIO from typing import List +import numpy as np import requests as r from PIL import Image, ImageOps import streamlit as st @@ -146,7 +147,9 @@ def inpainting_element(timing_uuid): if 'image_created' not in st.session_state: st.session_state['image_created'] = 'no' - if canvas_result.image_data is not None: + is_completely_transparent = np.all(canvas_result.image_data[:, :, 3] == 0) + + if not is_completely_transparent: img_data = canvas_result.image_data im = Image.fromarray(img_data.astype("uint8"), mode="RGBA") im_rgb = Image.new("RGB", im.size, (255, 255, 255)) @@ -154,6 +157,11 @@ def inpainting_element(timing_uuid): im = im_rgb im = ImageOps.invert(im) # inverting for sdxl inpainting create_or_update_mask(st.session_state['current_frame_uuid'], im) + + if st.button("Reset Canvas"): + cava + 
st.session_state['edited_image'] = "" + st.rerun() else: image_file = data_repo.get_file_from_uuid(st.session_state['edited_image']) image_comparison( From a7017045bf795526b69c17243ccdcfd1229691ee Mon Sep 17 00:00:00 2001 From: piyushK52 Date: Sat, 21 Oct 2023 17:05:26 +0530 Subject: [PATCH 110/164] minor fix --- ui_components/widgets/inpainting_element.py | 5 ----- 1 file changed, 5 deletions(-) diff --git a/ui_components/widgets/inpainting_element.py b/ui_components/widgets/inpainting_element.py index 120409cb..b7b2b51f 100644 --- a/ui_components/widgets/inpainting_element.py +++ b/ui_components/widgets/inpainting_element.py @@ -157,11 +157,6 @@ def inpainting_element(timing_uuid): im = im_rgb im = ImageOps.invert(im) # inverting for sdxl inpainting create_or_update_mask(st.session_state['current_frame_uuid'], im) - - if st.button("Reset Canvas"): - cava - st.session_state['edited_image'] = "" - st.rerun() else: image_file = data_repo.get_file_from_uuid(st.session_state['edited_image']) image_comparison( From 94b695dced310733783360fc2b2097a52724c04d Mon Sep 17 00:00:00 2001 From: piyushK52 Date: Sat, 21 Oct 2023 21:37:17 +0530 Subject: [PATCH 111/164] replace inpainted area with img fixed --- shared/constants.py | 1 + ui_components/methods/common_methods.py | 10 +- ui_components/models.py | 12 +- ui_components/widgets/inpainting_element.py | 171 ++++++++++++-------- 4 files changed, 118 insertions(+), 76 deletions(-) diff --git a/shared/constants.py b/shared/constants.py index 403e13e6..a3d60749 100644 --- a/shared/constants.py +++ b/shared/constants.py @@ -97,6 +97,7 @@ class InferenceParamType(ExtendedEnum): class ProjectMetaData(ExtendedEnum): DATA_UPDATE = "data_update" # info regarding cache/data update when runner updates the db GALLERY_UPDATE = "gallery_update" + BACKGROUND_IMG_LIST = "background_img_list" class SortOrder(ExtendedEnum): ASCENDING = "asc" diff --git a/ui_components/methods/common_methods.py b/ui_components/methods/common_methods.py index 
85f62b99..ce18892a 100644 --- a/ui_components/methods/common_methods.py +++ b/ui_components/methods/common_methods.py @@ -674,13 +674,11 @@ def execute_image_edit(type_of_mask_selection, type_of_mask_replacement, bg_img = generate_pil_image(editing_image) mask_img = generate_pil_image(timing.mask.location) - # TODO: fix this logic, if the uploaded image and the image to be editted are of different sizes then - # this code will cause issues result_img = Image.new("RGBA", bg_img.size, (255, 255, 255, 0)) for x in range(bg_img.size[0]): for y in range(bg_img.size[1]): if x < mask_img.size[0] and y < mask_img.size[1]: - if mask_img.getpixel((x, y)) == (0, 0, 0) or mask_img.getpixel((x, y)) == (0, 0, 0, 255): + if mask_img.getpixel((x, y)) == (255, 255, 255): result_img.putpixel((x, y), (255, 255, 255, 0)) else: result_img.putpixel((x, y), bg_img.getpixel((x, y))) @@ -940,8 +938,10 @@ def process_inference_output(**kwargs): def check_project_meta_data(project_uuid): - # checking for project metadata (like cache updates) - # project_update_data is of the format {"data_update": [timing_uuid], "gallery_update": True/False} + ''' + checking for project metadata (like cache updates) + project_update_data is of the format {"data_update": [timing_uuid], "gallery_update": True/False, "background_img_list": []} + ''' data_repo = DataRepo() key = project_uuid diff --git a/ui_components/models.py b/ui_components/models.py index ab505c7c..a699b605 100644 --- a/ui_components/models.py +++ b/ui_components/models.py @@ -1,6 +1,6 @@ import datetime import json -from shared.constants import InferenceParamType +from shared.constants import InferenceParamType, ProjectMetaData from ui_components.constants import DefaultProjectSettingParams, DefaultTimingStyleParams from utils.common_decorators import session_state_attributes @@ -66,6 +66,16 @@ def get_temp_mask_file(self, key): return file return None + + def get_background_image_list(self): + image_list = 
json.loads(self.meta_data).get(ProjectMetaData.BACKGROUND_IMG_LIST.value, []) + if image_list and len(image_list): + from utils.data_repo.data_repo import DataRepo + data_repo = DataRepo() + image_list = data_repo.get_image_list_from_uuid_list(image_list) + return image_list + + return [] class InternalAIModelObject: diff --git a/ui_components/widgets/inpainting_element.py b/ui_components/widgets/inpainting_element.py index b7b2b51f..c730decb 100644 --- a/ui_components/widgets/inpainting_element.py +++ b/ui_components/widgets/inpainting_element.py @@ -1,14 +1,18 @@ +import json import os +import random +import string import time from io import BytesIO from typing import List +import uuid import numpy as np import requests as r from PIL import Image, ImageOps import streamlit as st from streamlit_drawable_canvas import st_canvas -from shared.constants import InferenceType +from shared.constants import InferenceType, InternalFileType, ProjectMetaData from ui_components.constants import CROPPED_IMG_LOCAL_PATH, MASK_IMG_LOCAL_PATH, TEMP_MASK_FILE, WorkflowStageType from ui_components.methods.file_methods import add_temp_file_to_project, save_or_host_file from utils.data_repo.data_repo import DataRepo @@ -17,12 +21,11 @@ from utils.data_repo.data_repo import DataRepo from utils import st_memory from ui_components.methods.common_methods import add_image_variant, execute_image_edit, create_or_update_mask, process_inference_output, promote_image_variant -from ui_components.models import InternalFrameTimingObject, InternalSettingObject +from ui_components.models import InternalFrameTimingObject, InternalProjectObject, InternalSettingObject from streamlit_image_comparison import image_comparison def inpainting_element(timing_uuid): - which_stage_to_inpaint = st_memory.radio("Which stage to work on?", ["Styled Key Frame", "Unedited Key Frame"], horizontal=True, key="which_stage_inpainting") if which_stage_to_inpaint == "Styled Key Frame": @@ -36,6 +39,10 @@ def 
inpainting_element(timing_uuid): project_settings: InternalSettingObject = data_repo.get_project_setting( timing.project.uuid) + if "type_of_mask_replacement" not in st.session_state: + st.session_state["type_of_mask_replacement"] = "Replace With Image" + st.session_state["index_of_type_of_mask_replacement"] = 0 + if len(timing_details) == 0: st.info("You need to add key frames first in the Key Frame Selection section.") else: @@ -147,9 +154,10 @@ def inpainting_element(timing_uuid): if 'image_created' not in st.session_state: st.session_state['image_created'] = 'no' - is_completely_transparent = np.all(canvas_result.image_data[:, :, 3] == 0) + is_completely_transparent = np.all(canvas_result.image_data[:, :, 3] == 0) \ + if canvas_result.image_data is not None else False - if not is_completely_transparent: + if not is_completely_transparent and canvas_result.image_data is not None: img_data = canvas_result.image_data im = Image.fromarray(img_data.astype("uint8"), mode="RGBA") im_rgb = Image.new("RGB", im.size, (255, 255, 255)) @@ -197,11 +205,6 @@ def inpainting_element(timing_uuid): st.rerun() with main_col_1: - - if "type_of_mask_replacement" not in st.session_state: - st.session_state["type_of_mask_replacement"] = "Replace With Image" - st.session_state["index_of_type_of_mask_replacement"] = 0 - types_of_mask_replacement = [ "Inpainting", "Replace With Image"] st.session_state["type_of_mask_replacement"] = st.radio( @@ -213,58 +216,85 @@ def inpainting_element(timing_uuid): st.rerun() if st.session_state["type_of_mask_replacement"] == "Replace With Image": + data_repo = DataRepo() + project: InternalProjectObject = data_repo.get_project_from_uuid(timing.project.uuid) + prompt = "" negative_prompt = "" - background_list = [f for f in os.listdir( - f'videos/{timing.project.uuid}/assets/resources/backgrounds') if f.endswith('.png')] - background_list = [f for f in os.listdir( - f'videos/{timing.project.uuid}/assets/resources/backgrounds') if f.endswith('.png')] + 
background_image_list = project.get_background_image_list() sources_of_images = ["Uploaded", "From Other Frame"] if 'index_of_source_of_image' not in st.session_state: st.session_state['index_of_source_of_image'] = 0 + source_of_image = st.radio("Select type of image", sources_of_images, horizontal=True, index=st.session_state['index_of_source_of_image']) if st.session_state['index_of_source_of_image'] != sources_of_images.index(source_of_image): - st.session_state['index_of_source_of_image'] = sources_of_images.index( - source_of_image) + st.session_state['index_of_source_of_image'] = sources_of_images.index(source_of_image) st.rerun() if source_of_image == "Uploaded": btn1, btn2 = st.columns([1, 1]) with btn1: - uploaded_files = st.file_uploader( - "Add more background images here", accept_multiple_files=True) + uploaded_files = st.file_uploader("Add more background images here", accept_multiple_files=True) if st.button("Upload Backgrounds"): + file_upload_uuid_list = [] for uploaded_file in uploaded_files: - with open(os.path.join(f"videos/{timing.project.uuid}/assets/resources/backgrounds", uploaded_file.name), "wb") as f: - f.write(uploaded_file.getbuffer()) - st.success( - "Your backgrounds are uploaded file - they should appear in the dropdown.") - background_list.append(uploaded_file.name) - time.sleep(1.5) - st.rerun() + if uploaded_file is not None: + image = Image.open(uploaded_file) + name = ''.join(random.choices(string.ascii_lowercase + string.digits, k=4)) + ".png" + file_location = f"videos/{project.uuid}/assets/frames/1_selected/" + name + hosted_url = save_or_host_file(image, file_location) + file_data = { + "name": name, + "type": InternalFileType.IMAGE.value, + "project_id": project.uuid + } + + if hosted_url: + file_data.update({'hosted_url': hosted_url}) + else: + file_data.update({'local_path': file_location}) + + new_image = data_repo.create_file(**file_data) + file_upload_uuid_list.append(str(new_image.uuid)) + + # adding the new uploaded 
images + project_meta_data = json.loads(project.meta_data) + curr_background_img_list = project_meta_data.get(ProjectMetaData.BACKGROUND_IMG_LIST.value, []) + curr_background_img_list.extend(file_upload_uuid_list) + project_meta_data[ProjectMetaData.BACKGROUND_IMG_LIST.value] = curr_background_img_list + data_repo.update_project(uuid=project.uuid, meta_data=json.dumps(project_meta_data)) + st.success("Your backgrounds are uploaded file - they should appear in the dropdown.") + uploaded_files = [] + time.sleep(0.3) + st.rerun() + with btn2: - background_selection = st.selectbox("Range background", background_list) - background_image = f'videos/{timing.project.uuid}/assets/resources/backgrounds/{background_selection}' - if background_list != []: - st.image(f"{background_image}", use_column_width=True) + background_name_list = [f.name for f in background_image_list] + background_selection = st.selectbox("Range background", background_name_list) + + if len(background_name_list): + selected_model_index = next((i for i, obj in \ + enumerate(background_image_list) if getattr(obj, 'name') == background_selection), -1) + + background_image = background_image_list[selected_model_index] + if selected_model_index >= 0: + st.image(background_image.location, use_column_width=True) elif source_of_image == "From Other Frame": btn1, btn2 = st.columns([1, 1]) with btn1: - which_stage_to_use = st.radio( - "Select stage to use:", WorkflowStageType.value_list()) - which_image_to_use = st.number_input( - "Select image to use:", min_value=0, max_value=len(timing_details)-1, value=0) + which_stage_to_use = st.radio("Select stage to use:", WorkflowStageType.value_list()) + which_image_to_use = st.number_input("Select image to use:", min_value=0, max_value=len(timing_details)-1, value=0) + if which_stage_to_use == WorkflowStageType.SOURCE.value: background_image = timing_details[which_image_to_use].source_image.location - elif which_stage_to_use == WorkflowStageType.STYLED.value: 
background_image = timing_details[which_image_to_use].primary_image_location + with btn2: - st.image(background_image, - use_column_width=True) + st.image(background_image, use_column_width=True) elif st.session_state["type_of_mask_replacement"] == "Inpainting": btn1, btn2 = st.columns([1, 1]) @@ -283,44 +313,28 @@ def inpainting_element(timing_uuid): edited_image, log = execute_image_edit(type_of_mask_selection, st.session_state["type_of_mask_replacement"], "", editing_image, prompt, negative_prompt, width, height, st.session_state['which_layer'], st.session_state['current_frame_uuid']) + inference_data = { + "inference_type": InferenceType.FRAME_INPAINTING.value, + "output": edited_image, + "log_uuid": log.uuid, + "timing_uuid": st.session_state['current_frame_uuid'], + "promote_generation": False, + "stage": stage + } + + process_inference_output(**inference_data) + elif st.session_state["type_of_mask_replacement"] == "Replace With Image": edited_image, log = execute_image_edit(type_of_mask_selection, st.session_state["type_of_mask_replacement"], - background_image, editing_image, "", "", width, height, st.session_state['which_layer'], st.session_state['current_frame_uuid']) - - inference_data = { - "inference_type": InferenceType.FRAME_INPAINTING.value, - "output": edited_image, - "log_uuid": log.uuid, - "timing_uuid": st.session_state['current_frame_uuid'], - "promote_generation": False, - "stage": stage - } - - process_inference_output(**inference_data) + background_image.location, editing_image, "", "", width, height, st.session_state['which_layer'], st.session_state['current_frame_uuid']) + replace_with_image(stage, edited_image, st.session_state['current_frame_uuid']) with edit2: - if st.session_state['edited_image'] != "": - if st.button("Promote Last Edit", type="primary"): - if stage == WorkflowStageType.SOURCE.value: - data_repo.update_specific_timing( - st.session_state['current_frame_uuid'], source_image_id=st.session_state['edited_image']) - elif 
stage == WorkflowStageType.STYLED.value: - number_of_image_variants = add_image_variant( - st.session_state['edited_image'], st.session_state['current_frame_uuid']) - promote_image_variant( - st.session_state['current_frame_uuid'], number_of_image_variants - 1) - st.session_state['edited_image'] = "" - st.rerun() - else: - if st.button("Run Edit & Promote"): - if st.session_state["type_of_mask_replacement"] == "Inpainting": - edited_image, log = execute_image_edit(type_of_mask_selection, st.session_state["type_of_mask_replacement"], - "", editing_image, prompt, negative_prompt, width, height, st.session_state['which_layer'], st.session_state['current_frame_uuid']) - - elif st.session_state["type_of_mask_replacement"] == "Replace With Image": - edited_image, log = execute_image_edit(type_of_mask_selection, st.session_state["type_of_mask_replacement"], - background_image, editing_image, "", "", width, height, st.session_state['which_layer'], st.session_state['current_frame_uuid']) - + if st.button("Run Edit & Promote"): + if st.session_state["type_of_mask_replacement"] == "Inpainting": + edited_image, log = execute_image_edit(type_of_mask_selection, st.session_state["type_of_mask_replacement"], + "", editing_image, prompt, negative_prompt, width, height, st.session_state['which_layer'], st.session_state['current_frame_uuid']) + inference_data = { "inference_type": InferenceType.FRAME_INPAINTING.value, "output": edited_image, @@ -332,6 +346,23 @@ def inpainting_element(timing_uuid): process_inference_output(**inference_data) + elif st.session_state["type_of_mask_replacement"] == "Replace With Image": + edited_image, log = execute_image_edit(type_of_mask_selection, st.session_state["type_of_mask_replacement"], + background_image.location, editing_image, "", "", width, height, st.session_state['which_layer'], st.session_state['current_frame_uuid']) + replace_with_image(stage, edited_image, st.session_state['current_frame_uuid'], promote=True) + + +def 
replace_with_image(stage, output_file, current_frame_uuid, promote=False): + data_repo = DataRepo() + + if stage == WorkflowStageType.SOURCE.value: + data_repo.update_specific_timing(current_frame_uuid, source_image_id=output_file.uuid) + elif stage == WorkflowStageType.STYLED.value: + number_of_image_variants = add_image_variant(output_file.uuid, current_frame_uuid) + if promote: + promote_image_variant(current_frame_uuid, number_of_image_variants - 1) + + st.rerun() # cropped_img here is a PIL image object def inpaint_in_black_space_element(cropped_img, project_uuid, stage=WorkflowStageType.SOURCE.value): From 12ab78fbf0fe31a17583e70f5badbab8c8ff3745 Mon Sep 17 00:00:00 2001 From: piyushK52 Date: Sat, 21 Oct 2023 21:38:47 +0530 Subject: [PATCH 112/164] minor fix --- ui_components/widgets/inpainting_element.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/ui_components/widgets/inpainting_element.py b/ui_components/widgets/inpainting_element.py index c730decb..5b115ee9 100644 --- a/ui_components/widgets/inpainting_element.py +++ b/ui_components/widgets/inpainting_element.py @@ -289,12 +289,12 @@ def inpainting_element(timing_uuid): which_image_to_use = st.number_input("Select image to use:", min_value=0, max_value=len(timing_details)-1, value=0) if which_stage_to_use == WorkflowStageType.SOURCE.value: - background_image = timing_details[which_image_to_use].source_image.location + background_image = timing_details[which_image_to_use].source_image elif which_stage_to_use == WorkflowStageType.STYLED.value: - background_image = timing_details[which_image_to_use].primary_image_location + background_image = timing_details[which_image_to_use].primary_image with btn2: - st.image(background_image, use_column_width=True) + st.image(background_image.location, use_column_width=True) elif st.session_state["type_of_mask_replacement"] == "Inpainting": btn1, btn2 = st.columns([1, 1]) From 2a5fbd5494a5d067af6beb0cdbda9e4a38a1f53d Mon Sep 17 00:00:00 2001 
From: peter942 Date: Sat, 21 Oct 2023 22:06:44 +0200 Subject: [PATCH 113/164] Restructing variant comparison elements and more --- .../components/frame_styling_page.py | 27 +--- ui_components/widgets/timeline_view.py | 3 + .../widgets/variant_comparison_element.py | 135 ------------------ .../widgets/variant_comparison_grid.py | 74 +++++++--- 4 files changed, 60 insertions(+), 179 deletions(-) delete mode 100644 ui_components/widgets/variant_comparison_element.py diff --git a/ui_components/components/frame_styling_page.py b/ui_components/components/frame_styling_page.py index 133bac29..e232178f 100644 --- a/ui_components/components/frame_styling_page.py +++ b/ui_components/components/frame_styling_page.py @@ -12,7 +12,7 @@ from ui_components.widgets.add_key_frame_element import add_key_frame, add_key_frame_element from ui_components.widgets.styling_element import styling_element from ui_components.widgets.timeline_view import timeline_view -from ui_components.widgets.variant_comparison_element import compare_to_previous_and_next_frame, compare_to_source_frame, variant_comparison_element + from ui_components.widgets.animation_style_element import animation_style_element from ui_components.widgets.inpainting_element import inpainting_element from ui_components.widgets.drawing_element import drawing_element @@ -76,7 +76,7 @@ def frame_styling_page(mainheader2, project_uuid: str): st.session_state['show_comparison'] = st_memory.radio("Show:", options=["Other Variants", "Preview Video in Context"], horizontal=True, key="show_comparison_radio_motion") if st.session_state['show_comparison'] == "Other Variants": - variant_comparison_element(st.session_state['current_frame_uuid']) + variant_comparison_grid(st.session_state['current_frame_uuid'], stage=CreativeProcessType.STYLING.value) elif st.session_state['show_comparison'] == "Preview Video in Context": current_preview_video_element(st.session_state['current_frame_uuid']) @@ -87,28 +87,10 @@ def 
frame_styling_page(mainheader2, project_uuid: str): elif st.session_state['page'] == CreativeProcessType.STYLING.value: # carousal_of_images_element(project_uuid, stage=WorkflowStageType.STYLED.value) - comparison_values = [ - "Single Variants", - "All Other Variants", - "Source Frame", - "Previous & Next Frame", - "None" - ] - st.session_state['show_comparison'] = st_memory.radio("Show comparison to:", options=comparison_values, horizontal=True, key="show_comparison_radio") - if st.session_state['show_comparison'] == "Single Variants": - variant_comparison_element(st.session_state['current_frame_uuid'], stage=CreativeProcessType.STYLING.value) - - elif st.session_state['show_comparison'] == "All Other Variants": - variant_comparison_grid(st.session_state['current_frame_uuid'], stage=CreativeProcessType.STYLING.value) - - elif st.session_state['show_comparison'] == "Source Frame": - compare_to_source_frame(timing_details) + + variant_comparison_grid(st.session_state['current_frame_uuid'], stage=CreativeProcessType.STYLING.value) - elif st.session_state['show_comparison'] == "Previous & Next Frame": - compare_to_previous_and_next_frame(project_uuid,timing_details) - elif st.session_state['show_comparison'] == "None": - display_image(timing_uuid=st.session_state['current_frame_uuid'], stage=WorkflowStageType.STYLED.value, clickable=False) st.markdown("***") st.session_state['styling_view'] = st_memory.menu('',\ @@ -188,7 +170,6 @@ def frame_styling_page(mainheader2, project_uuid: str): st.rerun() elif st.session_state['frame_styling_view_type'] == "Timeline": - st.markdown("---") if st.session_state['page'] == "Key Frames": with st.sidebar: diff --git a/ui_components/widgets/timeline_view.py b/ui_components/widgets/timeline_view.py index 0237849b..f065b5c1 100644 --- a/ui_components/widgets/timeline_view.py +++ b/ui_components/widgets/timeline_view.py @@ -52,6 +52,8 @@ def timeline_view(project_uuid, stage): data_repo = DataRepo() timing_details = 
data_repo.get_timing_list_from_project(project_uuid) + st.markdown("***") + header_col_1, header_col_2, header_col_3 = st.columns([1.5,4,1.5]) with header_col_1: @@ -81,6 +83,7 @@ def timeline_view(project_uuid, stage): items_per_row = st_memory.slider("How many frames per row?", min_value=1, max_value=10, value=5, step=1, key="items_per_row_slider") st.markdown("***") + total_count = len(timing_details) for i in range(0, total_count, items_per_row): # Step of items_per_row for grid grid = st.columns(items_per_row) # Create items_per_row columns for grid diff --git a/ui_components/widgets/variant_comparison_element.py b/ui_components/widgets/variant_comparison_element.py deleted file mode 100644 index 3f3e4591..00000000 --- a/ui_components/widgets/variant_comparison_element.py +++ /dev/null @@ -1,135 +0,0 @@ -import time -import streamlit as st -from streamlit_image_comparison import image_comparison -from ui_components.constants import CreativeProcessType, WorkflowStageType -from ui_components.methods.common_methods import promote_image_variant, promote_video_variant -from ui_components.methods.video_methods import create_or_get_single_preview_video -from ui_components.widgets.image_carousal import display_image -from utils.data_repo.data_repo import DataRepo - - -def variant_comparison_element(timing_uuid, stage=CreativeProcessType.MOTION.value): - data_repo = DataRepo() - - timing = data_repo.get_timing_from_uuid(timing_uuid) - variants = timing.alternative_images_list - mainimages1, mainimages2 = st.columns([1, 1]) - aboveimage1, aboveimage2, aboveimage3 = st.columns([1, 0.25, 0.75]) - - which_variant = 1 - number_of_variants = 0 - - with aboveimage1: - st.info(f"Current variant = {timing.primary_variant_index + 1}") - - with aboveimage2: - show_more_than_10_variants = st.checkbox("Show >10 variants", key="show_more_than_10_variants") - - with aboveimage3: - number_of_variants = len(timing.interpolated_clip_list) if stage == CreativeProcessType.MOTION.value 
else len(variants) - - if number_of_variants: - if show_more_than_10_variants is True: - current_variant = timing.primary_interpolated_video_index if stage == CreativeProcessType.MOTION.value else int( - timing.primary_variant_index) - which_variant = st.radio(f'Main variant = {current_variant + 1}', range(1, - number_of_variants + 1), index=number_of_variants-1, horizontal=True, key=f"Main variant for {st.session_state['current_frame_index']}") - else: - last_ten_variants = range( - max(1, number_of_variants - 10), number_of_variants + 1) - current_variant = timing.primary_interpolated_video_index if stage == CreativeProcessType.MOTION.value else int( - timing.primary_variant_index) - which_variant = st.radio(f'Main variant = {current_variant + 1}', last_ten_variants, index=len( - last_ten_variants)-1, horizontal=True, key=f"Main variant for {st.session_state['current_frame_index']}") - - with mainimages1: - st.success("**Main variant**") - if stage == CreativeProcessType.MOTION.value: - st.video(timing.timed_clip.location, format='mp4', start_time=0) if timing.timed_clip else st.error("No video present") - else: - if len(timing.alternative_images_list): - st.image(timing.primary_image_location, use_column_width=True) - else: - st.error("No variants found for this frame") - - with mainimages2: - if stage == CreativeProcessType.MOTION.value: - if number_of_variants: - if not (timing.interpolated_clip_list and len(timing.interpolated_clip_list)): - st.error("No variant for this frame") - if which_variant - 1 == current_variant: - st.success("**Main variant**") - else: - st.info(f"**Variant #{which_variant}**") - - st.video(timing.interpolated_clip_list[which_variant - 1].location, format='mp4', start_time=0) if \ - (timing.interpolated_clip_list and len(timing.interpolated_clip_list)) else st.error("No video present") - else: - st.error("No variants found for this frame") - else: - if len(timing.alternative_images_list): - if which_variant - 1 == current_variant: - 
st.success("**Main variant**") - else: - st.info(f"**Variant #{which_variant}**") - - st.image(variants[which_variant - 1].location, - use_column_width=True) - - if number_of_variants: - if which_variant - 1 != current_variant: - if st.button(f"Promote Variant #{which_variant}", key=f"Promote Variant #{which_variant} for {st.session_state['current_frame_index']}", help="Promote this variant to the primary image"): - if stage == CreativeProcessType.MOTION.value: - promote_video_variant(timing.uuid, which_variant - 1) - else: - promote_image_variant(timing.uuid, which_variant - 1) - time.sleep(0.5) - st.rerun() - - -def compare_to_previous_and_next_frame(project_uuid, timing_details): - data_repo = DataRepo() - mainimages1, mainimages2, mainimages3 = st.columns([1, 1, 1]) - - with mainimages1: - if st.session_state['current_frame_index'] - 2 >= 0: - previous_image = data_repo.get_timing_from_frame_number(project_uuid, frame_number=st.session_state['current_frame_index'] - 2) - st.info(f"Previous image:") - display_image( - timing_uuid=previous_image.uuid, stage=WorkflowStageType.STYLED.value, clickable=False) - - if st.button(f"Preview Interpolation From #{st.session_state['current_frame_index']-1} to #{st.session_state['current_frame_index']}", key=f"Preview Interpolation From #{st.session_state['current_frame_index']-1} to #{st.session_state['current_frame_index']}", use_container_width=True): - prev_frame_timing = data_repo.get_prev_timing(st.session_state['current_frame_uuid']) - create_or_get_single_preview_video(prev_frame_timing.uuid) - prev_frame_timing = data_repo.get_timing_from_uuid(prev_frame_timing.uuid) - if prev_frame_timing.preview_video: - st.video(prev_frame_timing.preview_video.location) - - with mainimages2: - st.success(f"Current image:") - display_image( - timing_uuid=st.session_state['current_frame_uuid'], stage=WorkflowStageType.STYLED.value, clickable=False) - - with mainimages3: - if st.session_state['current_frame_index'] + 1 <= 
len(timing_details): - next_image = data_repo.get_timing_from_frame_number(project_uuid, frame_number=st.session_state['current_frame_index']) - st.info(f"Next image") - display_image(timing_uuid=next_image.uuid, stage=WorkflowStageType.STYLED.value, clickable=False) - - if st.button(f"Preview Interpolation From #{st.session_state['current_frame_index']} to #{st.session_state['current_frame_index']+1}", key=f"Preview Interpolation From #{st.session_state['current_frame_index']} to #{st.session_state['current_frame_index']+1}", use_container_width=True): - create_or_get_single_preview_video(st.session_state['current_frame_uuid']) - current_frame = data_repo.get_timing_from_uuid(st.session_state['current_frame_uuid']) - st.video(current_frame.timed_clip.location) - - -def compare_to_source_frame(timing_details): - if timing_details[st.session_state['current_frame_index']- 1].primary_image: - img2 = timing_details[st.session_state['current_frame_index'] - 1].primary_image_location - else: - img2 = 'https://i.ibb.co/GHVfjP0/Image-Not-Yet-Created.png' - - img1 = timing_details[st.session_state['current_frame_index'] - 1].source_image.location if timing_details[st.session_state['current_frame_index'] - 1].source_image else 'https://i.ibb.co/GHVfjP0/Image-Not-Yet-Created.png' - - image_comparison(starting_position=50, - img1=img1, - img2=img2, make_responsive=False, label1=WorkflowStageType.SOURCE.value, label2=WorkflowStageType.STYLED.value) \ No newline at end of file diff --git a/ui_components/widgets/variant_comparison_grid.py b/ui_components/widgets/variant_comparison_grid.py index dd1a9060..175e566b 100644 --- a/ui_components/widgets/variant_comparison_grid.py +++ b/ui_components/widgets/variant_comparison_grid.py @@ -13,24 +13,56 @@ def variant_comparison_grid(timing_uuid, stage=CreativeProcessType.MOTION.value) current_variant = timing.primary_interpolated_video_index if stage == CreativeProcessType.MOTION.value else int( timing.primary_variant_index) - 
num_columns = st.slider('Number of columns', min_value=1, max_value=10, value=4) - - for i in range(0, len(variants), num_columns): - cols = st.columns(num_columns) - for j in range(num_columns): - variant_index = i + j - if variant_index < len(variants): - with cols[j]: - if stage == CreativeProcessType.MOTION.value: - st.video(variants[variant_index].location, format='mp4', start_time=0) if variants[variant_index] else st.error("No video present") - else: - st.image(variants[variant_index].location, use_column_width=True) - - if variant_index == current_variant: - st.success("**Main variant**") - else: - st.info(f"Variant #{variant_index + 1}") - - if st.button(f"Promote Variant #{variant_index + 1}", key=f"Promote Variant #{variant_index + 1} for {st.session_state['current_frame_index']}", help="Promote this variant to the primary image", use_container_width=True): - promote_image_variant(timing.uuid, variant_index) - st.rerun() \ No newline at end of file + st.markdown("***") + + col1, col2 = st.columns([1, 1]) + items_to_show = col1.slider('Variants per page:', min_value=1, max_value=12, value=6) + num_columns = col2.slider('Number of columns:', min_value=1, max_value=6, value=3) + + # Display the main variant first + num_pages = (len(variants) + 1) // items_to_show + if (len(variants) + 1) % items_to_show != 0: + num_pages += 1 + + + # Create a number input for page selection if there's more than one page + page = 1 + if num_pages > 1: + page = st.radio('Page:', options=list(range(1, num_pages + 1)), horizontal=True) + + st.markdown("***") + + # Display the main variant first + cols = st.columns(num_columns) + with cols[0]: + if stage == CreativeProcessType.MOTION.value: + st.video(variants[current_variant].location, format='mp4', start_time=0) if variants[current_variant] else st.error("No video present") + else: + st.image(variants[current_variant].location, use_column_width=True) + st.success("**Main variant**") + + # Calculate start and end indices for 
the variants to display on this page + start = (page - 1) * items_to_show + end = min(start + items_to_show, len(variants)) # Ensure end does not exceed the length of the variants list + + # Start from the last variant + next_col = 1 + for i in range(end - 1, start - 1, -1): + variant_index = i + if variant_index != current_variant: # Skip the main variant + with cols[next_col]: # Use next_col to place the variant + if stage == CreativeProcessType.MOTION.value: + st.video(variants[variant_index].location, format='mp4', start_time=0) if variants[variant_index] else st.error("No video present") + else: + st.image(variants[variant_index].location, use_column_width=True) + + if st.button(f"Promote Variant #{variant_index + 1}", key=f"Promote Variant #{variant_index + 1} for {st.session_state['current_frame_index']}", help="Promote this variant to the primary image", use_container_width=True): + promote_image_variant(timing.uuid, variant_index) + st.rerun() + + next_col += 1 # Move to the next column + + # Create new row after filling the current one + if next_col >= num_columns: + cols = st.columns(num_columns) + next_col = 0 # Reset column counter \ No newline at end of file From c0983c17cbdd899992a4aea08817dd21835ce92d Mon Sep 17 00:00:00 2001 From: piyushK52 Date: Sun, 22 Oct 2023 13:17:01 +0530 Subject: [PATCH 114/164] project creation fixed --- banodoco_settings.py | 5 +++-- ui_components/components/new_project_page.py | 9 +++------ ui_components/methods/file_methods.py | 10 +++++++--- utils/data_repo/data_repo.py | 2 +- 4 files changed, 14 insertions(+), 12 deletions(-) diff --git a/banodoco_settings.py b/banodoco_settings.py index 4cda3c09..f4d568c2 100644 --- a/banodoco_settings.py +++ b/banodoco_settings.py @@ -92,11 +92,12 @@ def create_new_project(user: InternalUserObject, project_name: str, width=512, h sample_file_location = "sample_assets/sample_images/v.jpeg" img = Image.open(sample_file_location) img = img.resize((width, height)) - hosted_url = 
save_or_host_file(img, sample_file_location) + hosted_url = save_or_host_file(img, sample_file_location, mime_type='image/png', dim=(width, height)) file_data = { "name": str(uuid.uuid4()), "type": InternalFileType.IMAGE.value, - "project_id": project.uuid + "project_id": project.uuid, + "dim": (width, height), } if hosted_url: diff --git a/ui_components/components/new_project_page.py b/ui_components/components/new_project_page.py index b8e0d645..4acc449f 100644 --- a/ui_components/components/new_project_page.py +++ b/ui_components/components/new_project_page.py @@ -78,12 +78,9 @@ def new_project_page(): new_project_name = new_project_name.replace(" ", "_") current_user = data_repo.get_first_active_user() - try: - new_project = create_new_project(current_user, new_project_name, width, height, "Images", "Interpolation") - new_timing = create_timings_row_at_frame_number(new_project.uuid, 0) - except Exception as e: - st.error(f"Failed to create the new project due to {str(e)}") - + new_project = create_new_project(current_user, new_project_name, width, height, "Images", "Interpolation") + new_timing = create_timings_row_at_frame_number(new_project.uuid, 0) + if starting_image: try: save_and_promote_image(starting_image, new_project.uuid, new_timing.uuid, "source") diff --git a/ui_components/methods/file_methods.py b/ui_components/methods/file_methods.py index 231b9177..8e5a3b8f 100644 --- a/ui_components/methods/file_methods.py +++ b/ui_components/methods/file_methods.py @@ -67,13 +67,17 @@ def zoom_and_crop(file, width, height): return file # resizes file dimensions to current project_settings -def normalize_size_internal_file_obj(file_obj: InternalFileObject): +def normalize_size_internal_file_obj(file_obj: InternalFileObject, **kwargs): if not file_obj or file_obj.type != InternalFileType.IMAGE.value or not file_obj.project: return file_obj data_repo = DataRepo() - project_setting = data_repo.get_project_setting(file_obj.project.uuid) - dim = 
(project_setting.width, project_setting.height) + + if 'dim' in kwargs: + dim = kwargs['dim'] + else: + project_setting = data_repo.get_project_setting(file_obj.project.uuid) + dim = (project_setting.width, project_setting.height) pil_file = generate_pil_image(file_obj.location) uploaded_url = save_or_host_file(pil_file, file_obj.location, mime_type='image/png', dim=dim) diff --git a/utils/data_repo/data_repo.py b/utils/data_repo/data_repo.py index 3d5c41eb..d52a8e38 100644 --- a/utils/data_repo/data_repo.py +++ b/utils/data_repo/data_repo.py @@ -110,7 +110,7 @@ def create_file(self, **kwargs): if file and file.type == InternalFileType.IMAGE.value: from ui_components.methods.file_methods import normalize_size_internal_file_obj - file = normalize_size_internal_file_obj(file) + file = normalize_size_internal_file_obj(file, **kwargs) return file From ebe20c6cb6ebade4ded8098c4b0a41a67fb3a75b Mon Sep 17 00:00:00 2001 From: peter942 Date: Sun, 22 Oct 2023 16:24:44 +0200 Subject: [PATCH 115/164] Fixing prompt strength on SDXL --- utils/ml_processor/replicate/utils.py | 1 + 1 file changed, 1 insertion(+) diff --git a/utils/ml_processor/replicate/utils.py b/utils/ml_processor/replicate/utils.py index fa1bac54..13783855 100644 --- a/utils/ml_processor/replicate/utils.py +++ b/utils/ml_processor/replicate/utils.py @@ -74,6 +74,7 @@ def get_model_params_from_query_obj(model, query_obj: MLQueryObject): "negative_prompt" : query_obj.negative_prompt, "width" : max(768, query_obj.width), # 768 is the default for sdxl "height" : max(768, query_obj.height), + "prompt_strength": query_obj.strength, "mask": mask } From debcc001f3fdcbef939c89290ebc4a7e578bd20b Mon Sep 17 00:00:00 2001 From: piyushK52 Date: Mon, 23 Oct 2023 16:31:09 +0530 Subject: [PATCH 116/164] 3_img interpolation added --- .../components/frame_styling_page.py | 26 +++------ ui_components/methods/common_methods.py | 53 +++++++++++-------- ui_components/methods/data_logger.py | 2 +- 
ui_components/methods/video_methods.py | 18 +++++-- ui_components/setup.py | 2 +- .../widgets/frame_clip_generation_elements.py | 41 +------------- .../widgets/variant_comparison_grid.py | 36 +++++++------ utils/common_decorators.py | 3 +- utils/media_processor/interpolator.py | 40 ++++++++++---- utils/ml_processor/replicate/constants.py | 2 +- 10 files changed, 108 insertions(+), 115 deletions(-) diff --git a/ui_components/components/frame_styling_page.py b/ui_components/components/frame_styling_page.py index e232178f..e752d0aa 100644 --- a/ui_components/components/frame_styling_page.py +++ b/ui_components/components/frame_styling_page.py @@ -4,10 +4,8 @@ from ui_components.methods.ml_methods import trigger_restyling_process from ui_components.widgets.cropping_element import cropping_selector_element -from ui_components.widgets.frame_clip_generation_elements import current_preview_video_element from ui_components.widgets.frame_selector import frame_selector_widget from ui_components.widgets.frame_style_clone_element import style_cloning_element -from ui_components.widgets.image_carousal import display_image from ui_components.widgets.prompt_finder import prompt_finder_element from ui_components.widgets.add_key_frame_element import add_key_frame, add_key_frame_element from ui_components.widgets.styling_element import styling_element @@ -21,7 +19,7 @@ from ui_components.widgets.variant_comparison_grid import variant_comparison_grid from utils import st_memory -from ui_components.constants import CreativeProcessType, WorkflowStageType +from ui_components.constants import CreativeProcessType from utils.data_repo.data_repo import DataRepo @@ -41,7 +39,6 @@ def frame_styling_page(mainheader2, project_uuid: str): st.session_state['seed'] = project_settings.default_seed st.session_state['num_inference_steps'] = project_settings.default_num_inference_steps st.session_state['transformation_stage'] = project_settings.default_stage - st.session_state['show_comparison'] = 
"Don't show" if "current_frame_uuid" not in st.session_state: timing = data_repo.get_timing_list_from_project(project_uuid)[0] @@ -67,30 +64,20 @@ def frame_styling_page(mainheader2, project_uuid: str): if st.session_state['frame_styling_view_type'] == "Explorer": style_explorer_element(project_uuid) + # -------------------- INDIVIDUAL VIEW ---------------------- elif st.session_state['frame_styling_view_type'] == "Individual": with st.sidebar: frame_selector_widget() if st.session_state['page'] == CreativeProcessType.MOTION.value: - idx = st.session_state['current_frame_index'] - 1 - st.session_state['show_comparison'] = st_memory.radio("Show:", options=["Other Variants", "Preview Video in Context"], horizontal=True, key="show_comparison_radio_motion") + variant_comparison_grid(st.session_state['current_frame_uuid'], stage=CreativeProcessType.MOTION.value) - if st.session_state['show_comparison'] == "Other Variants": - variant_comparison_grid(st.session_state['current_frame_uuid'], stage=CreativeProcessType.STYLING.value) - - elif st.session_state['show_comparison'] == "Preview Video in Context": - current_preview_video_element(st.session_state['current_frame_uuid']) - st.markdown("***") with st.expander("🎬 Choose Animation Style & Create Variants", expanded=True): animation_style_element(st.session_state['current_frame_uuid'], project_uuid) elif st.session_state['page'] == CreativeProcessType.STYLING.value: - # carousal_of_images_element(project_uuid, stage=WorkflowStageType.STYLED.value) - - variant_comparison_grid(st.session_state['current_frame_uuid'], stage=CreativeProcessType.STYLING.value) - - + variant_comparison_grid(st.session_state['current_frame_uuid'], stage=CreativeProcessType.STYLING.value) st.markdown("***") st.session_state['styling_view'] = st_memory.menu('',\ @@ -115,7 +102,6 @@ def frame_styling_page(mainheader2, project_uuid: str): ) with detail2: - # TODO: add custom model validation such for sd img2img the value of strength can only be 1 if 
st.button(f"Generate variants", key=f"new_variations_{st.session_state['current_frame_index']}", help="This will generate new variants based on the settings to the left."): for i in range(0, max(st.session_state['individual_number_of_variants'], 1)): trigger_restyling_process( @@ -168,7 +154,8 @@ def frame_styling_page(mainheader2, project_uuid: str): if st.button(f"Add key frame",type="primary",use_container_width=True): add_key_frame(selected_image, inherit_styling_settings, how_long_after) st.rerun() - + + # -------------------- TIMELINE VIEW -------------------------- elif st.session_state['frame_styling_view_type'] == "Timeline": if st.session_state['page'] == "Key Frames": @@ -179,6 +166,7 @@ def frame_styling_page(mainheader2, project_uuid: str): elif st.session_state['page'] == "Videos": timeline_view(project_uuid, "Videos") + # -------------------- SIDEBAR NAVIGATION -------------------------- with st.sidebar: with st.expander("🔍 Inference Logging", expanded=True): sidebar_logger(project_uuid) diff --git a/ui_components/methods/common_methods.py b/ui_components/methods/common_methods.py index ce18892a..94ab4eae 100644 --- a/ui_components/methods/common_methods.py +++ b/ui_components/methods/common_methods.py @@ -279,47 +279,54 @@ def save_uploaded_image(image, project_uuid, frame_uuid, save_type): print(f"Failed to save image file due to: {str(e)}") return None +# TODO: change variant_to_promote_frame_number to variant_uuid def promote_image_variant(timing_uuid, variant_to_promote_frame_number: str): + ''' + this methods promotes the variant to the primary image (also referred to as styled image) + and clears the interpolation data of the prev and the current frame + ''' data_repo = DataRepo() timing = data_repo.get_timing_from_uuid(timing_uuid) + # promoting variant variant_to_promote = timing.alternative_images_list[variant_to_promote_frame_number] - data_repo.update_specific_timing( - timing_uuid, primary_image_id=variant_to_promote.uuid) + 
data_repo.update_specific_timing(timing_uuid, primary_image_id=variant_to_promote.uuid) prev_timing = data_repo.get_prev_timing(timing_uuid) + # clearing the interpolation data of the prev frame if prev_timing: - data_repo.update_specific_timing( - prev_timing.uuid, interpolated_clip_list=None) - data_repo.update_specific_timing( - timing_uuid, interpolated_clip_list=None) + data_repo.update_specific_timing(prev_timing.uuid, interpolated_clip_list=None) + data_repo.update_specific_timing(timing_uuid, interpolated_clip_list=None) + data_repo.update_specific_timing(prev_timing.uuid, timed_clip_id=None) timing_details: List[InternalFrameTimingObject] = data_repo.get_timing_list_from_project( timing.project.uuid) - frame_idx = timing.aux_frame_index - - # DOUBT: setting last interpolated_video to empty? - if frame_idx < len(timing_details): - data_repo.update_specific_timing( - timing.uuid, interpolated_clip_list=None) - - if frame_idx > 1: - data_repo.update_specific_timing( - data_repo.get_prev_timing(timing_uuid).uuid, timed_clip_id=None) - - data_repo.update_specific_timing(timing_uuid, timed_clip_id=None) - - if frame_idx < len(timing_details): + + # if this is not the last element then clearing it's interpolation data + if timing.aux_frame_index < len(timing_details): + data_repo.update_specific_timing(timing.uuid, interpolated_clip_list=None) data_repo.update_specific_timing(timing.uuid, timed_clip_id=None) + data_repo.update_specific_timing(timing_uuid, timed_clip_id=None) _ = data_repo.get_timing_list_from_project(timing.project.uuid) -# updates the clip duration of the variant_to_promote and sets it as the timed_clip -def promote_video_variant(timing_uuid, variant_to_promote_frame_number: str): + +def promote_video_variant(timing_uuid, variant_uuid): + ''' + this first changes the duration of the interpolated_clip to the frame clip_duration + then adds the clip to the timed_clip (which is considered as the main variant) + ''' data_repo = DataRepo() timing = 
data_repo.get_timing_from_uuid(timing_uuid) - variant_to_promote = timing.interpolated_clip_list[variant_to_promote_frame_number] + variant_to_promote = None + for variant in timing.interpolated_clip_list: + if variant.uuid == variant_uuid: + variant_to_promote = variant + break + + if not variant_to_promote: + return None if variant_to_promote.location.startswith(('http://', 'https://')): temp_video_path, _ = urllib3.request.urlretrieve(variant_to_promote.location) diff --git a/ui_components/methods/data_logger.py b/ui_components/methods/data_logger.py index 8c54c070..21d448f8 100644 --- a/ui_components/methods/data_logger.py +++ b/ui_components/methods/data_logger.py @@ -39,7 +39,7 @@ def log_model_inference(model: ReplicateModel, time_taken, **kwargs): ai_model = data_repo.get_ai_model_from_name(model.name) # hackish sol for insuring that inpainting logs don't have an empty model field - if ai_model is None and model.name == REPLICATE_MODEL.sdxl_inpainting.name: + if ai_model is None and model.name in [REPLICATE_MODEL.sdxl_inpainting.name, REPLICATE_MODEL.ad_interpolation.name]: ai_model = data_repo.get_ai_model_from_name(REPLICATE_MODEL.sdxl.name) log_data = { diff --git a/ui_components/methods/video_methods.py b/ui_components/methods/video_methods.py index 18bfcb16..a7f2931f 100644 --- a/ui_components/methods/video_methods.py +++ b/ui_components/methods/video_methods.py @@ -123,17 +123,29 @@ def create_or_get_single_preview_video(timing_uuid, interpolated_clip_uuid=None) timing_uuid) return timing.preview_video -# this includes all the animation styles [direct morphing, interpolation, image to video] def create_single_interpolated_clip(timing_uuid, quality, settings={}, variant_count=1): + ''' + - this includes all the animation styles [direct morphing, interpolation, image to video] + - this stores the newly created video in the interpolated_clip_list and promotes them to + timed_clip (if it's not already present) + ''' + from 
ui_components.methods.common_methods import process_inference_output from shared.constants import QUEUE_INFERENCE_QUERIES data_repo = DataRepo() timing: InternalFrameTimingObject = data_repo.get_timing_from_uuid(timing_uuid) next_timing: InternalFrameTimingObject = data_repo.get_next_timing(timing_uuid) + prev_timing: InternalFrameTimingObject = data_repo.get_prev_timing(timing_uuid) if not next_timing: - st.error('This is the last image. Please add more images to create interpolated clip.') + st.error('This is the last image. Please select images having both prev & next images') + time.sleep(0.5) + return None + + if not prev_timing: + st.error('This is the first image. Please select images having both prev & next images') + time.sleep(0.5) return None if quality == 'full': @@ -142,7 +154,7 @@ def create_single_interpolated_clip(timing_uuid, quality, settings={}, variant_c interpolation_steps = 3 timing.interpolated_steps = interpolation_steps - img_list = [timing.primary_image.location, next_timing.primary_image.location] + img_list = [prev_timing.primary_image.location, timing.primary_image.location, next_timing.primary_image.location] settings.update(interpolation_steps=timing.interpolation_steps) # res is an array of tuples (video_bytes, log) diff --git a/ui_components/setup.py b/ui_components/setup.py index 8b50dee1..e0b43bb3 100644 --- a/ui_components/setup.py +++ b/ui_components/setup.py @@ -4,7 +4,7 @@ import os import math from moviepy.editor import * -from shared.constants import SERVER, ProjectMetaData, ServerType +from shared.constants import SERVER, ServerType from ui_components.components.app_settings_page import app_settings_page from ui_components.components.custom_models_page import custom_models_page diff --git a/ui_components/widgets/frame_clip_generation_elements.py b/ui_components/widgets/frame_clip_generation_elements.py index ea9d8ea0..0770be4d 100644 --- a/ui_components/widgets/frame_clip_generation_elements.py +++ 
b/ui_components/widgets/frame_clip_generation_elements.py @@ -74,43 +74,4 @@ def update_animation_style_element(timing_uuid, horizontal=True): if st.session_state[f"animation_style_{idx}"] != timing.animation_style: st.session_state[f"animation_style_index_{idx}"] = animation_styles.index(st.session_state[f"animation_style_{idx}"]) timing.animation_style = st.session_state[f"animation_style_{idx}"] - st.rerun() - - -def current_preview_video_element(timing_uuid): - data_repo = DataRepo() - timing: InternalFrameTimingObject = data_repo.get_timing_from_uuid(timing_uuid) - idx = timing.aux_frame_index - st.info("Preview Video in Context:") - - preview_video_1, preview_video_2 = st.columns([2.5, 1]) - - with preview_video_1: - if timing.preview_video: - st.video(timing.preview_video.location) - else: - st.error(''' - **----------------------------------------** - - --------- - - ================== - - **No Preview Video Created Yet** - - ================== - - --------- - - **----------------------------------------** - ''') - - with preview_video_2: - - st.info("This allows you to preview the video with the surrounding clips attached.") - - if st.button("Generate New Preview Video", key=f"generate_preview_{idx}"): - preview_video = create_full_preview_video(timing.uuid, 1.0) - data_repo.update_specific_timing( - timing.uuid, preview_video_id=preview_video.uuid) - st.rerun() \ No newline at end of file + st.rerun() \ No newline at end of file diff --git a/ui_components/widgets/variant_comparison_grid.py b/ui_components/widgets/variant_comparison_grid.py index 175e566b..88d349ab 100644 --- a/ui_components/widgets/variant_comparison_grid.py +++ b/ui_components/widgets/variant_comparison_grid.py @@ -1,6 +1,6 @@ import streamlit as st from ui_components.constants import CreativeProcessType -from ui_components.methods.common_methods import promote_image_variant +from ui_components.methods.common_methods import promote_image_variant, promote_video_variant from 
utils.data_repo.data_repo import DataRepo @@ -8,10 +8,7 @@ def variant_comparison_grid(timing_uuid, stage=CreativeProcessType.MOTION.value) data_repo = DataRepo() timing = data_repo.get_timing_from_uuid(timing_uuid) - variants = timing.alternative_images_list - - current_variant = timing.primary_interpolated_video_index if stage == CreativeProcessType.MOTION.value else int( - timing.primary_variant_index) + variants = timing.interpolated_clip_list if stage == CreativeProcessType.MOTION.value else timing.alternative_images_list st.markdown("***") @@ -19,20 +16,24 @@ def variant_comparison_grid(timing_uuid, stage=CreativeProcessType.MOTION.value) items_to_show = col1.slider('Variants per page:', min_value=1, max_value=12, value=6) num_columns = col2.slider('Number of columns:', min_value=1, max_value=6, value=3) - # Display the main variant first num_pages = (len(variants) + 1) // items_to_show if (len(variants) + 1) % items_to_show != 0: num_pages += 1 - # Create a number input for page selection if there's more than one page page = 1 if num_pages > 1: page = st.radio('Page:', options=list(range(1, num_pages + 1)), horizontal=True) + if not len(variants): + st.info("No variants present") + return + + current_variant = timing.primary_interpolated_video_index if stage == CreativeProcessType.MOTION.value else int( + timing.primary_variant_index) + st.markdown("***") - # Display the main variant first cols = st.columns(num_columns) with cols[0]: if stage == CreativeProcessType.MOTION.value: @@ -41,28 +42,29 @@ def variant_comparison_grid(timing_uuid, stage=CreativeProcessType.MOTION.value) st.image(variants[current_variant].location, use_column_width=True) st.success("**Main variant**") - # Calculate start and end indices for the variants to display on this page start = (page - 1) * items_to_show - end = min(start + items_to_show, len(variants)) # Ensure end does not exceed the length of the variants list + end = min(start + items_to_show, len(variants)) - # Start from 
the last variant next_col = 1 for i in range(end - 1, start - 1, -1): variant_index = i - if variant_index != current_variant: # Skip the main variant - with cols[next_col]: # Use next_col to place the variant + if variant_index != current_variant: + with cols[next_col]: if stage == CreativeProcessType.MOTION.value: st.video(variants[variant_index].location, format='mp4', start_time=0) if variants[variant_index] else st.error("No video present") else: - st.image(variants[variant_index].location, use_column_width=True) + st.image(variants[variant_index].location, use_column_width=True) if variants[variant_index] else st.error("No image present") if st.button(f"Promote Variant #{variant_index + 1}", key=f"Promote Variant #{variant_index + 1} for {st.session_state['current_frame_index']}", help="Promote this variant to the primary image", use_container_width=True): - promote_image_variant(timing.uuid, variant_index) + if stage == CreativeProcessType.MOTION.value: + promote_video_variant(timing.uuid, variants[variant_index].uuid) + else: + promote_image_variant(timing.uuid, variant_index) + st.rerun() - next_col += 1 # Move to the next column + next_col += 1 - # Create new row after filling the current one if next_col >= num_columns: cols = st.columns(num_columns) next_col = 0 # Reset column counter \ No newline at end of file diff --git a/utils/common_decorators.py b/utils/common_decorators.py index 16b9e623..2296c68a 100644 --- a/utils/common_decorators.py +++ b/utils/common_decorators.py @@ -1,5 +1,6 @@ import time import streamlit as st +from streamlit import runtime def count_calls(cls): class Wrapper(cls): @@ -69,7 +70,7 @@ def custom_attr(self, attr): if not (key in st.session_state and st.session_state[key]): st.session_state[key] = getattr(default_value_cls, attr) - return st.session_state[key] + return st.session_state[key] if runtime.exists() else getattr(default_value_cls, attr) else: return original_getattr(self, attr) diff --git 
a/utils/media_processor/interpolator.py b/utils/media_processor/interpolator.py index 2872e9cd..590f5de5 100644 --- a/utils/media_processor/interpolator.py +++ b/utils/media_processor/interpolator.py @@ -60,6 +60,7 @@ def video_through_frame_interpolation(img_location_list, settings, variant_count # TODO: extend this for more than two images img1 = img_location_list[0] img2 = img_location_list[1] + img3 = img_location_list[2] if not img1.startswith("http"): img1 = open(img1, "rb") @@ -67,6 +68,9 @@ def video_through_frame_interpolation(img_location_list, settings, variant_count if not img2.startswith("http"): img2 = open(img2, "rb") + if not img3.startswith("http"): + img3 = open(img3, "rb") + ml_client = get_ml_client() animation_tool = settings['animation_tool'] if 'animation_tool' in settings else AnimationToolType.G_FILM.value @@ -83,16 +87,34 @@ def video_through_frame_interpolation(img_location_list, settings, variant_count # since workflows can have multiple input params it's not standardized yet elif animation_tool == AnimationToolType.ANIMATEDIFF.value: + # data = { + # "positive_prompt": settings['positive_prompt'], + # "negative_prompt": settings['negative_prompt'], + # "image_dimension": settings['image_dimension'], + # "starting_image_path": img1, + # "ending_image_path": img2, + # "sampling_steps": settings['sampling_steps'], + # "motion_module": settings['motion_module'], + # "model": settings['model'], + # "queue_inference": queue_inference + # } + data = { - "positive_prompt": settings['positive_prompt'], - "negative_prompt": settings['negative_prompt'], - "image_dimension": settings['image_dimension'], - "starting_image_path": img1, - "ending_image_path": img2, - "sampling_steps": settings['sampling_steps'], - "motion_module": settings['motion_module'], - "model": settings['model'], - "queue_inference": queue_inference + "prompt_travel" : "0_:16_:24_", # default value.. format {idx_prompt}:... 
+ "negative_prompt" : settings['negative_prompt'], + "img_1" : img1, + "img_2" : img2, + "img_3" : img3, + "motion_module" : settings['motion_module'], + "model" : settings['model'], + "img_1_latent_cn_weights" : "0=1.00,1=0.82,2=0.74,3=0.56,4=0.47,5=0.41,6=0.38,7=0.33,8=0.30,9=0.28,10=0.25,11=0.24,12=0.20,13=0.17,14=0.15,15=0.13,16=0.13,17=0.11,18=0.11,19=0.11,20=0.11,21=0.11,22=0.10,23=0.09,24=0.06,25=0.04,26=0.03,27=0.01,28=0.00,29=0.00,30=0.00,31=0.00,32=0.00,33=0.00,34=0.00,35=0.00,36=0.00,37=0.00,38=0.00,39=0.00,40=0.00,41=0.00,42=0.00,43=0.00,44=0.00,45=0.00,46=0.00,47=0.00", + "img_2_latent_cn_weights" : "0=0.09,1=0.10,2=0.11,3=0.11,4=0.11,5=0.11,6=0.11,7=0.13,8=0.13,9=0.15,10=0.17,11=0.20,12=0.24,13=0.25,14=0.28,15=0.30,16=0.33,17=0.38,18=0.41,19=0.47,20=0.56,21=0.74,22=0.82,23=1.00,24=1.00,25=0.82,26=0.74,27=0.56,28=0.47,29=0.41,30=0.38,31=0.33,32=0.30,33=0.28,34=0.25,35=0.24,36=0.20,37=0.17,38=0.15,39=0.13,40=0.13,41=0.11,42=0.11,43=0.11,44=0.11,45=0.11,46=0.10,47=0.09\n\n\n\n", + "img_3_latent_cn_weights" : "0=0.00,1=0.00,2=0.00,3=0.00,4=0.00,5=0.00,6=0.00,7=0.00,8=0.00,9=0.00,10=0.00,11=0.00,12=0.00,13=0.00,14=0.00,15=0.00,16=0.00,17=0.00,18=0.00,19=0.00,20=0.01,21=0.03,22=0.04,23=0.06,24=0.09,25=0.10,26=0.11,27=0.11,28=0.11,29=0.11,30=0.11,31=0.13,32=0.13,33=0.15,34=0.17,35=0.20,36=0.24,37=0.25,38=0.28,39=0.30,40=0.33,41=0.38,42=0.41,43=0.47,44=0.56,45=0.74,46=0.82,47=1.00", + "ip_adapter_weight" : 0.4, + "ip_adapter_noise" : 0.5, + "output_format" : "video/h264-mp4", # can also be "image/gif" + "queue_inference" : queue_inference, + "image_dimension": settings['image_dimension'] } res = ml_client.predict_model_output(REPLICATE_MODEL.ad_interpolation, **data) diff --git a/utils/ml_processor/replicate/constants.py b/utils/ml_processor/replicate/constants.py index 40fb1b58..c09c6dc4 100644 --- a/utils/ml_processor/replicate/constants.py +++ b/utils/ml_processor/replicate/constants.py @@ -45,7 +45,7 @@ class REPLICATE_MODEL: epicrealism_v5 = 
ReplicateModel("pagebrain/epicrealism-v5", "222465e57e4d9812207f14133c9499d47d706ecc41a8bf400120285b2f030b42") sdxl_controlnet = ReplicateModel("lucataco/sdxl-controlnet", "db2ffdbdc7f6cb4d6dab512434679ee3366ae7ab84f89750f8947d5594b79a47") realistic_vision_v5_img2img = ReplicateModel("lucataco/realistic-vision-v5-img2img", "82bbb4595458d6be142450fc6d8c4d79c936b92bd184dd2d6dd71d0796159819") - ad_interpolation = ReplicateModel("piyushk52/ad_interpolation", "4a478c659d96673b81992b866f1072fc62f297b7ad9945632cda027a6a07c624") + ad_interpolation = ReplicateModel("piyushk52/ad_3_img_smooth", "c3395c9373ac64cf9618ce51c8a4ae0da9d570649820cfcba93b58a3b3bee045") # addition 17/10/2023 llama_2_7b = ReplicateModel("meta/llama-2-7b", "527827021d8756c7ab79fde0abbfaac885c37a3ed5fe23c7465093f0878d55ef") From 8e3b5736bb3b9a40040b89da9cb42db6e632c20a Mon Sep 17 00:00:00 2001 From: piyushK52 Date: Wed, 25 Oct 2023 21:07:58 +0530 Subject: [PATCH 117/164] wip: restructuring for shots workflow --- backend/db_repo.py | 175 +++++++++++-- backend/models.py | 76 ++++++ backend/serializers/dao.py | 1 + backend/serializers/dto.py | 28 ++- banodoco_settings.py | 16 +- .../components/frame_styling_page.py | 38 +-- ui_components/components/new_project_page.py | 10 +- .../components/video_rendering_page.py | 26 +- ui_components/methods/common_methods.py | 130 +++------- ui_components/methods/video_methods.py | 238 ++---------------- ui_components/models.py | 21 +- .../widgets/add_key_frame_element.py | 71 +++--- .../widgets/animation_style_element.py | 5 +- .../widgets/frame_clip_generation_elements.py | 4 +- .../widgets/frame_movement_widgets.py | 79 ++---- ui_components/widgets/frame_selector.py | 21 +- ui_components/widgets/frame_switch_btn.py | 19 +- ui_components/widgets/frame_time_selector.py | 90 ------- ui_components/widgets/image_carousal.py | 27 +- ui_components/widgets/inpainting_element.py | 9 +- ui_components/widgets/list_view.py | 130 ---------- ui_components/widgets/sidebar_logger.py 
| 21 +- .../widgets/style_explorer_element.py | 19 +- ui_components/widgets/styling_element.py | 11 +- ui_components/widgets/timeline_view.py | 43 ++-- utils/cache/cache_methods.py | 10 - utils/data_repo/api_repo.py | 9 - utils/data_repo/data_repo.py | 42 +++- 28 files changed, 543 insertions(+), 826 deletions(-) delete mode 100644 ui_components/widgets/frame_time_selector.py delete mode 100644 ui_components/widgets/list_view.py diff --git a/backend/db_repo.py b/backend/db_repo.py index e024f44c..6d4c4eea 100644 --- a/backend/db_repo.py +++ b/backend/db_repo.py @@ -16,12 +16,12 @@ from typing import List import uuid from shared.constants import Colors, InternalFileType, SortOrder -from backend.serializers.dto import AIModelDto, AppSettingDto, BackupDto, BackupListDto, InferenceLogDto, InternalFileDto, ProjectDto, SettingDto, TimingDto, UserDto +from backend.serializers.dto import AIModelDto, AppSettingDto, BackupDto, BackupListDto, InferenceLogDto, InternalFileDto, ProjectDto, SettingDto, ShotDto, TimingDto, UserDto from shared.constants import AUTOMATIC_FILE_HOSTING, LOCAL_DATABASE_NAME, SERVER, ServerType from shared.file_upload.s3 import upload_file, upload_file_from_obj -from backend.models import AIModel, AIModelParamMap, AppSetting, BackupTiming, InferenceLog, InternalFileObject, Lock, Project, Setting, Timing, User +from backend.models import AIModel, AIModelParamMap, AppSetting, BackupTiming, InferenceLog, InternalFileObject, Lock, Project, Setting, Shot, Timing, User from backend.serializers.dao import CreateAIModelDao, CreateAIModelParamMapDao, CreateAppSettingDao, CreateFileDao, CreateInferenceLogDao, CreateProjectDao, CreateSettingDao, CreateTimingDao, CreateUserDao, UpdateAIModelDao, UpdateAppSettingDao, UpdateSettingDao from shared.constants import InternalResponse @@ -704,10 +704,10 @@ def get_timing_from_uuid(self, uuid): return InternalResponse(payload, 'timing fetched', True) - def get_timing_from_frame_number(self, project_uuid, frame_number): - 
project: Project = Project.objects.filter(uuid=project_uuid, is_disabled=False).first() - if project: - timing = Timing.objects.filter(aux_frame_index=frame_number, project_id=project.id, is_disabled=False).first() + def get_timing_from_frame_number(self, shot_uuid, frame_number): + shot: Shot = Shot.objects.filter(uuid=shot_uuid, is_disabled=False).first() + if shot: + timing = Timing.objects.filter(aux_frame_index=frame_number, shot_id=shot.id, is_disabled=False).first() if timing: payload = { 'data': TimingDto(timing).data @@ -734,7 +734,7 @@ def get_next_timing(self, uuid): if not timing: return InternalResponse({}, 'invalid timing uuid', False) - next_timing = Timing.objects.filter(aux_frame_index=timing.aux_frame_index + 1, project_id=timing.project_id, is_disabled=False).order_by('aux_frame_index').first() + next_timing = Timing.objects.filter(aux_frame_index=timing.aux_frame_index + 1, shot_id=timing.shot_id, is_disabled=False).order_by('aux_frame_index').first() payload = { 'data': TimingDto(next_timing).data if next_timing else None @@ -747,7 +747,7 @@ def get_prev_timing(self, uuid): if not timing: return InternalResponse({}, 'invalid timing uuid', False) - prev_timing = Timing.objects.filter(aux_frame_index=timing.aux_frame_index - 1, project_id=timing.project_id, is_disabled=False).order_by('aux_frame_index').first() + prev_timing = Timing.objects.filter(aux_frame_index=timing.aux_frame_index - 1, shot_id=timing.shot_id, is_disabled=False).order_by('aux_frame_index').first() payload = { 'data': TimingDto(prev_timing).data if prev_timing else None @@ -761,7 +761,7 @@ def get_alternative_image_list(self, uuid): return InternalResponse([], 'invalid timing uuid', False) return timing.alternative_image_list - + def get_timing_list_from_project(self, project_uuid=None): if project_uuid: project: Project = Project.objects.filter(uuid=project_uuid, is_disabled=False).first() @@ -777,7 +777,20 @@ def get_timing_list_from_project(self, project_uuid=None): } 
return InternalResponse(payload, 'timing list fetched', True) - + + def get_timing_list_from_shot(self, shot_uuid): + shot: Shot = Shot.objects.filter(uuid=shot_uuid, is_disabled=False).first() + if not shot: + return InternalResponse({}, 'invalid shot', False) + + timing_list = Timing.objects.filter(shot_id=shot.id, is_disabled=False).order_by('aux_frame_index').all() + + payload = { + 'data': TimingDto(timing_list, many=True).data + } + + return InternalResponse(payload, 'timing list fetched', True) + def create_timing(self, **kwargs): attributes = CreateTimingDao(data=kwargs) if not attributes.is_valid(): @@ -792,6 +805,13 @@ def create_timing(self, **kwargs): print(attributes.data) attributes._data['project_id'] = project.id + + if 'shot_id' in attributes.data and attributes.data['shot_id']: + shot = Shot.objects.filter(uuid=attributes.data['shot_id'], is_disabled=False).first() + if not shot: + return InternalResponse({}, 'invalid shot', False) + + attributes._data['shot_id'] = shot.id if 'aux_frame_index' not in attributes.data or attributes.data['aux_frame_index'] == None: attributes._data['aux_frame_index'] = Timing.objects.filter(project_id=attributes.data['project_id'], is_disabled=False).count() @@ -902,8 +922,7 @@ def add_interpolated_clip(self, uuid, **kwargs): return InternalResponse({}, 'success', True) - # TODO: add dao in this method - def update_specific_timing(self, uuid, **kwargs): + def update_specific_timing(self, uuid, **kwargs): ## change this timing = Timing.objects.filter(uuid=uuid, is_disabled=False).first() if not timing: return InternalResponse({}, 'invalid timing uuid', False) @@ -916,6 +935,14 @@ def update_specific_timing(self, uuid, **kwargs): kwargs['primary_image_id'] = primary_image.id + if 'shot_id' in kwargs: + if kwargs['shot_id'] != None: + shot: Shot = Shot.objects.filter(uuid=kwargs['shot_id'], is_disabled=False).first() + if not shot: + return InternalResponse({}, 'invalid shot uuid', False) + + kwargs['shot_id'] = shot.id 
+ if 'model_id' in kwargs: if kwargs['model_id'] != None: model: AIModel = AIModel.objects.filter(uuid=kwargs['model_id'], is_disabled=False).first() @@ -1024,19 +1051,6 @@ def remove_source_image(self, uuid): timing.save() return InternalResponse({}, 'source image removed successfully', True) - def move_frame_one_step_forward(self, project_uuid, index_of_frame): - project: Project = Project.objects.filter(uuid=project_uuid, is_disabled=False).first() - if not project: - return InternalResponse({}, 'invalid project uuid', False) - - timing_list = Timing.objects.filter(project_id=project.id, \ - aux_frame_index__gte=index_of_frame, is_disabled=False).order_by('frame_number') - - timing_list.update(aux_frame_index=F('aux_frame_index') + 1) - - return InternalResponse({}, 'frames moved successfully', True) - - # app setting def get_app_setting_from_uuid(self, uuid=None): if uuid: @@ -1503,4 +1517,113 @@ def acquire_lock(self, key): def release_lock(self, key): with transaction.atomic(): Lock.objects.filter(row_key=key).delete() - return InternalResponse({'data': True}, 'success', True) \ No newline at end of file + return InternalResponse({'data': True}, 'success', True) + + + # shot + def get_shot_from_number(self, project_uuid, shot_number=0): + shot: Shot = Shot.objects.filter(project_id=project_uuid, shot_idx=shot_number, is_disabled=False).first() + if not shot: + return InternalResponse({}, 'invalid shot number', False) + + timing_list = Timing.objects.filter(shot_id=shot.id, is_disabled=False).all() + context = {'timing_list': timing_list} + payload = { + 'data': ShotDto(shot, context=context).data + } + + return InternalResponse(payload, 'shot fetched successfully', True) + + def get_shot_from_uuid(self, shot_uuid): + shot: Shot = Shot.objects.filter(uuid=shot_uuid, is_disabled=False).first() + if not shot: + return InternalResponse({}, 'invalid shot uuid', False) + + timing_list = Timing.objects.filter(shot_id=shot.id, is_disabled=False).all() + context = 
{'timing_list': timing_list} + + payload = { + 'data': ShotDto(shot, context=context).data + } + + return InternalResponse(payload, 'shot fetched successfully', True) + + def get_shot_list(self, project_uuid): + project = Project.objects.filter(uuid=project_uuid, is_disabled=False).first() + if not project: + return InternalResponse({}, 'invalid project uuid', False) + + shot_list: List[Shot] = Shot.objects.filter(project_id=project.id, is_disabled=False).all() + timing_list = Timing.objects.filter(is_disabled=False).all() + context = {'timing_list': timing_list} + + payload = { + 'data': ShotDto(shot_list, context=context, many=True).data + } + + return InternalResponse(payload, 'shot list fetched successfully', True) + + def create_shot(self, project_uuid, name, duration, meta_data="", desc=""): + project = Project.objects.filter(uuid=project_uuid, is_disabled=False).first() + if not project: + return InternalResponse({}, 'invalid project uuid', False) + + shot_number = Shot.objects.filter(project_id=project.id, is_disabled=False).count() + 1 + + shot_data = { + "name" : name, + "desc" : desc, + "shot_idx" : shot_number, + "duration" : duration, + "meta_data" : meta_data, + "project_id" : project.id + } + + shot = Shot.objects.create(**shot_data) + + timing_list = Timing.objects.filter(is_disabled=False).all() + context = {'timing_list': timing_list} + + payload = { + 'data': ShotDto(shot, context=context).data + } + + return InternalResponse(payload, 'shot created successfully', True) + + def update_shot(self, shot_uuid, name=None, duration=None, meta_data=None, desc=None): + shot: Shot = Shot.objects.filter(uuid=shot_uuid, is_disabled=False).first() + if not shot: + return InternalResponse({}, 'invalid shot uuid', False) + + update_data = {} + if name != None: + update_data['name'] = name + if duration != None: + update_data['duration'] = duration + if meta_data != None: + update_data['meta_data'] = meta_data + if desc != None: + update_data['desc'] = desc + + 
for k,v in update_data.items(): + setattr(shot, k, v) + + shot.save() + timing_list = Timing.objects.filter(is_disabled=False).all() + context = {'timing_list': timing_list} + + payload = { + 'data': ShotDto(shot, context=context).data + } + + return InternalResponse(payload, 'shot updated successfully', True) + + def delete_shot(self, shot_uuid): + shot: Shot = Shot.objects.filter(uuid=shot_uuid, is_disabled=False).first() + if not shot: + return InternalResponse({}, 'invalid shot uuid', False) + + shot.is_disabled = True + shot.save() + + return InternalResponse({}, 'shot deleted successfully', True) diff --git a/backend/models.py b/backend/models.py index 0ed90f57..03b22dfe 100644 --- a/backend/models.py +++ b/backend/models.py @@ -146,6 +146,81 @@ class Meta: @property def data_dump_dict(self): return json.loads(self.data_dump) if self.data_dump else None + + +class Shot(BaseModel): + name = models.CharField(max_length=255, default="", blank=True) + project_id = models.ForeignKey(Project, on_delete=models.CASCADE) + main_clip_id = models.ForeignKey(InternalFileObject, default=None, null=True) # main clip has the correct duration + desc = models.TextField(default="", blank=True) + shot_idx = models.IntegerField() + duration = models.FloatField(default=2.5) + meta_data = models.TextField(default="", blank=True) + interpolated_clip_list = models.TextField(default=None, null=True) + + class Meta: + app_label = 'backend' + db_table = 'shot' + + @property + def meta_data_dict(self): + return json.loads(self.meta_data) if self.meta_data else None + + def __init__(self, *args, **kwargs): + super(Shot, self).__init__(*args, **kwargs) + self.old_shot_idx = self.shot_idx + self.old_is_disabled = self.is_disabled + self.old_duration = self.duration + + def add_interpolated_clip_list(self, clip_uuid_list): + cur_list = json.loads(self.interpolated_clip_list) if self.interpolated_clip_list else [] + cur_list.extend(clip_uuid_list) + cur_list = list(set(cur_list)) + 
self.interpolated_clip_list = json.dumps(cur_list) + + def save(self, *args, **kwargs): + # --------------- handling shot_idx change -------------- + # if the shot is being deleted (disabled) + if self.old_is_disabled != self.is_disabled and self.is_disabled: + shot_list = Shot.objects.filter(project_id=self.project_id, is_disabled=False).order_by('shot_idx') + + # if this is disabled then shifting every shot backwards one step + if self.is_disabled: + shot_list.update(shot_idx=F('shot_idx') - 1) + else: + shot_list.update(shot_idx=F('shot_idx') + 1) + + # if this is a newly created shot or assigned new shot_idx (and not disabled) + if (not self.id or self.old_shot_idx != self.shot_idx) and not self.is_disabled: + # newly created shot + if not self.id: + # if a shot already exists at this place then moving everything one step forward + if Shot.objects.filter(project_id=self.project_id, shot_idx=self.shot_idx, is_disabled=False).exists(): + shot_list = Shot.objects.filter(project_id=self.project_id, \ + shot_idx__gte=self.shot_idx, is_disabled=False) + shot_list.update(shot_idx=F('shot_idx') + 1) + elif self.old_shot_idx != self.shot_idx: + if self.shot_idx >= self.old_shot_idx: + shots_to_move = Shot.objects.filter(project_id=self.project_id, shot_idx__gt=self.old_shot_idx, \ + shot_idx__lte=self.shot_idx, is_disabled=False).order_by('shot_idx') + # moving the frames between old and new index one step backwards + shots_to_move.update(shot_idx=F('shot_idx') - 1) + else: + shots_to_move = Shot.objects.filter(project_id=self.project_id, shot_idx__gte=self.shot_idx, \ + shot_idx__lt=self.old_shot_idx, is_disabled=False).order_by('shot_idx') + # moving frames + shots_to_move.update(shot_idx=F('shot_idx') + 1, timed_clip=None, preview_video=None) + + super(Shot, self).save(*args, **kwargs) + + # if the overall duration of the shot is updated + # then we update the duration of the last frame inside the shot + if self.old_duration != self.duration: + shot_timing_list = 
Timing.objects.filter(shot_id=self.id, is_disabled=False).order_by('aux_frame_index') + if shot_timing_list and len(shot_timing_list): + clip_duration = round(self.duration - shot_timing_list[len(shot_timing_list)-1].frame_time, 2) + shot_timing_list[len(shot_timing_list)-1].update(clip_duration=clip_duration) + class Timing(BaseModel): project = models.ForeignKey(Project, on_delete=models.CASCADE, null=True) @@ -157,6 +232,7 @@ class Timing(BaseModel): canny_image = models.ForeignKey(InternalFileObject, related_name="canny_image", on_delete=models.DO_NOTHING, null=True) preview_video = models.ForeignKey(InternalFileObject, related_name="preview_video", on_delete=models.DO_NOTHING, null=True) primary_image = models.ForeignKey(InternalFileObject, related_name="primary_image", on_delete=models.DO_NOTHING, null=True) # variant number that is currently selected (among alternative images) NONE if none is present + shot_id = models.ForeignKey(Shot, on_delete=models.CASCADE, null=True) custom_model_id_list = models.TextField(default=None, null=True, blank=True) frame_time = models.FloatField(default=None, null=True) frame_number = models.IntegerField(default=None, null=True) diff --git a/backend/serializers/dao.py b/backend/serializers/dao.py index 2c3bf6fd..fcf9e513 100644 --- a/backend/serializers/dao.py +++ b/backend/serializers/dao.py @@ -84,6 +84,7 @@ class CreateTimingDao(serializers.Serializer): canny_image_id = serializers.CharField(max_length=100, required=False) preview_video_id = serializers.CharField(max_length=100, required=False) custom_model_id_list = serializers.CharField(max_length=100, required=False) + shot_id = serializers.CharField(max_length=100) frame_time = serializers.CharField(max_length=100) frame_number = serializers.CharField(max_length=100, required=False) primary_image = serializers.CharField(max_length=100, required=False) diff --git a/backend/serializers/dto.py b/backend/serializers/dto.py index a84dd699..7badce7e 100644 --- 
a/backend/serializers/dto.py +++ b/backend/serializers/dto.py @@ -1,7 +1,7 @@ import json from rest_framework import serializers -from backend.models import AIModel, AppSetting, BackupTiming, InferenceLog, InternalFileObject, Project, Setting, Timing, User +from backend.models import AIModel, AppSetting, BackupTiming, InferenceLog, InternalFileObject, Project, Setting, Shot, Timing, User class UserDto(serializers.ModelSerializer): @@ -204,4 +204,28 @@ class Meta: "name", "note", "created_on" - ) \ No newline at end of file + ) + + +class ShotDto(serializers.ModelSerializer): + timing_list = serializers.SerializerMethodField() + main_clip = InternalFileDto() + + class Meta: + model = Shot + fields = ( + "uuid", + "name", + "desc", + "shot_idx", + "duration", + "meta_data", + "timing_list", + "interpolated_clip_list", + "main_clip" + ) + + def get_timing_list(self, obj): + timing_list = self.context.get("timing_list", []) + return [TimingDto(timing).data for timing in timing_list] + \ No newline at end of file diff --git a/banodoco_settings.py b/banodoco_settings.py index f4d568c2..eb4f66ba 100644 --- a/banodoco_settings.py +++ b/banodoco_settings.py @@ -87,6 +87,17 @@ def create_new_project(user: InternalUserObject, project_name: str, width=512, h } project: InternalProjectObject = data_repo.create_project(**project_data) + # create a default first shot + shot_data = { + "name": "Shot 1", + "project_id": project.uuid, + "desc": "", + "shot_idx": 0, + "duration": 2 + } + + shot = data_repo.create_shot(**shot_data) + # create a sample timing frame st.session_state["project_uuid"] = project.uuid sample_file_location = "sample_assets/sample_images/v.jpeg" @@ -112,7 +123,8 @@ def create_new_project(user: InternalUserObject, project_name: str, width=512, h "frame_time": 0.0, "animation_style": animation_style, "aux_frame_index": 0, - "source_image_id": source_image.uuid + "source_image_id": source_image.uuid, + "shot_id": shot.uuid, } timing: InternalFrameTimingObject = 
data_repo.create_timing(**timing_data) @@ -148,7 +160,7 @@ def create_new_project(user: InternalUserObject, project_name: str, width=512, h create_working_assets(project.uuid) - return project + return project, shot def create_predefined_models(user): diff --git a/ui_components/components/frame_styling_page.py b/ui_components/components/frame_styling_page.py index e752d0aa..26f87655 100644 --- a/ui_components/components/frame_styling_page.py +++ b/ui_components/components/frame_styling_page.py @@ -1,4 +1,3 @@ - import streamlit as st from shared.constants import ViewType @@ -24,10 +23,11 @@ from utils.data_repo.data_repo import DataRepo -def frame_styling_page(mainheader2, project_uuid: str): +def frame_styling_page(shot_uuid: str): data_repo = DataRepo() - timing_details = data_repo.get_timing_list_from_project(project_uuid) - project_settings = data_repo.get_project_setting(project_uuid) + shot = data_repo.get_shot_from_uuid(shot_uuid) + timing_list = data_repo.get_timing_list_from_shot(shot_uuid) + project_settings = data_repo.get_project_setting(shot.project.uuid) if "strength" not in st.session_state: st.session_state['strength'] = project_settings.default_strength @@ -40,10 +40,10 @@ def frame_styling_page(mainheader2, project_uuid: str): st.session_state['num_inference_steps'] = project_settings.default_num_inference_steps st.session_state['transformation_stage'] = project_settings.default_stage - if "current_frame_uuid" not in st.session_state: - timing = data_repo.get_timing_list_from_project(project_uuid)[0] - st.session_state['current_frame_uuid'] = timing.uuid - st.session_state['current_frame_index'] = timing.aux_frame_index + 1 + if f"{shot.uuid}__" + "current_frame_uuid" not in st.session_state: + timing = data_repo.get_timing_list_from_shot(shot_uuid)[0] + st.session_state[f"{shot.uuid}__" + 'current_frame_uuid'] = timing.uuid + st.session_state[f"{shot.uuid}__" + 'current_frame_index'] = timing.aux_frame_index + 1 if 'frame_styling_view_type' not 
in st.session_state: st.session_state['frame_styling_view_type'] = "Individual" @@ -59,10 +59,10 @@ def frame_styling_page(mainheader2, project_uuid: str): st.markdown( f"#### :red[{st.session_state['main_view_type']}] > **:green[{st.session_state['frame_styling_view_type']}]** > :orange[{st.session_state['page']}] > :blue[Frame #{st.session_state['current_frame_index']}]") - project_settings = data_repo.get_project_setting(project_uuid) + project_settings = data_repo.get_project_setting(shot.project.uuid) if st.session_state['frame_styling_view_type'] == "Explorer": - style_explorer_element(project_uuid) + style_explorer_element(shot_uuid) # -------------------- INDIVIDUAL VIEW ---------------------- elif st.session_state['frame_styling_view_type'] == "Individual": @@ -74,7 +74,7 @@ def frame_styling_page(mainheader2, project_uuid: str): st.markdown("***") with st.expander("🎬 Choose Animation Style & Create Variants", expanded=True): - animation_style_element(st.session_state['current_frame_uuid'], project_uuid) + animation_style_element(st.session_state['current_frame_uuid'], shot_uuid) elif st.session_state['page'] == CreativeProcessType.STYLING.value: variant_comparison_grid(st.session_state['current_frame_uuid'], stage=CreativeProcessType.STYLING.value) @@ -132,14 +132,14 @@ def frame_styling_page(mainheader2, project_uuid: str): st.info( "You can restyle multiple frames at once in the Timeline view.") st.markdown("***") - style_cloning_element(timing_details) + style_cloning_element(timing_list) with st.expander("🔍 Prompt Finder"): - prompt_finder_element(project_uuid) + prompt_finder_element(shot_uuid) elif st.session_state['styling_view'] == "Crop, Move & Rotate Image": with st.expander("🤏 Crop, Move & Rotate Image", expanded=True): - cropping_selector_element(project_uuid) + cropping_selector_element(shot_uuid) elif st.session_state['styling_view'] == "Inpainting & BG Removal": with st.expander("🌌 Inpainting, Background Removal & More", expanded=True): @@ 
-147,10 +147,10 @@ def frame_styling_page(mainheader2, project_uuid: str): elif st.session_state['styling_view'] == "Draw On Image": with st.expander("📝 Draw On Image", expanded=True): - drawing_element(timing_details,project_settings,project_uuid) + drawing_element(timing_list,project_settings, shot_uuid) with st.expander("➕ Add Key Frame", expanded=True): - selected_image, inherit_styling_settings, how_long_after, _ = add_key_frame_element(timing_details, project_uuid) + selected_image, inherit_styling_settings, how_long_after, _ = add_key_frame_element(shot_uuid) if st.button(f"Add key frame",type="primary",use_container_width=True): add_key_frame(selected_image, inherit_styling_settings, how_long_after) st.rerun() @@ -162,11 +162,11 @@ def frame_styling_page(mainheader2, project_uuid: str): with st.sidebar: with st.expander("🌀 Batch Styling", expanded=False): styling_element(st.session_state['current_frame_uuid'], view_type=ViewType.LIST.value) - timeline_view(project_uuid, "Key Frames") + timeline_view(shot_uuid, "Key Frames") elif st.session_state['page'] == "Videos": - timeline_view(project_uuid, "Videos") + timeline_view(shot_uuid, "Videos") # -------------------- SIDEBAR NAVIGATION -------------------------- with st.sidebar: with st.expander("🔍 Inference Logging", expanded=True): - sidebar_logger(project_uuid) + sidebar_logger(shot_uuid) diff --git a/ui_components/components/new_project_page.py b/ui_components/components/new_project_page.py index 4acc449f..46cb7654 100644 --- a/ui_components/components/new_project_page.py +++ b/ui_components/components/new_project_page.py @@ -78,8 +78,8 @@ def new_project_page(): new_project_name = new_project_name.replace(" ", "_") current_user = data_repo.get_first_active_user() - new_project = create_new_project(current_user, new_project_name, width, height, "Images", "Interpolation") - new_timing = create_timings_row_at_frame_number(new_project.uuid, 0) + new_project, shot = create_new_project(current_user, 
new_project_name, width, height, "Images", "Interpolation") + new_timing = create_timings_row_at_frame_number(shot.uuid, 0) if starting_image: try: @@ -88,8 +88,10 @@ def new_project_page(): except Exception as e: st.error(f"Failed to save the uploaded image due to {str(e)}") - # remvoing the initial frame which moved to the 1st position - initial_frame = data_repo.get_timing_from_frame_number(new_project.uuid, 0) + # remvoing the initial frame which moved to the 1st position + # (since creating new project also creates a frame) + shot = data_repo.get_shot_from_number(new_project.uuid, 0) + initial_frame = data_repo.get_timing_from_frame_number(shot.uuid, 0) data_repo.delete_timing_from_uuid(initial_frame.uuid) if uploaded_audio: diff --git a/ui_components/components/video_rendering_page.py b/ui_components/components/video_rendering_page.py index 04242564..034fd36d 100644 --- a/ui_components/components/video_rendering_page.py +++ b/ui_components/components/video_rendering_page.py @@ -13,16 +13,8 @@ from utils.data_repo.data_repo import DataRepo -def video_rendering_page(mainheader2, project_uuid): +def video_rendering_page(project_uuid): data_repo = DataRepo() - project = data_repo.get_project_from_uuid(project_uuid) - project_name = project.name - - - timing_details: List[InternalFrameTimingObject] = data_repo.get_timing_list_from_project( - project_uuid) - project_settings = data_repo.get_project_setting(project_uuid) - parody_movie_names = ["The_Lord_of_the_Onion_Rings", "Jurassic_Pork", "Harry_Potter_and_the_Sorcerer_s_Kidney_Stone", "Star_Wars_The_Phantom_of_the_Oprah", "The_Silence_of_the_Yams", "The_Hunger_Pains", "Free_Willy_Wonka_and_the_Chocolate_Factory", "The_Da_Vinci_Chode", "Forrest_Dump", "The_Shawshank_Inebriation", "A_Clockwork_Orange_Juice", "The_Big_Lebowski_2_Dude_Where_s_My_Car", "The_Princess_Diaries_The_Dark_Knight_Rises", "Eternal_Sunshine_of_the_Spotless_Behind", "Rebel_Without_a_Clue", "The_Terminal_Dentist", 
"Dr_Strangelove_or_How_I_Learned_to_Stop_Worrying_and_Love_the_Bombastic", "The_Wolf_of_Sesame_Street", "The_Good_the_Bad_and_the_Fluffy", "The_Sound_of_Mucus", "Back_to_the_Fuchsia", "The_Curious_Case_of_Benjamin_s_Button", "The_Fellowship_of_the_Bing", "The_Texas_Chainsaw_Manicure", "The_Iron_Manatee", "Night_of_the_Living_Bread", "Indiana_Jones_and_the_Temple_of_Groom", "Kill_Billiards", "The_Bourne_Redundancy", "The_SpongeBob_SquarePants_Movie_Sponge_Out_of_Water_and_Ideas", "Planet_of_the_Snapes", "No_Country_for_Old_Yentas", "The_Expendable_Accountant", "The_Terminal_Illness", "A_Streetcar_Named_Retire", "The_Secret_Life_of_Walter_s_Mitty", "The_Hunger_Games_Catching_Foam", "The_Godfather_Part_Time_Job", "How_To_Kill_a_Mockingbird", "Star_Trek_III_The_Search_for_Spock_s_Missing_Sock", "Gone_with_the_Wind_Chimes", "Dr_No_Clue", "Ferris_Bueller_s_Day_Off_Sick", "Monty_Python_and_the_Holy_Fail", "A_Fistful_of_Quarters", "Willy_Wonka_and_the_Chocolate_Heartburn", "The_Good_the_Bad_and_the_Dandruff", "The_Princess_Bride_of_Frankenstein", "The_Wizard_of_Bras", "Pulp_Friction", "Die_Hard_with_a_Clipboard", "Indiana_Jones_and_the_Last_Audit", "Finding_Nemoy", "The_Silence_of_the_Lambs_The_Musical", "Titanic_2_The_Iceberg_Strikes_Back", "Fast_Times_at_Ridgemont_Mortuary", "The_Graduate_But_Only_Because_He_Has_an_Advanced_Degree", "Beauty_and_the_Yeast", "The_Blair_Witch_Takes_Manhattan", "Reservoir_Bitches", "Die_Hard_with_a_Pension"] random_name = random.choice(parody_movie_names) @@ -32,22 +24,8 @@ def video_rendering_page(mainheader2, project_uuid): attach_audio_element(project_uuid, True) - - quality1, quality2 = st.columns([1, 2]) - - with quality1: - quality_of_video = st.radio("What quality would you like?", [ - 'Preview', 'High-Quality'], horizontal=True) - - with quality2: - if quality_of_video == "Preview": - st.info("This means it'll generate videos at lower resolutions and frame rates.") - else: - st.info("This means it'll generate videos at higher 
resolutions and frame rates.") - if st.button("Render New Video"): - - render_video(final_video_name, project_uuid, quality_of_video, InternalFileTag.COMPLETE_GENERATED_VIDEO.value) + render_video(final_video_name, project_uuid, InternalFileTag.COMPLETE_GENERATED_VIDEO.value) st.success("Video rendered!") time.sleep(1.5) st.rerun() diff --git a/ui_components/methods/common_methods.py b/ui_components/methods/common_methods.py index 94ab4eae..ea7e01ad 100644 --- a/ui_components/methods/common_methods.py +++ b/ui_components/methods/common_methods.py @@ -12,13 +12,13 @@ from io import BytesIO import numpy as np import urllib3 -from shared.constants import SERVER, InferenceType, InternalFileTag, InternalFileType, ProjectMetaData, ServerType +from shared.constants import InferenceType, InternalFileTag, InternalFileType, ProjectMetaData from pydub import AudioSegment from backend.models import InternalFileObject from ui_components.constants import SECOND_MASK_FILE, SECOND_MASK_FILE_PATH, WorkflowStageType from ui_components.methods.file_methods import add_temp_file_to_project, convert_bytes_to_file, generate_pil_image, generate_temp_file, save_or_host_file, save_or_host_file_bytes -from ui_components.methods.video_methods import calculate_desired_duration_of_individual_clip, update_speed_of_video_clip -from ui_components.models import InternalAIModelObject, InternalFrameTimingObject, InternalSettingObject +from ui_components.methods.video_methods import update_speed_of_video_clip +from ui_components.models import InternalFrameTimingObject, InternalSettingObject from utils.common_utils import acquire_lock, release_lock from utils.data_repo.data_repo import DataRepo from shared.constants import AnimationStyleType @@ -31,11 +31,10 @@ def clone_styling_settings(source_frame_number, target_frame_uuid): data_repo = DataRepo() target_timing = data_repo.get_timing_from_uuid(target_frame_uuid) - timing_details = data_repo.get_timing_list_from_project( - target_timing.project.uuid) 
+ timing_list = data_repo.get_timing_list_from_shot( + target_timing.shot.uuid) - primary_image = data_repo.get_file_from_uuid(timing_details[source_frame_number].primary_image.uuid) - params = primary_image.inference_params + params = timing_list[source_frame_number].primary_image.inference_params if params: target_timing.prompt = params.prompt @@ -53,7 +52,9 @@ def clone_styling_settings(source_frame_number, target_frame_uuid): target_timing.model = model # TODO: image format is assumed to be PNG, change this later -def save_new_image(img: Union[Image.Image, str, np.ndarray, io.BytesIO], project_uuid) -> InternalFileObject: +def save_new_image(img: Union[Image.Image, str, np.ndarray, io.BytesIO], shot_uuid) -> InternalFileObject: + data_repo = DataRepo() + shot = data_repo.get_shot_from_uuid(shot_uuid) img = generate_pil_image(img) file_name = str(uuid.uuid4()) + ".png" @@ -64,15 +65,14 @@ def save_new_image(img: Union[Image.Image, str, np.ndarray, io.BytesIO], project file_data = { "name": str(uuid.uuid4()) + ".png", "type": InternalFileType.IMAGE.value, - "project_id": project_uuid + "project_id": shot.project.uuid } if hosted_url: file_data.update({'hosted_url': hosted_url}) else: file_data.update({'local_path': file_path}) - - data_repo = DataRepo() + new_image = data_repo.create_file(**file_data) return new_image @@ -221,14 +221,14 @@ def apply_image_transformations(image: Image, zoom_level, rotation_angle, x_shif return cropped_image -def fetch_image_by_stage(project_uuid, stage, frame_idx): +def fetch_image_by_stage(shot_uuid, stage, frame_idx): data_repo = DataRepo() - timing_details = data_repo.get_timing_list_from_project(project_uuid) + timing_list = data_repo.get_timing_list_from_shot(shot_uuid) if stage == WorkflowStageType.SOURCE.value: - return timing_details[frame_idx].source_image + return timing_list[frame_idx].source_image elif stage == WorkflowStageType.STYLED.value: - return timing_details[frame_idx].primary_image + return 
timing_list[frame_idx].primary_image else: return None @@ -247,30 +247,22 @@ def rotate_image(location, degree): rotated_image = image.rotate(-degree, resample=Image.BICUBIC, expand=False) return rotated_image - -def update_timings_in_order(project_uuid): - data_repo = DataRepo() - - timing_list: List[InternalFrameTimingObject] = data_repo.get_timing_list_from_project(project_uuid) - # Iterate through the timing objects - for i, timing in enumerate(timing_list): - # Set the frame time to the index of the timing object only if it's different from the current one - if timing.frame_time != float(i): - print(f"Updating timing {timing.uuid} frame time to {float(i)}") - data_repo.update_specific_timing(timing.uuid, frame_time=float(i)) - -def save_uploaded_image(image, project_uuid, frame_uuid, save_type): +def save_uploaded_image(image, shot_uuid, frame_uuid, stage_type): + ''' + saves the image file (which can be a PIL, arr or url) into the project, without + any tags or logs. then adds that file as the source_image/primary_image, depending + on the stage selected + ''' data_repo = DataRepo() try: - saved_image = save_new_image(image, project_uuid) - - # Update records based on save_type - if save_type == "source": + saved_image = save_new_image(image, shot_uuid) + # Update records based on stage_type + if stage_type == WorkflowStageType.SOURCE.value: data_repo.update_specific_timing(frame_uuid, source_image_id=saved_image.uuid) - elif save_type == "styled": + elif stage_type == WorkflowStageType.STYLED.value: number_of_image_variants = add_image_variant(saved_image.uuid, frame_uuid) promote_image_variant(frame_uuid, number_of_image_variants - 1) @@ -283,7 +275,7 @@ def save_uploaded_image(image, project_uuid, frame_uuid, save_type): def promote_image_variant(timing_uuid, variant_to_promote_frame_number: str): ''' this methods promotes the variant to the primary image (also referred to as styled image) - and clears the interpolation data of the prev and the current 
frame + interpolated_clips/videos of the shot are not cleared ''' data_repo = DataRepo() timing = data_repo.get_timing_from_uuid(timing_uuid) @@ -291,24 +283,7 @@ def promote_image_variant(timing_uuid, variant_to_promote_frame_number: str): # promoting variant variant_to_promote = timing.alternative_images_list[variant_to_promote_frame_number] data_repo.update_specific_timing(timing_uuid, primary_image_id=variant_to_promote.uuid) - - prev_timing = data_repo.get_prev_timing(timing_uuid) - # clearing the interpolation data of the prev frame - if prev_timing: - data_repo.update_specific_timing(prev_timing.uuid, interpolated_clip_list=None) - data_repo.update_specific_timing(timing_uuid, interpolated_clip_list=None) - data_repo.update_specific_timing(prev_timing.uuid, timed_clip_id=None) - - timing_details: List[InternalFrameTimingObject] = data_repo.get_timing_list_from_project( - timing.project.uuid) - - # if this is not the last element then clearing it's interpolation data - if timing.aux_frame_index < len(timing_details): - data_repo.update_specific_timing(timing.uuid, interpolated_clip_list=None) - data_repo.update_specific_timing(timing.uuid, timed_clip_id=None) - - data_repo.update_specific_timing(timing_uuid, timed_clip_id=None) - _ = data_repo.get_timing_list_from_project(timing.project.uuid) + _ = data_repo.get_timing_list_from_shot(timing.project.uuid) def promote_video_variant(timing_uuid, variant_uuid): @@ -547,65 +522,18 @@ def get_audio_bytes_for_slice(timing_uuid): audio_bytes.seek(0) return audio_bytes -# calculates and updates clip duration of all the timings -def update_clip_duration_of_all_timing_frames(project_uuid): - data_repo = DataRepo() - timing_details: List[InternalFrameTimingObject] = data_repo.get_timing_list_from_project( - project_uuid) - - length_of_list = len(timing_details) - - for i in timing_details: - index_of_current_item = timing_details.index(i) - length_of_list = len(timing_details) - timing_item: InternalFrameTimingObject = 
data_repo.get_timing_from_frame_number( - project_uuid, index_of_current_item) - - # last frame - if index_of_current_item == (length_of_list - 1): - time_of_frame = timing_item.frame_time - duration_of_static_time = 0.0 - end_duration_of_frame = float( - time_of_frame) + float(duration_of_static_time) - total_duration_of_frame = float( - end_duration_of_frame) - float(time_of_frame) - else: - time_of_frame = timing_item.frame_time - next_timing = data_repo.get_next_timing(timing_item.uuid) - time_of_next_frame = next_timing.frame_time - total_duration_of_frame = float( - time_of_next_frame) - float(time_of_frame) - total_duration_of_frame = round(total_duration_of_frame, 2) - data_repo.update_specific_timing(timing_item.uuid, clip_duration=total_duration_of_frame) - - _ = data_repo.get_timing_list_from_project(project_uuid) - - -def create_timings_row_at_frame_number(project_uuid, index_of_frame, frame_time=0.0): +def create_timings_row_at_frame_number(shot_uuid, index_of_frame, frame_time=0.0): data_repo = DataRepo() - # remove the interpolated video from the current row and the row before and after - unless it is the first or last row timing_data = { - "project_id": project_uuid, + "shot_id": shot_uuid, "frame_time": frame_time, "animation_style": AnimationStyleType.INTERPOLATION.value, "aux_frame_index": index_of_frame } timing: InternalFrameTimingObject = data_repo.create_timing(**timing_data) - prev_timing: InternalFrameTimingObject = data_repo.get_prev_timing( - timing.uuid) - if prev_timing: - prev_clip_duration = calculate_desired_duration_of_individual_clip(prev_timing.uuid) - data_repo.update_specific_timing( - prev_timing.uuid, interpolated_clip_list=None, clip_duration=prev_clip_duration) - - next_timing: InternalAIModelObject = data_repo.get_next_timing(timing.uuid) - if next_timing: - data_repo.update_specific_timing( - next_timing.uuid, interpolated_clip_list=None) - return timing diff --git a/ui_components/methods/video_methods.py 
b/ui_components/methods/video_methods.py index a7f2931f..9a2a4b57 100644 --- a/ui_components/methods/video_methods.py +++ b/ui_components/methods/video_methods.py @@ -15,7 +15,7 @@ from shared.file_upload.s3 import is_s3_image_url from ui_components.constants import VideoQuality from ui_components.methods.file_methods import convert_bytes_to_file, generate_temp_file -from ui_components.models import InternalFrameTimingObject, InternalSettingObject +from ui_components.models import InternalFrameTimingObject, InternalSettingObject, InternalShotObject from utils.data_repo.data_repo import DataRepo from utils.media_processor.interpolator import VideoInterpolator from utils.media_processor.video import VideoProcessor @@ -109,11 +109,6 @@ def create_or_get_single_preview_video(timing_uuid, interpolated_clip_uuid=None) data_repo.update_specific_timing(timing_uuid, preview_video_id=preview_video.uuid) os.remove(temp_output_file.name) - # preview has the correct length (equal to the time difference between the current and the next frame) - # which the interpolated video may or maynot have - # clip_duration = calculate_desired_duration_of_individual_clip(timing_uuid) - # data_repo.update_specific_timing(timing_uuid, clip_duration=clip_duration) - # adding audio if the audio file is present if project_details.audio: audio_bytes = get_audio_bytes_for_slice(timing_uuid) @@ -177,99 +172,6 @@ def create_single_interpolated_clip(timing_uuid, quality, settings={}, variant_c process_inference_output(**inference_data) - -# preview_clips have frame numbers on them. 
Preview clip is generated from index-2 to index+2 frames -def create_full_preview_video(timing_uuid, speed=1) -> InternalFileObject: - from ui_components.methods.file_methods import save_or_host_file_bytes, convert_bytes_to_file, generate_temp_file - - data_repo = DataRepo() - timing: InternalFrameTimingObject = data_repo.get_timing_from_uuid( - timing_uuid) - timing_details: List[InternalFrameTimingObject] = data_repo.get_timing_list_from_project( - timing.project.uuid) - index_of_item = timing.aux_frame_index - - num_timing_details = len(timing_details) - clips = [] - - temp_file_list = [] - - for i in range(index_of_item - 2, index_of_item + 3): - if i < 0 or i >= num_timing_details-1: - continue - - primary_variant_location = timing_details[i].primary_image_location - print(f"primary_variant_location for i={i}: {primary_variant_location}") - - if not primary_variant_location: - break - - preview_video = create_or_get_single_preview_video(timing_details[i].uuid) - - clip = VideoFileClip(preview_video.location) - - number_text = TextClip(str(i), fontsize=24, color='white') - number_background = TextClip(" ", fontsize=24, color='black', bg_color='black', size=( - number_text.w + 10, number_text.h + 10)) - number_background = number_background.set_position( - ('left', 'top')).set_duration(clip.duration) - number_text = number_text.set_position( - (number_background.w - number_text.w - 5, number_background.h - number_text.h - 5)).set_duration(clip.duration) - - clip_with_number = CompositeVideoClip( - [clip, number_background, number_text]) - - # remove existing preview video - temp_file = tempfile.NamedTemporaryFile(delete=False, suffix=".mp4", mode='wb') - temp_file_list.append(temp_file) - clip_with_number.write_videofile(temp_file.name, codec='libx264', bitrate='3000k') - video_bytes = None - with open(temp_file.name, 'rb') as f: - video_bytes = f.read() - - hosted_url = save_or_host_file_bytes(video_bytes, preview_video.local_path) - if hosted_url: - 
data_repo.update_file(preview_video.uuid, hosted_url=hosted_url) - - clips.append(preview_video) - - print(clips) - video_clips = [] - - for v in clips: - path = v.location - if 'http' in path: - temp_file = generate_temp_file(path) - temp_file_list.append(temp_file) - path = temp_file.name - - video_clips.append(VideoFileClip(path)) - - # video_clips = [VideoFileClip(v.location) for v in clips] - combined_clip = concatenate_videoclips(video_clips) - output_filename = str(uuid.uuid4()) + ".mp4" - video_location = f"videos/{timing.project.uuid}/assets/videos/1_final/{output_filename}" - - temp_output_file = tempfile.NamedTemporaryFile(delete=False, suffix=".mp4", mode='wb') - combined_clip = combined_clip.fx(vfx.speedx, speed) - combined_clip.write_videofile(temp_output_file.name) - - video_bytes = None - with open(temp_output_file.name, 'rb') as f: - video_bytes = f.read() - - video_file = convert_bytes_to_file( - video_location, - "video/mp4", - video_bytes, - timing.project.uuid - ) - - for file in temp_file_list: - os.remove(file.name) - - return video_file - def update_speed_of_video_clip(video_file: InternalFileObject, timing_uuid) -> InternalFileObject: from ui_components.methods.file_methods import generate_temp_file, convert_bytes_to_file @@ -308,25 +210,6 @@ def update_speed_of_video_clip(video_file: InternalFileObject, timing_uuid) -> I return video_file - -def calculate_desired_duration_of_individual_clip(timing_uuid): - data_repo = DataRepo() - timing: InternalFrameTimingObject = data_repo.get_timing_from_uuid(timing_uuid) - timing_details = data_repo.get_timing_list_from_project(timing.project.uuid) - length_of_list = len(timing_details) - - # last frame - if timing.aux_frame_index == length_of_list - 1: - time_of_frame = timing.frame_time - total_duration_of_frame = 0.0 # can be changed - else: - time_of_frame = timing.frame_time - time_of_next_frame = data_repo.get_next_timing(timing_uuid).frame_time - total_duration_of_frame = 
float(time_of_next_frame) - float(time_of_frame) - - return total_duration_of_frame - - def add_audio_to_video_slice(video_file, audio_bytes): video_location = video_file.local_path # Save the audio bytes to a temporary file @@ -356,112 +239,45 @@ def add_audio_to_video_slice(video_file, audio_bytes): os.rename("output_with_audio.mp4", video_location) -# final video rendering of all the frames involved (it bascially combines all the timed clips) -def render_video(final_video_name, project_uuid, quality, file_tag=InternalFileTag.GENERATED_VIDEO.value): - from ui_components.methods.common_methods import update_clip_duration_of_all_timing_frames +def render_video(final_video_name, project_uuid, file_tag=InternalFileTag.GENERATED_VIDEO.value): + ''' + combines the main variant of all the shots to form the final video. no processing happens in this, only + simple combination + ''' from ui_components.methods.file_methods import convert_bytes_to_file, generate_temp_file - from shared.constants import QUEUE_INFERENCE_QUERIES data_repo = DataRepo() if not final_video_name: st.error("Please enter a video name") + time.sleep(0.3) return - timing_details: List[InternalFrameTimingObject] = data_repo.get_timing_list_from_project( - project_uuid) - - update_clip_duration_of_all_timing_frames(project_uuid) - - total_number_of_videos = len(timing_details) - 1 - - # creating timed clip for every frame - for i in range(0, total_number_of_videos): - index_of_current_item = i - current_timing: InternalFrameTimingObject = data_repo.get_timing_from_frame_number( - project_uuid, i) - - timing = timing_details[i] - - # updating the interpolation steps - if quality == VideoQuality.HIGH.value: - # data_repo.update_specific_timing(current_timing.uuid, timed_clip_id=None) - interpolation_steps = VideoInterpolator.calculate_dynamic_interpolations_steps( - timing_details[index_of_current_item].clip_duration) - timing.interpolation_steps = interpolation_steps - else: - if not 
timing.interpolation_steps or timing.interpolation_steps < 3: - data_repo.update_specific_timing( - current_timing.uuid, interpolation_steps=3) - - # TODO: add this flow in the async inference as well - # creating timed clips if not already present - if not timing.timed_clip: - # creating an interpolated clip if not already present - if not len(timing.interpolated_clip_list): - next_timing = data_repo.get_next_timing(current_timing.uuid) - settings = { - "animation_tool": current_timing.animation_tool, - "interpolation_steps": current_timing.interpolation_steps - } - - res = VideoInterpolator.create_interpolated_clip( - img_location_list=[current_timing.source_image.location, next_timing.source_image.location], - animation_style=current_timing.animation_style, - settings=settings, - variant_count=1, - queue_inference=QUEUE_INFERENCE_QUERIES - ) - - video_bytes, log = res[0] - - file_location = "videos/" + current_timing.project.name + "/assets/videos/0_raw/" + str(uuid.uuid4()) + ".mp4" - video_file = convert_bytes_to_file( - file_location_to_save=file_location, - mime_type="video/mp4", - file_bytes=video_bytes, - project_uuid=current_timing.project.uuid, - inference_log_id=log.uuid - ) - - data_repo.add_interpolated_clip( - current_timing.uuid, interpolated_clip_id=video_file.uuid) - else: - video_file = timing.interpolated_clip_list[0] - - # add timed clip - output_video = update_speed_of_video_clip(video_file, current_timing.uuid) - data_repo.update_specific_timing(current_timing.uuid, timed_clip_id=output_video.uuid) - - project_settings: InternalSettingObject = data_repo.get_project_setting(project_uuid) - timing_details: List[InternalFrameTimingObject] = data_repo.get_timing_list_from_project(project_uuid) - total_number_of_videos = len(timing_details) - 2 - video_list = [] temp_file_list = [] - timing_details: List[InternalFrameTimingObject] = data_repo.get_timing_list_from_project( - timing.project.uuid) - - # joining all the timed clips - for i in 
timing_details: - index_of_current_item = timing_details.index(i) - current_timing: InternalFrameTimingObject = data_repo.get_timing_from_frame_number( - project_uuid, index_of_current_item) - if index_of_current_item <= total_number_of_videos: - temp_video_file = None - if current_timing.timed_clip.hosted_url: - temp_video_file = generate_temp_file(current_timing.timed_clip.hosted_url, '.mp4') - temp_file_list.append(temp_video_file) - - file_path = temp_video_file.name if temp_video_file else current_timing.timed_clip.local_path + # combining all the main_clip of shots in finalclip, and keeping track of temp video files + # in temp_file_list + shot_list: List[InternalShotObject] = data_repo.get_shot_list_from_project(project_uuid) + for shot in shot_list: + if not shot.main_clip: + st.error("Please generate all videos") + time.sleep(0.3) + return - video_list.append(file_path) + temp_video_file = None + if shot.main_clip.hosted_url: + temp_video_file = generate_temp_file(shot.main_clip.hosted_url, '.mp4') + temp_file_list.append(temp_video_file) + + file_path = temp_video_file.name if temp_video_file else shot.main_clip.local_path + video_list.append(file_path) - video_clip_list = [VideoFileClip(v) for v in video_list] - finalclip = concatenate_videoclips(video_clip_list) + finalclip = concatenate_videoclips([VideoFileClip(v) for v in video_list]) - output_video_file = f"videos/{timing.project.uuid}/assets/videos/2_completed/{final_video_name}.mp4" + # attaching audio to finalclip + project_settings = data_repo.get_project_settings_from_uuid(project_uuid) + output_video_file = f"videos/{project_uuid}/assets/videos/2_completed/{final_video_name}.mp4" if project_settings.audio: temp_audio_file = None if 'http' in project_settings.audio.location: @@ -473,8 +289,8 @@ def render_video(final_video_name, project_uuid, quality, file_tag=InternalFileT audio_clip = AudioFileClip(audio_location) finalclip = finalclip.set_audio(audio_clip) + # writing the video to the temp 
file temp_video_file = tempfile.NamedTemporaryFile(delete=False, suffix=".mp4") - finalclip.write_videofile( temp_video_file.name, fps=60, # or 60 if your original video is 60fps diff --git a/ui_components/models.py b/ui_components/models.py index a699b605..39886ac8 100644 --- a/ui_components/models.py +++ b/ui_components/models.py @@ -181,7 +181,26 @@ def primary_interpolated_video_index(self): return idx return -1 - + +class InternalShotObject: + def __init__(self, **kwargs): + self.uuid = kwargs['uuid'] if key_present('uuid', kwargs) else None + self.name = kwargs['name'] if key_present('name', kwargs) else "" + self.project_id = kwargs['project_id'] if key_present('project_id', kwargs) else None + self.desc = kwargs['desc'] if key_present('desc', kwargs) else "" + self.shot_idx = kwargs['shot_idx'] if key_present('shot_idx', kwargs) else 0 + self.duration = kwargs['duration'] if key_present('duration', kwargs) else 0 + self.meta_data = kwargs['meta_data'] if key_present('meta_data', kwargs) else {} + self.timing_list = [InternalFrameTimingObject(**timing) for timing in kwargs["timing_list"]] \ + if key_present('timing_list', kwargs) and kwargs["timing_list"] else [] + self.interpolated_clip_list = kwargs['interpolated_clip_list'] if key_present('interpolated_clip_list', kwargs) \ + else "" + self.main_clip = InternalFileObject(**kwargs['main_clip']) if key_present('main_clip', kwargs) else \ + None + + @property + def meta_data_dict(self): + return json.loads(self.meta_data) if self.meta_data else {} class InternalAppSettingObject: diff --git a/ui_components/widgets/add_key_frame_element.py b/ui_components/widgets/add_key_frame_element.py index a4bc5f40..eb835b8d 100644 --- a/ui_components/widgets/add_key_frame_element.py +++ b/ui_components/widgets/add_key_frame_element.py @@ -1,6 +1,5 @@ import streamlit as st from ui_components.constants import CreativeProcessType, WorkflowStageType -from ui_components.methods.video_methods import 
calculate_desired_duration_of_individual_clip from ui_components.widgets.image_zoom_widgets import zoom_inputs from utils import st_memory @@ -9,19 +8,18 @@ from utils.constants import ImageStage from ui_components.methods.file_methods import generate_pil_image,save_or_host_file -from ui_components.methods.common_methods import apply_image_transformations, clone_styling_settings, create_timings_row_at_frame_number, save_uploaded_image, update_clip_duration_of_all_timing_frames +from ui_components.methods.common_methods import apply_image_transformations, clone_styling_settings, create_timings_row_at_frame_number, save_uploaded_image from PIL import Image -def add_key_frame_element(timing_details, project_uuid): +def add_key_frame_element(shot_uuid): data_repo = DataRepo() - - timing_details = data_repo.get_timing_list_from_project(project_uuid) + shot = data_repo.get_shot_from_uuid(shot_uuid) + timing_list = data_repo.get_timing_list_from_shot(shot_uuid) add1, add2 = st.columns(2) with add1: - selected_image_location = "" image1,image2 = st.columns(2) with image1: @@ -40,18 +38,18 @@ def add_key_frame_element(timing_details, project_uuid): image_idx = st.number_input( "Which frame would you like to use?", min_value=1, - max_value=max(1, len(timing_details)), + max_value=max(1, len(timing_list)), value=st.session_state['current_frame_index'], step=1, key="image_idx" ) if transformation_stage == ImageStage.SOURCE_IMAGE.value: - if timing_details[image_idx - 1].source_image is not None and timing_details[image_idx - 1].source_image != "": - selected_image_location = timing_details[image_idx - 1].source_image.location + if timing_list[image_idx - 1].source_image is not None and timing_list[image_idx - 1].source_image != "": + selected_image_location = timing_list[image_idx - 1].source_image.location else: selected_image_location = "" elif transformation_stage == ImageStage.MAIN_VARIANT.value: - selected_image_location = timing_details[image_idx - 
1].primary_image_location + selected_image_location = timing_list[image_idx - 1].primary_image_location elif source_of_starting_image == "Uploaded image": with image2: uploaded_image = st.file_uploader( @@ -59,7 +57,7 @@ def add_key_frame_element(timing_details, project_uuid): # FILE UPLOAD HANDLE-- if uploaded_image is not None: image = Image.open(uploaded_image) - file_location = f"videos/{project_uuid}/assets/frames/1_selected/{uploaded_image.name}" + file_location = f"videos/{shot.uuid}/assets/frames/1_selected/{uploaded_image.name}" selected_image_location = save_or_host_file(image, file_location) selected_image_location = selected_image_location or file_location else: @@ -97,51 +95,48 @@ def add_key_frame_element(timing_details, project_uuid): def add_key_frame(selected_image, inherit_styling_settings, how_long_after, target_frame_position=None, refresh_state=True): data_repo = DataRepo() - project_uuid = st.session_state['project_uuid'] - timing_details = data_repo.get_timing_list_from_project(project_uuid) - project_settings = data_repo.get_project_setting(project_uuid) + shot_uuid = st.session_state['shot_uuid'] + timing_list = data_repo.get_timing_list_from_shot(shot_uuid) + project_settings = data_repo.get_project_setting(shot_uuid) - if len(timing_details) == 0: + if len(timing_list) == 0: index_of_current_item = 1 else: target_frame_position = st.session_state['current_frame_index'] if target_frame_position is None else target_frame_position - index_of_current_item = min(len(timing_details), target_frame_position) - - timing_details = data_repo.get_timing_list_from_project(project_uuid) + index_of_current_item = min(len(timing_list), target_frame_position) - if len(timing_details) == 0: + if len(timing_list) == 0: key_frame_time = 0.0 elif target_frame_position is not None: - key_frame_time = float(timing_details[target_frame_position - 1].frame_time) + how_long_after - elif index_of_current_item == len(timing_details): - key_frame_time = 
float(timing_details[index_of_current_item - 1].frame_time) + how_long_after + key_frame_time = float(timing_list[target_frame_position - 1].frame_time) + how_long_after + elif index_of_current_item == len(timing_list): + key_frame_time = float(timing_list[index_of_current_item - 1].frame_time) + how_long_after else: - key_frame_time = (float(timing_details[index_of_current_item - 1].frame_time) + float( - timing_details[index_of_current_item].frame_time)) / 2.0 + key_frame_time = (float(timing_list[index_of_current_item - 1].frame_time) + float( + timing_list[index_of_current_item].frame_time)) / 2.0 - if len(timing_details) == 0: - new_timing = create_timings_row_at_frame_number(project_uuid, 0) + if len(timing_list) == 0: + _ = create_timings_row_at_frame_number(shot_uuid, 0) else: - new_timing = create_timings_row_at_frame_number(project_uuid, index_of_current_item, frame_time=key_frame_time) - update_clip_duration_of_all_timing_frames(project_uuid) + _ = create_timings_row_at_frame_number(shot_uuid, index_of_current_item, frame_time=key_frame_time) - timing_details = data_repo.get_timing_list_from_project(project_uuid) + timing_list = data_repo.get_timing_list_from_shot(shot_uuid) if selected_image: - save_uploaded_image(selected_image, project_uuid, timing_details[index_of_current_item].uuid, WorkflowStageType.SOURCE.value) - save_uploaded_image(selected_image, project_uuid, timing_details[index_of_current_item].uuid, WorkflowStageType.STYLED.value) + save_uploaded_image(selected_image, shot_uuid, timing_list[index_of_current_item].uuid, WorkflowStageType.SOURCE.value) + save_uploaded_image(selected_image, shot_uuid, timing_list[index_of_current_item].uuid, WorkflowStageType.STYLED.value) if inherit_styling_settings == "Yes": - clone_styling_settings(index_of_current_item - 1, timing_details[index_of_current_item].uuid) + clone_styling_settings(index_of_current_item - 1, timing_list[index_of_current_item].uuid) - 
timing_details[index_of_current_item].animation_style = project_settings.default_animation_style + timing_list[index_of_current_item].animation_style = project_settings.default_animation_style - if len(timing_details) == 1: + if len(timing_list) == 1: st.session_state['current_frame_index'] = 1 - st.session_state['current_frame_uuid'] = timing_details[0].uuid + st.session_state['current_frame_uuid'] = timing_list[0].uuid else: - st.session_state['prev_frame_index'] = min(len(timing_details), index_of_current_item + 1) - st.session_state['current_frame_index'] = min(len(timing_details), index_of_current_item + 1) - st.session_state['current_frame_uuid'] = timing_details[st.session_state['current_frame_index'] - 1].uuid + st.session_state['prev_frame_index'] = min(len(timing_list), index_of_current_item + 1) + st.session_state['current_frame_index'] = min(len(timing_list), index_of_current_item + 1) + st.session_state['current_frame_uuid'] = timing_list[st.session_state['current_frame_index'] - 1].uuid st.session_state['page'] = CreativeProcessType.STYLING.value st.session_state['section_index'] = 0 diff --git a/ui_components/widgets/animation_style_element.py b/ui_components/widgets/animation_style_element.py index a2ca859c..6122bb0e 100644 --- a/ui_components/widgets/animation_style_element.py +++ b/ui_components/widgets/animation_style_element.py @@ -6,10 +6,11 @@ from utils.data_repo.data_repo import DataRepo from utils.ml_processor.motion_module import AnimateDiffCheckpoint -def animation_style_element(timing_uuid, project_uuid): +def animation_style_element(timing_uuid, shot_uuid): motion_modules = AnimateDiffCheckpoint.get_name_list() data_repo = DataRepo() - project_settings = data_repo.get_project_setting(project_uuid) + shot = data_repo.get_shot_from_uuid(shot_uuid) + project_settings = data_repo.get_project_setting(shot.project.uuid) timing = data_repo.get_timing_from_uuid(timing_uuid) current_animation_style = timing.animation_style variant_count = 1 diff 
--git a/ui_components/widgets/frame_clip_generation_elements.py b/ui_components/widgets/frame_clip_generation_elements.py index 0770be4d..f6001763 100644 --- a/ui_components/widgets/frame_clip_generation_elements.py +++ b/ui_components/widgets/frame_clip_generation_elements.py @@ -1,8 +1,6 @@ -import uuid import streamlit as st from shared.constants import AnimationStyleType -from ui_components.methods.file_methods import convert_bytes_to_file -from ui_components.methods.video_methods import create_full_preview_video, create_single_interpolated_clip, update_speed_of_video_clip +from ui_components.methods.video_methods import create_single_interpolated_clip, update_speed_of_video_clip from ui_components.models import InternalFrameTimingObject from utils.data_repo.data_repo import DataRepo from utils.media_processor.interpolator import VideoInterpolator diff --git a/ui_components/widgets/frame_movement_widgets.py b/ui_components/widgets/frame_movement_widgets.py index 0cf7ebbf..718776fb 100644 --- a/ui_components/widgets/frame_movement_widgets.py +++ b/ui_components/widgets/frame_movement_widgets.py @@ -1,15 +1,19 @@ import time import streamlit as st -from ui_components.methods.common_methods import add_image_variant, promote_image_variant, save_and_promote_image, save_uploaded_image, update_clip_duration_of_all_timing_frames +from ui_components.methods.common_methods import add_image_variant, promote_image_variant, save_and_promote_image from ui_components.models import InternalFrameTimingObject from utils.constants import ImageStage from utils.data_repo.data_repo import DataRepo def change_frame_position_input(timing_uuid, src): + ''' + changes frame position inside the respective shot + (used when large change is required, like moving a frame from 2nd pos to 10th pos) + ''' data_repo = DataRepo() timing: InternalFrameTimingObject = data_repo.get_timing_from_uuid(timing_uuid) - timing_list = data_repo.get_timing_list_from_project(project_uuid=timing.project.uuid) 
+ timing_list = data_repo.get_timing_list_from_shot(shot_uuid=timing.shot.uuid) min_value = 1 max_value = len(timing_list) @@ -23,6 +27,9 @@ def change_frame_position_input(timing_uuid, src): def move_frame(direction, timing_uuid): + ''' + arrows that change frame position by 1 step + ''' data_repo = DataRepo() timing: InternalFrameTimingObject = data_repo.get_timing_from_uuid( timing_uuid) @@ -35,7 +42,7 @@ def move_frame(direction, timing_uuid): data_repo.update_specific_timing(timing.uuid, aux_frame_index=timing.aux_frame_index - 1) elif direction == "Down": - timing_list = data_repo.get_timing_list_from_project(project_uuid=timing.project.uuid) + timing_list = data_repo.get_timing_list_from_shot(shot_uuid=timing.shot.uuid) if timing.aux_frame_index == len(timing_list) - 1: st.error("This is the last frame") time.sleep(0.5) @@ -79,53 +86,33 @@ def delete_frame_button(timing_uuid, show_label=False): def delete_frame(timing_uuid): data_repo = DataRepo() timing: InternalFrameTimingObject = data_repo.get_timing_from_uuid(timing_uuid) - project_uuid = timing.project.uuid + shot_uuid = timing.shot.uuid next_timing = data_repo.get_next_timing(timing_uuid) - timing_details = data_repo.get_timing_list_from_project(project_uuid=timing.project.uuid) + timing_list = data_repo.get_timing_list_from_shot(shot_uuid=timing.shot.uuid) - if len(timing_details) == 1: - st.error("can't delete the only image present in the project") + if len(timing_list) == 1: + st.error("can't delete the only image present in the shot") return - if next_timing: - data_repo.update_specific_timing( - next_timing.uuid, - interpolated_clip_list=None, - preview_video_id=None, - timed_clip_id=None - ) - - # If the frame being deleted is the first one - if timing.aux_frame_index == 0 and next_timing: - print("first frame") - print(next_timing.uuid) - print(timing.uuid) - data_repo.update_specific_timing( - next_timing.uuid, - start_time=0.0 - ) - - data_repo.delete_timing_from_uuid(timing.uuid) - 
timing_details = data_repo.get_timing_list_from_project(project_uuid=project_uuid) + timing_list = data_repo.get_timing_list_from_shot(shot_uuid=shot_uuid) # this is the last frame if not next_timing: st.session_state['current_frame_index'] = max(1, st.session_state['current_frame_index'] - 1) st.session_state['prev_frame_index'] = st.session_state['current_frame_index'] - st.session_state['current_frame_uuid'] = timing_details[st.session_state['current_frame_index'] - 1].uuid + st.session_state['current_frame_uuid'] = timing_list[st.session_state['current_frame_index'] - 1].uuid # this is the first frame or something in the middle else: - st.session_state['current_frame_index'] = min(len(timing_details) - 1, st.session_state['current_frame_index'] + 1) + st.session_state['current_frame_index'] = min(len(timing_list) - 1, st.session_state['current_frame_index'] + 1) st.session_state['prev_frame_index'] = st.session_state['current_frame_index'] - st.session_state['current_frame_uuid'] = timing_details[st.session_state['current_frame_index'] - 1].uuid - - update_clip_duration_of_all_timing_frames(project_uuid) + st.session_state['current_frame_uuid'] = timing_list[st.session_state['current_frame_index'] - 1].uuid + def replace_image_widget(timing_uuid, stage, options=["Uploaded Frame","Other Frame"]): data_repo = DataRepo() timing = data_repo.get_timing_from_uuid(timing_uuid) - timing_details = data_repo.get_timing_list_from_project(timing.project.uuid) + timing_list = data_repo.get_timing_list_from_shot(timing.shot.uuid) replace_with = options[0] if len(options) == 1 else st.radio("Replace with:", options, horizontal=True, key=f"replace_with_what_{stage}_{timing_uuid}") @@ -133,12 +120,12 @@ def replace_image_widget(timing_uuid, stage, options=["Uploaded Frame","Other Fr which_stage_to_use_for_replacement = st.radio("Select stage to use:", [ ImageStage.MAIN_VARIANT.value, ImageStage.SOURCE_IMAGE.value], 
key=f"which_stage_to_use_for_replacement_{stage}_{timing_uuid}", horizontal=True) which_image_to_use_for_replacement = st.number_input("Select image to use:", min_value=0, max_value=len( - timing_details)-1, value=0, key=f"which_image_to_use_for_replacement_{stage}") + timing_list)-1, value=0, key=f"which_image_to_use_for_replacement_{stage}") if which_stage_to_use_for_replacement == ImageStage.SOURCE_IMAGE.value: - selected_image = timing_details[which_image_to_use_for_replacement].source_image + selected_image = timing_list[which_image_to_use_for_replacement].source_image elif which_stage_to_use_for_replacement == ImageStage.MAIN_VARIANT.value: - selected_image = timing_details[which_image_to_use_for_replacement].primary_image + selected_image = timing_list[which_image_to_use_for_replacement].primary_image st.image(selected_image.local_path, use_column_width=True) @@ -156,7 +143,6 @@ def replace_image_widget(timing_uuid, stage, options=["Uploaded Frame","Other Fr st.success("Replaced") time.sleep(1) st.rerun() - elif replace_with == "Uploaded Frame": if stage == "source": uploaded_file = st.file_uploader("Upload Source Image", type=[ @@ -180,25 +166,10 @@ def replace_image_widget(timing_uuid, stage, options=["Uploaded Frame","Other Fr time.sleep(1) st.rerun() -def jump_to_single_frame_view_button(display_number, timing_details, src): +def jump_to_single_frame_view_button(display_number, timing_list, src): if st.button(f"Jump to #{display_number}", key=src): st.session_state['prev_frame_index'] = display_number - st.session_state['current_frame_uuid'] = timing_details[st.session_state['current_frame_index'] - 1].uuid + st.session_state['current_frame_uuid'] = timing_list[st.session_state['current_frame_index'] - 1].uuid st.session_state['frame_styling_view_type'] = "Individual" st.session_state['change_view_type'] = True - st.rerun() - -def change_position_input(timing_uuid, src): - data_repo = DataRepo() - timing: InternalFrameTimingObject = 
data_repo.get_timing_from_uuid(timing_uuid) - timing_list = data_repo.get_timing_list_from_project(project_uuid=timing.project.uuid) - - min_value = 1 - max_value = len(timing_list) - - new_position = st.number_input("Move to new position:", min_value=min_value, max_value=max_value, - value=timing.aux_frame_index + 1, step=1, key=f"new_position_{timing.uuid}_{src}") - - if st.button('Update Position',key=f"change_frame_position_{timing.uuid}_{src}"): - data_repo.update_specific_timing(timing.uuid, aux_frame_index=new_position - 1) st.rerun() \ No newline at end of file diff --git a/ui_components/widgets/frame_selector.py b/ui_components/widgets/frame_selector.py index ae5e5fbb..5f0e185e 100644 --- a/ui_components/widgets/frame_selector.py +++ b/ui_components/widgets/frame_selector.py @@ -1,6 +1,5 @@ import streamlit as st from ui_components.widgets.frame_movement_widgets import delete_frame, replace_image_widget -from ui_components.widgets.frame_time_selector import single_frame_time_selector from ui_components.widgets.image_carousal import display_image from utils.data_repo.data_repo import DataRepo from ui_components.constants import WorkflowStageType @@ -8,24 +7,20 @@ def frame_selector_widget(): data_repo = DataRepo() - time1, time2 = st.columns([1,1]) - timing_details = data_repo.get_timing_list_from_project(project_uuid=st.session_state["project_uuid"]) - len_timing_details = len(timing_details) if len(timing_details) > 0 else 1.0 - st.progress(st.session_state['current_frame_index'] / len_timing_details) + timing_list = data_repo.get_timing_list_from_shot(shot_uuid=st.session_state["shot_uuid"]) + len_timing_list = len(timing_list) if len(timing_list) > 0 else 1.0 + st.progress(st.session_state['current_frame_index'] / len_timing_list) with time1: if 'prev_frame_index' not in st.session_state: st.session_state['prev_frame_index'] = 1 - st.session_state['current_frame_index'] = st.number_input(f"Key frame # (out of {len(timing_details)})", 1, - 
len(timing_details), value=st.session_state['prev_frame_index'], + st.session_state['current_frame_index'] = st.number_input(f"Key frame # (out of {len(timing_list)})", 1, + len(timing_list), value=st.session_state['prev_frame_index'], step=1, key="which_image_selector") update_current_frame_index(st.session_state['current_frame_index']) - - with time2: - single_frame_time_selector(st.session_state['current_frame_uuid'], 'navbar', shift_frames=False) with st.expander(f"🖼️ Frame #{st.session_state['current_frame_index']} Details"): a1, a2 = st.columns([1,1]) @@ -57,13 +52,13 @@ def frame_selector_widget(): def update_current_frame_index(index): data_repo = DataRepo() - timing_details = data_repo.get_timing_list_from_project(project_uuid=st.session_state["project_uuid"]) + timing_list = data_repo.get_timing_list_from_shot(shot_uuid=st.session_state["shot_uuid"]) - st.session_state['current_frame_uuid'] = timing_details[index - 1].uuid + st.session_state['current_frame_uuid'] = timing_list[index - 1].uuid if st.session_state['prev_frame_index'] != index: st.session_state['prev_frame_index'] = index - st.session_state['current_frame_uuid'] = timing_details[index - 1].uuid + st.session_state['current_frame_uuid'] = timing_list[index - 1].uuid st.session_state['reset_canvas'] = True st.session_state['frame_styling_view_type_index'] = 0 st.session_state['frame_styling_view_type'] = "Individual View" diff --git a/ui_components/widgets/frame_switch_btn.py b/ui_components/widgets/frame_switch_btn.py index 108218ab..78b6f31d 100644 --- a/ui_components/widgets/frame_switch_btn.py +++ b/ui_components/widgets/frame_switch_btn.py @@ -9,11 +9,10 @@ def back_and_forward_buttons(): data_repo = DataRepo() timing: InternalFrameTimingObject = data_repo.get_timing_from_uuid( st.session_state['current_frame_uuid']) - timing_details: List[InternalFrameTimingObject] = data_repo.get_timing_list_from_project( - timing.project.uuid) + timing_list: List[InternalFrameTimingObject] = 
data_repo.get_timing_list_from_shot( + timing.shot.uuid) - smallbutton0, smallbutton1, smallbutton2, smallbutton3, smallbutton4 = st.columns([ - 2, 2, 2, 2, 2]) + smallbutton0, smallbutton1, smallbutton2, smallbutton3, smallbutton4 = st.columns([2, 2, 2, 2, 2]) display_idx = st.session_state['current_frame_index'] with smallbutton0: @@ -21,7 +20,7 @@ def back_and_forward_buttons(): if st.button(f"{display_idx-2} ⏮️", key=f"Previous Previous Image for {display_idx}"): st.session_state['current_frame_index'] = st.session_state['current_frame_index'] - 2 st.session_state['prev_frame_index'] = st.session_state['current_frame_index'] - st.session_state['current_frame_uuid'] = timing_details[st.session_state['current_frame_index'] - 1].uuid + st.session_state['current_frame_uuid'] = timing_list[st.session_state['current_frame_index'] - 1].uuid st.rerun() with smallbutton1: # if it's not the first image @@ -29,23 +28,23 @@ def back_and_forward_buttons(): if st.button(f"{display_idx-1} ⏪", key=f"Previous Image for {display_idx}"): st.session_state['current_frame_index'] = st.session_state['current_frame_index'] - 1 st.session_state['prev_frame_index'] = st.session_state['current_frame_index'] - st.session_state['current_frame_uuid'] = timing_details[st.session_state['current_frame_index'] - 1].uuid + st.session_state['current_frame_uuid'] = timing_list[st.session_state['current_frame_index'] - 1].uuid st.rerun() with smallbutton2: st.button(f"{display_idx} 📍", disabled=True) with smallbutton3: # if it's not the last image - if display_idx != len(timing_details): + if display_idx != len(timing_list): if st.button(f"{display_idx+1} ⏩", key=f"Next Image for {display_idx}"): st.session_state['current_frame_index'] = st.session_state['current_frame_index'] + 1 st.session_state['prev_frame_index'] = st.session_state['current_frame_index'] - st.session_state['current_frame_uuid'] = timing_details[st.session_state['current_frame_index'] - 1].uuid + 
st.session_state['current_frame_uuid'] = timing_list[st.session_state['current_frame_index'] - 1].uuid st.rerun() with smallbutton4: - if display_idx <= len(timing_details)-2: + if display_idx <= len(timing_list)-2: if st.button(f"{display_idx+2} ⏭️", key=f"Next Next Image for {display_idx}"): st.session_state['current_frame_index'] = st.session_state['current_frame_index'] + 2 st.session_state['prev_frame_index'] = st.session_state['current_frame_index'] - st.session_state['current_frame_uuid'] = timing_details[st.session_state['current_frame_index'] - 1].uuid + st.session_state['current_frame_uuid'] = timing_list[st.session_state['current_frame_index'] - 1].uuid st.rerun() diff --git a/ui_components/widgets/frame_time_selector.py b/ui_components/widgets/frame_time_selector.py deleted file mode 100644 index ac080b39..00000000 --- a/ui_components/widgets/frame_time_selector.py +++ /dev/null @@ -1,90 +0,0 @@ -from typing import List -from ui_components.methods.common_methods import update_clip_duration_of_all_timing_frames -from ui_components.models import InternalFrameTimingObject -from utils.common_utils import truncate_decimal -from utils.data_repo.data_repo import DataRepo -import streamlit as st - -def shift_subsequent_frames(timing, time_delta): - data_repo = DataRepo() - timing_details: List[InternalFrameTimingObject] = data_repo.get_timing_list_from_project( - timing.project.uuid) - - if time_delta > 0: - for a in range(timing.aux_frame_index + 1, len(timing_details)): - frame = timing_details[a] - # shift them by the difference between the new frame time and the old frame time - new_frame_time = frame.frame_time + time_delta - data_repo.update_specific_timing(frame.uuid, frame_time=new_frame_time, timed_clip_id=None) - - -def update_frame_duration(timing_uuid, frame_duration, next_timing, shift_frames): - data_repo = DataRepo() - timing: InternalFrameTimingObject = data_repo.get_timing_from_uuid(timing_uuid) - - if next_timing: - # Calculate time_delta 
before updating next_timing.frame_time - time_delta = frame_duration - round(next_timing.frame_time - timing.frame_time, 2) - next_timing.frame_time = round(timing.frame_time + frame_duration, 2) - data_repo.update_specific_timing(next_timing.uuid, frame_time=next_timing.frame_time, timed_clip_id=None) - if shift_frames: - shift_subsequent_frames(timing, time_delta) - - # updating clip_duration - update_clip_duration_of_all_timing_frames(timing.project.uuid) - st.rerun() - - -def single_frame_time_duration_setter(timing_uuid, src, shift_frames=True): - data_repo = DataRepo() - - timing: InternalFrameTimingObject = data_repo.get_timing_from_uuid(timing_uuid) - next_timing = data_repo.get_next_timing(timing_uuid) - - # Calculate clip_duration - clip_duration = max(float(round(timing.clip_duration, 2)), float(0.0)) - max_value = 100.0 if shift_frames else clip_duration - - disable_duration_input = False if next_timing else True - help_text = None if shift_frames else "You currently won't shift subsequent frames - to do this, go to the List View and turn on Shift Frames." 
- frame_duration = st.number_input("Duration:", min_value=0.0, max_value=max_value, - value=clip_duration, step=0.1, key=f"frame_duration_{timing.uuid}_{src}", - disabled=disable_duration_input, help=help_text) - - frame_duration = round(frame_duration, 2) - if frame_duration != clip_duration: - update_frame_duration(timing_uuid, frame_duration, next_timing, shift_frames) - - - -def single_frame_time_selector(timing_uuid, src, shift_frames=True): - data_repo = DataRepo() - timing: InternalFrameTimingObject = data_repo.get_timing_from_uuid(timing_uuid) - prev_timing = data_repo.get_prev_timing(timing_uuid) - - min_value = prev_timing.frame_time if prev_timing else 0.0 - disabled_time_change = True if timing.aux_frame_index == 0 else False - - next_timing = data_repo.get_next_timing(timing_uuid) - if next_timing: - max_value = 100.0 if shift_frames else next_timing.frame_time - else: - max_value = timing.frame_time + 100 # Allow up to 100 seconds more if it's the last item - - # adjusted_value = max(min_value + 0.01, timing.frame_time) if timing.aux_frame_index != 0 else timing.frame_time # hackish solution to avoid timing.frame_time < min_value while streamlit is refreshing - help_text = None if shift_frames else "You currently won't shift subsequent frames - to do this, go to the List View and turn on Shift Frames." 
- frame_time = st.number_input("Time:", min_value=min_value, max_value=max_value, - value=timing.frame_time, step=0.1, key=f"frame_time_{timing.uuid}_{src}",disabled=disabled_time_change, help=help_text) - - frame_time = int(frame_time * 100) / 100.0 - if frame_time != timing.frame_time: - data_repo.update_specific_timing(timing_uuid, frame_time=frame_time, timed_clip_id=None) - - # if this time is going to be greater than the next frame's time, shift all subsequent frames - if next_timing and frame_time >= next_timing.frame_time: - shift_subsequent_frames(timing, timing.frame_time - frame_time) - update_clip_duration_of_all_timing_frames(timing.project.uuid) - st.rerun() - - - diff --git a/ui_components/widgets/image_carousal.py b/ui_components/widgets/image_carousal.py index ddcfaff5..77be10f4 100644 --- a/ui_components/widgets/image_carousal.py +++ b/ui_components/widgets/image_carousal.py @@ -2,6 +2,7 @@ from st_clickable_images import clickable_images from ui_components.constants import WorkflowStageType +from ui_components.models import InternalShotObject from utils.data_repo.data_repo import DataRepo @@ -39,7 +40,7 @@ def display_image(timing_uuid, stage=None, clickable=False): "max-width": "100%", "height": "auto", "cursor": "pointer"}, key=f"{timing_idx}_{stage}_image_{st.session_state['counter']}") if st.session_state[f'{timing_idx}_{stage}_clicked'] == 0: - timing_details = data_repo.get_timing_list_from_project(timing.project.uuid) + timing_details = data_repo.get_timing_list_from_shot(timing.shot.uuid) st.session_state['current_frame_uuid'] = timing_details[timing_idx].uuid st.session_state['current_frame_index'] = timing_idx + 1 st.session_state['prev_frame_index'] = timing_idx + 1 @@ -54,19 +55,19 @@ def display_image(timing_uuid, stage=None, clickable=False): st.error(f"No {stage} image found for #{timing_idx + 1}") -def carousal_of_images_element(project_uuid, stage=WorkflowStageType.STYLED.value): - +def carousal_of_images_element(shot_uuid, 
stage=WorkflowStageType.STYLED.value): data_repo = DataRepo() - timing_details = data_repo.get_timing_list_from_project(project_uuid) + shot: InternalShotObject = data_repo.get_shot_from_uuid(shot_uuid) + timing_list = shot.timing_list header1, header2, header3, header4, header5 = st.columns([1, 1, 1, 1, 1]) - current_timing = data_repo.get_timing_from_uuid( - st.session_state['current_frame_uuid']) + current_frame_uuid = st.session_state[f"{shot.uuid}__" + 'current_frame_uuid'] + current_timing = data_repo.get_timing_from_uuid(current_frame_uuid) with header1: if current_timing.aux_frame_index - 2 >=0: - prev_2_timing = data_repo.get_timing_from_frame_number(project_uuid, + prev_2_timing = data_repo.get_timing_from_frame_number(shot_uuid, current_timing.aux_frame_index - 2) if prev_2_timing: @@ -75,7 +76,7 @@ def carousal_of_images_element(project_uuid, stage=WorkflowStageType.STYLED.valu with header2: if current_timing.aux_frame_index - 1 >= 0: - prev_timing = data_repo.get_timing_from_frame_number(project_uuid, + prev_timing = data_repo.get_timing_from_frame_number(shot_uuid, current_timing.aux_frame_index - 1) if prev_timing: display_image(prev_timing.uuid, stage=stage, clickable=True) @@ -83,21 +84,21 @@ def carousal_of_images_element(project_uuid, stage=WorkflowStageType.STYLED.valu with header3: - timing = data_repo.get_timing_from_uuid(st.session_state['current_frame_uuid']) + timing = data_repo.get_timing_from_uuid(current_frame_uuid) display_image(timing.uuid, stage=stage, clickable=True) st.success(f"#{current_timing.aux_frame_index + 1}") with header4: - if current_timing.aux_frame_index + 1 <= len(timing_details): - next_timing = data_repo.get_timing_from_frame_number(project_uuid, + if current_timing.aux_frame_index + 1 <= len(timing_list): + next_timing = data_repo.get_timing_from_frame_number(shot_uuid, current_timing.aux_frame_index + 1) if next_timing: display_image(next_timing.uuid, stage=stage, clickable=True) 
st.info(f"#{next_timing.aux_frame_index + 1}") with header5: - if current_timing.aux_frame_index + 2 <= len(timing_details): - next_2_timing = data_repo.get_timing_from_frame_number(project_uuid, + if current_timing.aux_frame_index + 2 <= len(timing_list): + next_2_timing = data_repo.get_timing_from_frame_number(shot_uuid, current_timing.aux_frame_index + 2) if next_2_timing: display_image(next_2_timing.uuid, stage=stage, clickable=True) diff --git a/ui_components/widgets/inpainting_element.py b/ui_components/widgets/inpainting_element.py index 5b115ee9..f0cd962b 100644 --- a/ui_components/widgets/inpainting_element.py +++ b/ui_components/widgets/inpainting_element.py @@ -1,6 +1,4 @@ - import json -import os import random import string import time @@ -19,7 +17,6 @@ from utils import st_memory from utils.data_repo.data_repo import DataRepo -from utils import st_memory from ui_components.methods.common_methods import add_image_variant, execute_image_edit, create_or_update_mask, process_inference_output, promote_image_variant from ui_components.models import InternalFrameTimingObject, InternalProjectObject, InternalSettingObject from streamlit_image_comparison import image_comparison @@ -34,8 +31,8 @@ def inpainting_element(timing_uuid): stage = WorkflowStageType.SOURCE.value data_repo = DataRepo() timing = data_repo.get_timing_from_uuid(timing_uuid) - timing_details: List[InternalFrameTimingObject] = data_repo.get_timing_list_from_project( - timing.project.uuid) + timing_details: List[InternalFrameTimingObject] = data_repo.get_timing_list_from_shot( + timing.shot.uuid) project_settings: InternalSettingObject = data_repo.get_project_setting( timing.project.uuid) @@ -454,8 +451,6 @@ def inpaint_in_black_space_element(cropped_img, project_uuid, stage=WorkflowStag elif stage == WorkflowStageType.STYLED.value: if st.button("Save + Promote Image"): - timing_details = data_repo.get_timing_list_from_project( - project_uuid) number_of_image_variants = add_image_variant( 
st.session_state['precision_cropping_inpainted_image_uuid'], st.session_state['current_frame_uuid']) promote_image_variant( diff --git a/ui_components/widgets/list_view.py b/ui_components/widgets/list_view.py deleted file mode 100644 index e89492d3..00000000 --- a/ui_components/widgets/list_view.py +++ /dev/null @@ -1,130 +0,0 @@ -import streamlit as st -from ui_components.constants import WorkflowStageType -from ui_components.widgets.frame_movement_widgets import delete_frame, jump_to_single_frame_view_button, move_frame -from utils.data_repo.data_repo import DataRepo -from ui_components.widgets.frame_time_selector import single_frame_time_selector, single_frame_time_duration_setter -from ui_components.widgets.image_carousal import display_image -import math -from utils.data_repo.data_repo import DataRepo -from ui_components.widgets.frame_clip_generation_elements import current_individual_clip_element, current_preview_video_element, update_animation_style_element -from ui_components.widgets.frame_time_selector import single_frame_time_selector, single_frame_time_duration_setter -from ui_components.widgets.image_carousal import display_image -from ui_components.widgets.frame_clip_generation_elements import current_individual_clip_element, current_preview_video_element -from utils.data_repo.data_repo import DataRepo - -def list_view_set_up(timing_details,project_uuid): - data_repo = DataRepo() - - timing_details = data_repo.get_timing_list_from_project(project_uuid) - if 'current_page' not in st.session_state: - st.session_state['current_page'] = 1 - - if not('index_of_current_page' in st.session_state and st.session_state['index_of_current_page']): - st.session_state['index_of_current_page'] = 1 - - items_per_page = 10 - num_pages = math.ceil(len(timing_details) / items_per_page) + 1 - - return num_pages, items_per_page - -def page_toggle(num_pages, items_per_page, project_uuid, position): - data_repo = DataRepo() - timing_details = 
data_repo.get_timing_list_from_project(project_uuid) - - st.session_state['current_page'] = st.radio(f"Select page:", options=range( - 1, num_pages), horizontal=True, index=st.session_state['index_of_current_page'] - 1, key=f"page_selection_radio_{position}") - if st.session_state['current_page'] != st.session_state['index_of_current_page']: - st.session_state['index_of_current_page'] = st.session_state['current_page'] - st.rerun() - - start_index = (st.session_state['current_page'] - 1) * items_per_page - end_index = min(start_index + items_per_page,len(timing_details)) - - return start_index, end_index - -def styling_list_view(start_index, end_index, shift_frames_setting, project_uuid): - data_repo = DataRepo() - timing_details = data_repo.get_timing_list_from_project(project_uuid) - for i in range(start_index, end_index): - display_number = i + 1 - st.subheader(f"Frame {display_number}") - image1, image2, image3 = st.columns([2, 3, 2]) - - with image1: - display_image(timing_uuid=timing_details[i].uuid, stage=WorkflowStageType.SOURCE.value, clickable=False) - - with image2: - display_image(timing_uuid=timing_details[i].uuid, stage=WorkflowStageType.STYLED.value, clickable=False) - - with image3: - time1, time2 = st.columns([1, 1]) - with time1: - single_frame_time_selector(timing_details[i].uuid, 'sidebar', shift_frames=shift_frames_setting) - single_frame_time_duration_setter(timing_details[i].uuid,'sidebar',shift_frames=shift_frames_setting) - - with time2: - st.write("") - - - jump_to_single_frame_view_button(display_number,timing_details, 'styling_list_view_'+str(timing_details[i].uuid)) - - st.markdown("---") - btn1, btn2, btn3 = st.columns([2, 1, 1]) - with btn1: - if st.button("Delete this keyframe", key=f'{i}'): - delete_frame(timing_details[i].uuid) - st.rerun() - with btn2: - if st.button("⬆️", key=f"Promote {display_number}"): - move_frame("Up", timing_details[i].uuid) - st.rerun() - with btn3: - if st.button("⬇️", key=f"Demote {display_number}"): - 
move_frame("Down", timing_details[i].uuid) - st.rerun() - - st.markdown("***") - -def motion_list_view(start_index, end_index, shift_frames_setting, project_uuid): - data_repo = DataRepo() - timing_details = data_repo.get_timing_list_from_project(project_uuid) - num_timing_details = len(timing_details) - timing_details = data_repo.get_timing_list_from_project(project_uuid) - - for idx in range(start_index, end_index): - st.header(f"Frame {idx+1}") - timing1, timing2, timing3 = st.columns([1, 1, 1]) - - with timing1: - frame1, frame2, frame3 = st.columns([2, 1, 2]) - with frame1: - if timing_details[idx].primary_image_location: - st.image(timing_details[idx].primary_image_location) - with frame2: - st.write("") - st.write("") - st.write("") - st.write("") - st.write("") - st.info(" ➜") - with frame3: - if idx+1 < num_timing_details and timing_details[idx+1].primary_image_location: - st.image(timing_details[idx+1].primary_image_location) - elif idx+1 == num_timing_details: - st.write("") - st.write("") - st.write("") - st.write("") - st.markdown("

FIN

", unsafe_allow_html=True) - - single_frame_time_selector(timing_details[idx].uuid, 'motion', shift_frames=shift_frames_setting) - single_frame_time_duration_setter(timing_details[idx].uuid, 'motion', shift_frames=shift_frames_setting) - update_animation_style_element(timing_details[idx].uuid) - - if timing_details[idx].aux_frame_index != len(timing_details) - 1: - with timing2: - current_individual_clip_element(timing_details[idx].uuid) - with timing3: - current_preview_video_element(timing_details[idx].uuid) - - st.markdown("***") \ No newline at end of file diff --git a/ui_components/widgets/sidebar_logger.py b/ui_components/widgets/sidebar_logger.py index ee2079d8..09a304d0 100644 --- a/ui_components/widgets/sidebar_logger.py +++ b/ui_components/widgets/sidebar_logger.py @@ -9,17 +9,14 @@ from utils.data_repo.data_repo import DataRepo from utils.ml_processor.replicate.constants import REPLICATE_MODEL -def sidebar_logger(project_uuid): +def sidebar_logger(shot_uuid): data_repo = DataRepo() - - timing_details = data_repo.get_timing_list_from_project(project_uuid=project_uuid) - + shot = data_repo.get_shot_from_uuid(shot_uuid) + timing_list = data_repo.get_timing_list_from_shot(shot_uuid=shot_uuid) a1, _, a3 = st.columns([1, 0.2, 1]) - - refresh_disabled = False # not any(log.status in [InferenceStatus.QUEUED.value, InferenceStatus.IN_PROGRESS.value] for log in log_list) + refresh_disabled = False # not any(log.status in [InferenceStatus.QUEUED.value, InferenceStatus.IN_PROGRESS.value] for log in log_list) if a1.button("Refresh log", disabled=refresh_disabled): st.rerun() - # a3.button("Jump to full log view") status_option = st.radio("Statuses to display:", options=["All", "In Progress", "Succeeded", "Failed"], key="status_option", index=0, horizontal=True) @@ -33,14 +30,14 @@ def sidebar_logger(project_uuid): b1, b2 = st.columns([1, 1]) - project_setting = data_repo.get_project_setting(project_uuid) + project_setting = 
data_repo.get_project_setting(shot.project.uuid) page_number = b1.number_input('Page number', min_value=1, max_value=project_setting.total_log_pages, value=1, step=1) items_per_page = b2.slider("Items per page", min_value=1, max_value=20, value=5, step=1) log_list, total_page_count = data_repo.get_all_inference_log_list( - project_id=project_uuid, - page=page_number, - data_per_page=items_per_page, + project_id=shot_uuid, + page=page_number, + data_per_page=items_per_page, status_list=status_list ) @@ -102,7 +99,7 @@ def sidebar_logger(project_uuid): if output_url and 'timing_uuid' in origin_data: timing = data_repo.get_timing_from_uuid(origin_data['timing_uuid']) if timing and st.session_state['frame_styling_view_type'] != "Timeline": - jump_to_single_frame_view_button(timing.aux_frame_index + 1, timing_details, 'sidebar_'+str(log.uuid)) + jump_to_single_frame_view_button(timing.aux_frame_index + 1, timing_list, 'sidebar_'+str(log.uuid)) else: if st.session_state['frame_styling_view_type'] != "Explorer": diff --git a/ui_components/widgets/style_explorer_element.py b/ui_components/widgets/style_explorer_element.py index 7b72ef8d..7565bc9e 100644 --- a/ui_components/widgets/style_explorer_element.py +++ b/ui_components/widgets/style_explorer_element.py @@ -13,10 +13,11 @@ from utils.ml_processor.replicate.constants import REPLICATE_MODEL -def style_explorer_element(project_uuid): +def style_explorer_element(shot_uuid): st.markdown("***") - data_repo = DataRepo() - project_settings = data_repo.get_project_setting(project_uuid) + data_repo = DataRepo() + shot = data_repo.get_shot_from_uuid(shot_uuid) + project_settings = data_repo.get_project_setting(shot.project.uuid) _, a2, a3,_= st.columns([0.5, 1, 0.5,0.5]) with a2: @@ -84,7 +85,8 @@ def style_explorer_element(project_uuid): negative_prompt="bad image, worst image, bad anatomy, washed out colors", height=project_settings.height, width=project_settings.width, - project_uuid=project_uuid + 
project_uuid=shot.project.uuid, + shot_uuid=shot.uuid ) replicate_model = REPLICATE_MODEL.get_model_by_db_obj(model_dict[model_name]) @@ -94,11 +96,12 @@ def style_explorer_element(project_uuid): "inference_type": InferenceType.GALLERY_IMAGE_GENERATION.value, "output": output, "log_uuid": log.uuid, - "project_uuid": project_uuid + "project_uuid": shot.project.uuid, + "shot_uuid": shot.uuid } process_inference_output(**inference_data) - project_setting = data_repo.get_project_setting(project_uuid) + project_setting = data_repo.get_project_setting(shot.project.uuid) st.markdown("***") page_number = st.radio("Select page", options=range(1, project_setting.total_gallery_pages + 1), horizontal=True) @@ -109,7 +112,7 @@ def style_explorer_element(project_uuid): gallery_image_list, res_payload = data_repo.get_all_file_list( file_type=InternalFileType.IMAGE.value, tag=InternalFileTag.GALLERY_IMAGE.value, - project_id=project_uuid, + project_id=shot.project.uuid, page=page_number, data_per_page=num_items_per_page, sort_order=SortOrder.DESCENDING.value # newly created images appear first @@ -146,7 +149,7 @@ def style_explorer_element(project_uuid): if st.button(f"Add to timeline", key=f"{gallery_image_list[i + j].uuid}", help="Promote this variant to the primary image", use_container_width=True): pil_image = generate_pil_image(gallery_image_list[i + j].location) - add_key_frame(pil_image, False, 2.5, len(data_repo.get_timing_list_from_project(project_uuid)), refresh_state=False) + add_key_frame(pil_image, False, 2.5, len(data_repo.get_timing_list_from_shot(shot_uuid)), refresh_state=False) # removing this from the gallery view data_repo.update_file(gallery_image_list[i + j].uuid, tag="") diff --git a/ui_components/widgets/styling_element.py b/ui_components/widgets/styling_element.py index 555d4390..257acba1 100644 --- a/ui_components/widgets/styling_element.py +++ b/ui_components/widgets/styling_element.py @@ -1,6 +1,5 @@ import json import streamlit as st -import uuid from 
typing import List from shared.constants import AIModelCategory, AIModelType, ViewType @@ -13,7 +12,7 @@ def styling_element(timing_uuid, view_type=ViewType.SINGLE.value): data_repo = DataRepo() timing: InternalFrameTimingObject = data_repo.get_timing_from_uuid(timing_uuid) - timing_details: List[InternalFrameTimingObject] = data_repo.get_timing_list_from_project(timing.project.uuid) + timing_list: List[InternalFrameTimingObject] = data_repo.get_timing_list_from_shot(timing.shot.uuid) project_settings: InternalSettingObject = data_repo.get_project_setting(timing.project.uuid) # -------------------- Transfomation Stage -------------------- # @@ -48,10 +47,10 @@ def styling_element(timing_uuid, view_type=ViewType.SINGLE.value): with stages2: image = None if st.session_state["transformation_stage"] == ImageStage.SOURCE_IMAGE.value: - source_img = timing_details[st.session_state['current_frame_index'] - 1].source_image + source_img = timing_list[st.session_state['current_frame_index'] - 1].source_image image = source_img.location if source_img else "" elif st.session_state["transformation_stage"] == ImageStage.MAIN_VARIANT.value: - image = timing_details[st.session_state['current_frame_index'] - 1].primary_image_location + image = timing_list[st.session_state['current_frame_index'] - 1].primary_image_location if image: st.image(image, use_column_width=True, caption=f"Image {st.session_state['current_frame_index']}") @@ -381,7 +380,7 @@ def styling_element(timing_uuid, view_type=ViewType.SINGLE.value): if view_type == ViewType.LIST.value: batch_run_range = st.slider( - "Select range:", 1, 1, (1, len(timing_details))) + "Select range:", 1, 1, (1, len(timing_list))) first_batch_run_value = batch_run_range[0] - 1 last_batch_run_value = batch_run_range[1] - 1 @@ -415,7 +414,7 @@ def styling_element(timing_uuid, view_type=ViewType.SINGLE.value): for i in range(first_batch_run_value, last_batch_run_value+1): for _ in range(0, batch_number_of_variants): 
trigger_restyling_process( - timing_uuid=timing_details[i].uuid, + timing_uuid=timing_list[i].uuid, model_uuid=st.session_state['model'], prompt=st.session_state['prompt'], strength=st.session_state['strength'], diff --git a/ui_components/widgets/timeline_view.py b/ui_components/widgets/timeline_view.py index f065b5c1..8aa11505 100644 --- a/ui_components/widgets/timeline_view.py +++ b/ui_components/widgets/timeline_view.py @@ -1,6 +1,5 @@ import streamlit as st -from ui_components.widgets.frame_movement_widgets import change_position_input, delete_frame_button, jump_to_single_frame_view_button, move_frame_back_button, move_frame_forward_button, replace_image_widget -from ui_components.widgets.frame_time_selector import single_frame_time_selector, single_frame_time_duration_setter +from ui_components.widgets.frame_movement_widgets import change_frame_position_input, delete_frame_button, jump_to_single_frame_view_button, move_frame_back_button, move_frame_forward_button, replace_image_widget from ui_components.widgets.image_carousal import display_image from utils.data_repo.data_repo import DataRepo from ui_components.constants import WorkflowStageType @@ -9,48 +8,42 @@ from ui_components.widgets.add_key_frame_element import add_key_frame -def timeline_view_buttons(i, j, timing_details, shift_frames_setting, time_setter_toggle, replace_image_widget_toggle, duration_setter_toggle, copy_frame_toggle, move_frames_toggle, delete_frames_toggle, change_position_toggle, project_uuid): - data_repo = DataRepo() - if time_setter_toggle: - single_frame_time_selector(timing_details[i + j].uuid, 'motion', shift_frames=shift_frames_setting) - if duration_setter_toggle: - single_frame_time_duration_setter(timing_details[i + j].uuid, 'motion', shift_frames=shift_frames_setting) +def timeline_view_buttons(i, j, timing_list, shift_frames_setting, time_setter_toggle, replace_image_widget_toggle, duration_setter_toggle, copy_frame_toggle, move_frames_toggle, delete_frames_toggle, 
change_position_toggle, project_uuid): if replace_image_widget_toggle: - replace_image_widget(timing_details[i + j].uuid, stage=WorkflowStageType.STYLED.value,options=["Uploaded Frame"]) + replace_image_widget(timing_list[i + j].uuid, stage=WorkflowStageType.STYLED.value,options=["Uploaded Frame"]) btn1, btn2, btn3, btn4 = st.columns([1, 1, 1, 1]) if move_frames_toggle: with btn1: - move_frame_back_button(timing_details[i + j].uuid, "side-to-side") + move_frame_back_button(timing_list[i + j].uuid, "side-to-side") with btn2: - move_frame_forward_button(timing_details[i + j].uuid, "side-to-side") + move_frame_forward_button(timing_list[i + j].uuid, "side-to-side") if copy_frame_toggle: with btn3: - if st.button("🔁", key=f"copy_frame_{timing_details[i + j].uuid}"): - pil_image = generate_pil_image(timing_details[i + j].primary_image.location) - position_of_current_item = timing_details[i + j].aux_frame_index - add_key_frame(pil_image, False, 2.5, timing_details[i + j].aux_frame_index+1, refresh_state=False) + if st.button("🔁", key=f"copy_frame_{timing_list[i + j].uuid}"): + pil_image = generate_pil_image(timing_list[i + j].primary_image.location) + add_key_frame(pil_image, False, 2.5, timing_list[i + j].aux_frame_index+1, refresh_state=False) st.rerun() if delete_frames_toggle: with btn4: - delete_frame_button(timing_details[i + j].uuid) + delete_frame_button(timing_list[i + j].uuid) if change_position_toggle: - change_position_input(timing_details[i + j].uuid, "side-to-side") + change_frame_position_input(timing_list[i + j].uuid, "side-to-side") if time_setter_toggle or duration_setter_toggle or replace_image_widget_toggle or move_frames_toggle or delete_frames_toggle or change_position_toggle: st.caption("--") - jump_to_single_frame_view_button(i + j + 1, timing_details, 'timeline_btn_'+str(timing_details[i+j].uuid)) + jump_to_single_frame_view_button(i + j + 1, timing_list, 'timeline_btn_'+str(timing_list[i+j].uuid)) -def timeline_view(project_uuid, stage): +def 
timeline_view(shot_uuid, stage): data_repo = DataRepo() - timing_details = data_repo.get_timing_list_from_project(project_uuid) + timing_list = data_repo.get_timing_list_from_shot(shot_uuid) st.markdown("***") @@ -84,7 +77,7 @@ def timeline_view(project_uuid, stage): st.markdown("***") - total_count = len(timing_details) + total_count = len(timing_list) for i in range(0, total_count, items_per_row): # Step of items_per_row for grid grid = st.columns(items_per_row) # Create items_per_row columns for grid for j in range(items_per_row): @@ -92,13 +85,13 @@ def timeline_view(project_uuid, stage): with grid[j]: display_number = i + j + 1 if stage == 'Key Frames': - display_image(timing_uuid=timing_details[i + j].uuid, stage=WorkflowStageType.STYLED.value, clickable=False) + display_image(timing_uuid=timing_list[i + j].uuid, stage=WorkflowStageType.STYLED.value, clickable=False) elif stage == 'Videos': - if timing_details[i + j].timed_clip: - st.video(timing_details[i + j].timed_clip.location) + if timing_list[i + j].timed_clip: + st.video(timing_list[i + j].timed_clip.location) else: st.error("No video found for this frame.") with st.expander(f'Frame #{display_number}', True): - timeline_view_buttons(i, j, timing_details, shift_frames_setting, time_setter_toggle, replace_image_widget_toggle, duration_setter_toggle, copy_frame_toggle, move_frames_toggle, delete_frames_toggle, change_position_toggle, project_uuid) + timeline_view_buttons(i, j, timing_list, shift_frames_setting, time_setter_toggle, replace_image_widget_toggle, duration_setter_toggle, copy_frame_toggle, move_frames_toggle, delete_frames_toggle, change_position_toggle, project_uuid) diff --git a/utils/cache/cache_methods.py b/utils/cache/cache_methods.py index 55891d64..c7396a07 100644 --- a/utils/cache/cache_methods.py +++ b/utils/cache/cache_methods.py @@ -369,17 +369,7 @@ def _cache_remove_source_image(self, *args, **kwargs): setattr(cls, '_original_remove_source_image', cls.remove_source_image) 
setattr(cls, "remove_source_image", _cache_remove_source_image) - def _cache_move_frame_one_step_forward(self, *args, **kwargs): - original_func = getattr(cls, '_original_move_frame_one_step_forward') - status = original_func(self, *args, **kwargs) - - if status: - StCache.delete_all(CacheKey.TIMING_DETAILS.value) - setattr(cls, '_original_move_frame_one_step_forward', cls.move_frame_one_step_forward) - setattr(cls, "move_frame_one_step_forward", _cache_move_frame_one_step_forward) - - # ------------------ APP SETTING METHODS --------------------- def _cache_get_app_setting_from_uuid(self, *args, **kwargs): app_setting_list = StCache.get_all(CacheKey.APP_SETTING.value) diff --git a/utils/data_repo/api_repo.py b/utils/data_repo/api_repo.py index 3252e8fd..93ba7711 100644 --- a/utils/data_repo/api_repo.py +++ b/utils/data_repo/api_repo.py @@ -363,16 +363,7 @@ def remove_source_image(self, timing_uuid): res = self.http_put(self.TIMING_URL, data=update_data) return InternalResponse(res['payload'], 'success', res['status']) - def move_frame_one_step_forward(self, project_uuid, index_of_frame): - data = { - "project_id": project_uuid, - "index_of_frame": index_of_frame - } - - res = self.http_post(self.SHIFT_TIMING_URL, data=data) - return InternalResponse(res['payload'], 'success', res['status']) - # app setting def get_app_setting_from_uuid(self, uuid=None): res = self.http_get(self.APP_SETTING_URL, params={'uuid': uuid}) diff --git a/utils/data_repo/data_repo.py b/utils/data_repo/data_repo.py index d52a8e38..3cff1d0f 100644 --- a/utils/data_repo/data_repo.py +++ b/utils/data_repo/data_repo.py @@ -2,7 +2,7 @@ import json from shared.constants import InferenceParamType, InternalFileType, InternalResponse from shared.constants import SERVER, ServerType -from ui_components.models import InferenceLogObject, InternalAIModelObject, InternalAppSettingObject, InternalBackupObject, InternalFrameTimingObject, InternalProjectObject, InternalFileObject, InternalSettingObject, 
InternalUserObject +from ui_components.models import InferenceLogObject, InternalAIModelObject, InternalAppSettingObject, InternalBackupObject, InternalFrameTimingObject, InternalProjectObject, InternalFileObject, InternalSettingObject, InternalShotObject, InternalUserObject from utils.cache.cache_methods import cache_data import wrapt @@ -267,6 +267,11 @@ def get_timing_list_from_project(self, project_uuid=None): timing_list = res.data['data'] if res.status else None return [InternalFrameTimingObject(**timing) for timing in timing_list] if timing_list else [] + def get_timing_list_from_shot(self, shot_uuid=None): + res = self.db_repo.get_timing_list_from_shot(shot_uuid) + timing_list = res.data['data'] if res.status else None + return [InternalFrameTimingObject(**timing) for timing in timing_list] if timing_list else [] + def create_timing(self, **kwargs): res = self.db_repo.create_timing(**kwargs) timing = res.data['data'] if res.status else None @@ -296,10 +301,6 @@ def remove_primary_frame(self, timing_uuid): def remove_source_image(self, timing_uuid): res = self.db_repo.remove_source_image(timing_uuid) return res.status - - def move_frame_one_step_forward(self, project_uuid, index_of_frame): - res = self.db_repo.move_frame_one_step_forward(project_uuid, index_of_frame) - return res.status # app setting @@ -395,4 +396,33 @@ def acquire_lock(self, key): def release_lock(self, key): res = self.db_repo.release_lock(key) - return res.status \ No newline at end of file + return res.status + + # shot + def get_shot_from_uuid(self, shot_uuid): + res = self.db_repo.get_shot_from_uuid(shot_uuid) + shot = res.data['data'] if res.status else None + return InternalShotObject(**shot) if shot else None + + def get_shot_from_number(self, project_uuid, shot_number=0): + res = self.db_repo.get_shot_from_number(project_uuid, shot_number) + shot = res.data['data'] if res.status else None + return InternalShotObject(**shot) if shot else None + + def get_shot_list(self, 
project_uuid): + res = self.db_repo.get_shot_list(project_uuid) + shot_list = res.data['data'] if res.status else None + return [InternalShotObject(**shot) for shot in shot_list] if shot_list else [] + + def create_shot(self, project_uuid, name, duration, meta_data="", desc=""): + res = self.db_repo.create_shot(project_uuid, name, duration, meta_data, desc) + shot = res.data['data'] if res.status else None + return InternalShotObject(**shot) if shot else None + + def update_shot(self, shot_uuid, name=None, duration=None, meta_data=None, desc=None): + res = self.db_repo.update_shot(shot_uuid, name, duration, meta_data, desc) + return res.status + + def delete_shot(self, shot_uuid): + res = self.db_repo.delete_shot(shot_uuid) + return res.status From db932ee068065135ca85ef6de653bafb3d3bf302 Mon Sep 17 00:00:00 2001 From: piyushK52 Date: Thu, 26 Oct 2023 04:47:37 +0530 Subject: [PATCH 118/164] wip: frame time removed --- backend/db_repo.py | 2 +- backend/models.py | 48 -------- backend/serializers/dao.py | 1 - backend/serializers/dto.py | 1 - .../components/frame_styling_page.py | 4 +- ui_components/components/new_project_page.py | 4 +- ui_components/methods/common_methods.py | 3 +- ui_components/methods/video_methods.py | 104 +----------------- ui_components/models.py | 1 - .../widgets/add_key_frame_element.py | 22 +--- .../widgets/style_explorer_element.py | 2 +- ui_components/widgets/timeline_view.py | 2 +- 12 files changed, 16 insertions(+), 178 deletions(-) diff --git a/backend/db_repo.py b/backend/db_repo.py index 6d4c4eea..8c536612 100644 --- a/backend/db_repo.py +++ b/backend/db_repo.py @@ -922,7 +922,7 @@ def add_interpolated_clip(self, uuid, **kwargs): return InternalResponse({}, 'success', True) - def update_specific_timing(self, uuid, **kwargs): ## change this + def update_specific_timing(self, uuid, **kwargs): timing = Timing.objects.filter(uuid=uuid, is_disabled=False).first() if not timing: return InternalResponse({}, 'invalid timing uuid', False) diff 
--git a/backend/models.py b/backend/models.py index 03b22dfe..cf20f1e3 100644 --- a/backend/models.py +++ b/backend/models.py @@ -212,14 +212,6 @@ def save(self, *args, **kwargs): shots_to_move.update(shot_idx=F('shot_idx') + 1, timed_clip=None, preview_video=None) super(Shot, self).save(*args, **kwargs) - - # if the overall duration of the shot is updated - # then we update the duration of the last frame inside the shot - if self.old_duration != self.duration: - shot_timing_list = Timing.objects.filter(shot_id=self.id, is_disabled=False).order_by('aux_frame_index') - if shot_timing_list and len(shot_timing_list): - clip_duration = round(self.duration - shot_timing_list[len(shot_timing_list)-1].frame_time, 2) - shot_timing_list[len(shot_timing_list)-1].update(clip_duration=clip_duration) class Timing(BaseModel): @@ -234,7 +226,6 @@ class Timing(BaseModel): primary_image = models.ForeignKey(InternalFileObject, related_name="primary_image", on_delete=models.DO_NOTHING, null=True) # variant number that is currently selected (among alternative images) NONE if none is present shot_id = models.ForeignKey(Shot, on_delete=models.CASCADE, null=True) custom_model_id_list = models.TextField(default=None, null=True, blank=True) - frame_time = models.FloatField(default=None, null=True) frame_number = models.IntegerField(default=None, null=True) alternative_images = models.TextField(default=None, null=True) custom_pipeline = models.CharField(max_length=255, default=None, null=True, blank=True) @@ -263,7 +254,6 @@ def __init__(self, *args, **kwargs): self.old_is_disabled = self.is_disabled self.old_aux_frame_index = self.aux_frame_index self.old_timed_clip = self.timed_clip - self.old_frame_time = self.frame_time def save(self, *args, **kwargs): # TODO: updating details of every frame this way can be slow - implement a better strategy @@ -294,13 +284,6 @@ def save(self, *args, **kwargs): if self.aux_frame_index >= self.old_aux_frame_index: timings_to_move = 
Timing.objects.filter(project_id=self.project_id, aux_frame_index__gt=self.old_aux_frame_index, \ aux_frame_index__lte=self.aux_frame_index, is_disabled=False).order_by('aux_frame_index') - frame_time_list = [int(self.frame_time * 100) / 100] - for t in timings_to_move: - frame_time_list.append(t.frame_time) - # updating frame time - for idx, t in enumerate(timings_to_move): - Timing.objects.filter(uuid=t.uuid, is_disabled=False).update(frame_time=int(frame_time_list[idx] * 100) / 100) - self.frame_time = frame_time_list[-1] # moving the frames between old and new index one step backwards timings_to_move.update(aux_frame_index=F('aux_frame_index') - 1) @@ -308,17 +291,6 @@ def save(self, *args, **kwargs): timings_to_move = Timing.objects.filter(project_id=self.project_id, aux_frame_index__gte=self.aux_frame_index, \ aux_frame_index__lt=self.old_aux_frame_index, is_disabled=False).order_by('aux_frame_index') - frame_time_list = [self.frame_time] - for t in reversed(timings_to_move): - frame_time_list.append(t.frame_time) - # updating frame time - frame_time_list.reverse() - idx = 0 - self.frame_time = int(frame_time_list[idx] * 100) / 100 - idx += 1 - for t in timings_to_move: - Timing.objects.filter(uuid=t.uuid, is_disabled=False).update(frame_time=int(frame_time_list[idx] * 100) / 100) - idx += 1 # moving frames timings_to_move.update(aux_frame_index=F('aux_frame_index') + 1, timed_clip=None, preview_video=None) @@ -335,26 +307,6 @@ def save(self, *args, **kwargs): super().save(*args, **kwargs) - if update_frame_duration: - # updating clip_duration - timing_list = Timing.objects.filter(project_id=self.project_id, is_disabled=False).order_by('aux_frame_index') - length_of_list = len(timing_list) - - for idx, timing_item in enumerate(timing_list): - # last frame - if idx == (length_of_list - 1): - time_of_frame = timing_item.frame_time - duration_of_static_time = 0.0 - end_duration_of_frame = float(time_of_frame) + float(duration_of_static_time) - 
total_duration_of_frame = float(end_duration_of_frame) - float(time_of_frame) - else: - time_of_frame = timing_item.frame_time - next_timing = timing_list[idx + 1] - time_of_next_frame = next_timing.frame_time - total_duration_of_frame = float(time_of_next_frame) - float(time_of_frame) - - total_duration_of_frame = round(total_duration_of_frame, 2) - Timing.objects.filter(uuid=timing_item.uuid, is_disabled=False).update(clip_duration=total_duration_of_frame) def add_interpolated_clip_list(self, clip_uuid_list): cur_list = json.loads(self.interpolated_clip_list) if self.interpolated_clip_list else [] diff --git a/backend/serializers/dao.py b/backend/serializers/dao.py index fcf9e513..32c39dce 100644 --- a/backend/serializers/dao.py +++ b/backend/serializers/dao.py @@ -85,7 +85,6 @@ class CreateTimingDao(serializers.Serializer): preview_video_id = serializers.CharField(max_length=100, required=False) custom_model_id_list = serializers.CharField(max_length=100, required=False) shot_id = serializers.CharField(max_length=100) - frame_time = serializers.CharField(max_length=100) frame_number = serializers.CharField(max_length=100, required=False) primary_image = serializers.CharField(max_length=100, required=False) alternative_images = serializers.CharField(max_length=100, required=False) diff --git a/backend/serializers/dto.py b/backend/serializers/dto.py index 7badce7e..7a108112 100644 --- a/backend/serializers/dto.py +++ b/backend/serializers/dto.py @@ -98,7 +98,6 @@ class Meta: "canny_image", "preview_video", "custom_model_id_list", - "frame_time", "frame_number", "primary_image", "alternative_images", diff --git a/ui_components/components/frame_styling_page.py b/ui_components/components/frame_styling_page.py index 26f87655..15b62217 100644 --- a/ui_components/components/frame_styling_page.py +++ b/ui_components/components/frame_styling_page.py @@ -150,9 +150,9 @@ def frame_styling_page(shot_uuid: str): drawing_element(timing_list,project_settings, shot_uuid) with 
st.expander("➕ Add Key Frame", expanded=True): - selected_image, inherit_styling_settings, how_long_after, _ = add_key_frame_element(shot_uuid) + selected_image, inherit_styling_settings, _ = add_key_frame_element(shot_uuid) if st.button(f"Add key frame",type="primary",use_container_width=True): - add_key_frame(selected_image, inherit_styling_settings, how_long_after) + add_key_frame(selected_image, inherit_styling_settings) st.rerun() # -------------------- TIMELINE VIEW -------------------------- diff --git a/ui_components/components/new_project_page.py b/ui_components/components/new_project_page.py index 46cb7654..486502d4 100644 --- a/ui_components/components/new_project_page.py +++ b/ui_components/components/new_project_page.py @@ -1,6 +1,6 @@ import streamlit as st from banodoco_settings import create_new_project -from ui_components.methods.common_methods import save_audio_file,create_timings_row_at_frame_number, save_and_promote_image +from ui_components.methods.common_methods import save_audio_file,create_frame_inside_shot, save_and_promote_image from utils.common_utils import get_current_user_uuid, reset_project_state from utils.data_repo.data_repo import DataRepo import time @@ -79,7 +79,7 @@ def new_project_page(): current_user = data_repo.get_first_active_user() new_project, shot = create_new_project(current_user, new_project_name, width, height, "Images", "Interpolation") - new_timing = create_timings_row_at_frame_number(shot.uuid, 0) + new_timing = create_frame_inside_shot(shot.uuid, 0) if starting_image: try: diff --git a/ui_components/methods/common_methods.py b/ui_components/methods/common_methods.py index ea7e01ad..210b41b8 100644 --- a/ui_components/methods/common_methods.py +++ b/ui_components/methods/common_methods.py @@ -523,12 +523,11 @@ def get_audio_bytes_for_slice(timing_uuid): return audio_bytes -def create_timings_row_at_frame_number(shot_uuid, index_of_frame, frame_time=0.0): +def create_frame_inside_shot(shot_uuid, index_of_frame): 
data_repo = DataRepo() timing_data = { "shot_id": shot_uuid, - "frame_time": frame_time, "animation_style": AnimationStyleType.INTERPOLATION.value, "aux_frame_index": index_of_frame } diff --git a/ui_components/methods/video_methods.py b/ui_components/methods/video_methods.py index 9a2a4b57..ccf27043 100644 --- a/ui_components/methods/video_methods.py +++ b/ui_components/methods/video_methods.py @@ -7,117 +7,19 @@ import ffmpeg import streamlit as st import uuid -from moviepy.editor import concatenate_videoclips, TextClip, VideoFileClip, vfx, AudioFileClip +from moviepy.editor import concatenate_videoclips, TextClip, VideoFileClip, AudioFileClip from moviepy.video.compositing.CompositeVideoClip import CompositeVideoClip from backend.models import InternalFileObject -from shared.constants import AnimationToolType, InferenceType, InternalFileTag +from shared.constants import InferenceType, InternalFileTag from shared.file_upload.s3 import is_s3_image_url -from ui_components.constants import VideoQuality -from ui_components.methods.file_methods import convert_bytes_to_file, generate_temp_file +from ui_components.methods.file_methods import convert_bytes_to_file from ui_components.models import InternalFrameTimingObject, InternalSettingObject, InternalShotObject from utils.data_repo.data_repo import DataRepo from utils.media_processor.interpolator import VideoInterpolator from utils.media_processor.video import VideoProcessor -# NOTE: interpolated_clip_uuid signals which clip to promote to timed clip (this is the main variant) -# this function returns the 'single' preview_clip, which is basically timed_clip with the frame number -def create_or_get_single_preview_video(timing_uuid, interpolated_clip_uuid=None): - from ui_components.methods.file_methods import generate_temp_file - from ui_components.methods.common_methods import get_audio_bytes_for_slice - from ui_components.methods.common_methods import process_inference_output - from shared.constants import 
QUEUE_INFERENCE_QUERIES - - data_repo = DataRepo() - - timing: InternalFrameTimingObject = data_repo.get_timing_from_uuid( - timing_uuid) - project_details: InternalSettingObject = data_repo.get_project_setting( - timing.project.uuid) - - if not len(timing.interpolated_clip_list): - timing.interpolation_steps = 3 - next_timing = data_repo.get_next_timing(timing.uuid) - img_list = [timing.source_image.location, next_timing.source_image.location] - res = VideoInterpolator.video_through_frame_interpolation(img_list, \ - {"interpolation_steps": timing.interpolation_steps}, 1, \ - False) # TODO: queuing is not enabled here - - output_url, log = res[0] - - inference_data = { - "inference_type": InferenceType.SINGLE_PREVIEW_VIDEO.value, - "file_location_to_save": "videos/" + timing.project.uuid + "/assets/videos" + (str(uuid.uuid4())) + ".mp4", - "mime_type": "video/mp4", - "output": output_url, - "project_uuid": timing.project.uuid, - "log_uuid": log.uuid, - "timing_uuid": timing_uuid - } - - process_inference_output(**inference_data) - - timing = data_repo.get_timing_from_uuid(timing_uuid) - if not timing.timed_clip: - interpolated_clip = data_repo.get_file_from_uuid(interpolated_clip_uuid) if interpolated_clip_uuid \ - else timing.interpolated_clip_list[0] - - output_video = update_speed_of_video_clip(interpolated_clip, timing_uuid) - data_repo.update_specific_timing(timing_uuid, timed_clip_id=output_video.uuid) - - if not timing.preview_video: - timing = data_repo.get_timing_from_uuid(timing_uuid) - timed_clip = timing.timed_clip - - temp_video_file = None - if timed_clip.hosted_url and is_s3_image_url(timed_clip.hosted_url): - temp_video_file = generate_temp_file(timed_clip.hosted_url, '.mp4') - - file_path = temp_video_file.name if temp_video_file else timed_clip.local_path - clip = VideoFileClip(file_path) - - if temp_video_file: - os.remove(temp_video_file.name) - - number_text = TextClip(str(timing.aux_frame_index), - fontsize=24, color='white') - 
number_background = TextClip(" ", fontsize=24, color='black', bg_color='black', size=( - number_text.w + 10, number_text.h + 10)) - number_background = number_background.set_position( - ('left', 'top')).set_duration(clip.duration) - number_text = number_text.set_position( - (number_background.w - number_text.w - 5, number_background.h - number_text.h - 5)).set_duration(clip.duration) - clip_with_number = CompositeVideoClip([clip, number_background, number_text]) - - temp_output_file = tempfile.NamedTemporaryFile(delete=False, suffix=".mp4", mode='wb') - clip_with_number.write_videofile(filename=temp_output_file.name, codec='libx264', audio_codec='aac') - - if temp_output_file: - video_bytes = None - with open(file_path, 'rb') as f: - video_bytes = f.read() - - preview_video = convert_bytes_to_file( - file_location_to_save="videos/" + str(timing.project.uuid) + "/assets/videos/0_raw/" + str(uuid.uuid4()) + ".png", - mime_type="video/mp4", - file_bytes=video_bytes, - project_uuid=timing.project.uuid, - inference_log_id=None - ) - - data_repo.update_specific_timing(timing_uuid, preview_video_id=preview_video.uuid) - os.remove(temp_output_file.name) - - # adding audio if the audio file is present - if project_details.audio: - audio_bytes = get_audio_bytes_for_slice(timing_uuid) - add_audio_to_video_slice(timing.preview_video, audio_bytes) - - timing: InternalFrameTimingObject = data_repo.get_timing_from_uuid( - timing_uuid) - return timing.preview_video - def create_single_interpolated_clip(timing_uuid, quality, settings={}, variant_count=1): ''' - this includes all the animation styles [direct morphing, interpolation, image to video] diff --git a/ui_components/models.py b/ui_components/models.py index 39886ac8..44f315d2 100644 --- a/ui_components/models.py +++ b/ui_components/models.py @@ -126,7 +126,6 @@ def __init__(self, **kwargs): **kwargs["canny_image"]) if 'canny_image' in kwargs and kwargs["canny_image"] else None self.primary_image = InternalFileObject( 
**kwargs["primary_image"]) if 'primary_image' in kwargs and kwargs["primary_image"] else None - self.frame_time = kwargs['frame_time'] if 'frame_time' in kwargs else None self.frame_number = kwargs['frame_number'] if 'frame_number' in kwargs else None self.alternative_images = kwargs['alternative_images'] if 'alternative_images' in kwargs and kwargs["alternative_images"] else [ ] diff --git a/ui_components/widgets/add_key_frame_element.py b/ui_components/widgets/add_key_frame_element.py index eb835b8d..91022ec8 100644 --- a/ui_components/widgets/add_key_frame_element.py +++ b/ui_components/widgets/add_key_frame_element.py @@ -8,7 +8,7 @@ from utils.constants import ImageStage from ui_components.methods.file_methods import generate_pil_image,save_or_host_file -from ui_components.methods.common_methods import apply_image_transformations, clone_styling_settings, create_timings_row_at_frame_number, save_uploaded_image +from ui_components.methods.common_methods import apply_image_transformations, clone_styling_settings, create_frame_inside_shot, save_uploaded_image from PIL import Image @@ -65,9 +65,6 @@ def add_key_frame_element(shot_uuid): image_idx = st.session_state['current_frame_index'] - how_long_after = st.slider( - "How long after the current frame?", min_value=0.0, max_value=10.0, value=2.5, step=0.1) - radio_text = "Inherit styling settings from the " + ("current frame?" 
if source_of_starting_image == "Uploaded image" else "selected frame") inherit_styling_settings = st_memory.radio(radio_text, ["Yes", "No"], \ key="inherit_styling_settings", horizontal=True) @@ -91,9 +88,9 @@ def add_key_frame_element(shot_uuid): else: st.error("No Starting Image Found") - return selected_image, inherit_styling_settings, how_long_after, transformation_stage + return selected_image, inherit_styling_settings, transformation_stage -def add_key_frame(selected_image, inherit_styling_settings, how_long_after, target_frame_position=None, refresh_state=True): +def add_key_frame(selected_image, inherit_styling_settings, target_frame_position=None, refresh_state=True): data_repo = DataRepo() shot_uuid = st.session_state['shot_uuid'] timing_list = data_repo.get_timing_list_from_shot(shot_uuid) @@ -105,20 +102,11 @@ def add_key_frame(selected_image, inherit_styling_settings, how_long_after, targ target_frame_position = st.session_state['current_frame_index'] if target_frame_position is None else target_frame_position index_of_current_item = min(len(timing_list), target_frame_position) - if len(timing_list) == 0: - key_frame_time = 0.0 - elif target_frame_position is not None: - key_frame_time = float(timing_list[target_frame_position - 1].frame_time) + how_long_after - elif index_of_current_item == len(timing_list): - key_frame_time = float(timing_list[index_of_current_item - 1].frame_time) + how_long_after - else: - key_frame_time = (float(timing_list[index_of_current_item - 1].frame_time) + float( - timing_list[index_of_current_item].frame_time)) / 2.0 if len(timing_list) == 0: - _ = create_timings_row_at_frame_number(shot_uuid, 0) + _ = create_frame_inside_shot(shot_uuid, 0) else: - _ = create_timings_row_at_frame_number(shot_uuid, index_of_current_item, frame_time=key_frame_time) + _ = create_frame_inside_shot(shot_uuid, index_of_current_item) timing_list = data_repo.get_timing_list_from_shot(shot_uuid) if selected_image: diff --git 
a/ui_components/widgets/style_explorer_element.py b/ui_components/widgets/style_explorer_element.py index 7565bc9e..fa49c369 100644 --- a/ui_components/widgets/style_explorer_element.py +++ b/ui_components/widgets/style_explorer_element.py @@ -149,7 +149,7 @@ def style_explorer_element(shot_uuid): if st.button(f"Add to timeline", key=f"{gallery_image_list[i + j].uuid}", help="Promote this variant to the primary image", use_container_width=True): pil_image = generate_pil_image(gallery_image_list[i + j].location) - add_key_frame(pil_image, False, 2.5, len(data_repo.get_timing_list_from_shot(shot_uuid)), refresh_state=False) + add_key_frame(pil_image, False, len(data_repo.get_timing_list_from_shot(shot_uuid)), refresh_state=False) # removing this from the gallery view data_repo.update_file(gallery_image_list[i + j].uuid, tag="") diff --git a/ui_components/widgets/timeline_view.py b/ui_components/widgets/timeline_view.py index 8aa11505..1ac284a5 100644 --- a/ui_components/widgets/timeline_view.py +++ b/ui_components/widgets/timeline_view.py @@ -24,7 +24,7 @@ def timeline_view_buttons(i, j, timing_list, shift_frames_setting, time_setter_t with btn3: if st.button("🔁", key=f"copy_frame_{timing_list[i + j].uuid}"): pil_image = generate_pil_image(timing_list[i + j].primary_image.location) - add_key_frame(pil_image, False, 2.5, timing_list[i + j].aux_frame_index+1, refresh_state=False) + add_key_frame(pil_image, False, timing_list[i + j].aux_frame_index+1, refresh_state=False) st.rerun() From 9e9db5f048560060d4eb7fc801f7b5234af284a7 Mon Sep 17 00:00:00 2001 From: piyushK52 Date: Thu, 26 Oct 2023 04:58:36 +0530 Subject: [PATCH 119/164] wip: frame_number removed --- backend/models.py | 1 - backend/serializers/dao.py | 1 - backend/serializers/dto.py | 1 - ui_components/components/frame_styling_page.py | 6 +++--- ui_components/models.py | 1 - ui_components/widgets/image_carousal.py | 2 +- utils/data_repo/api_repo.py | 4 ++-- utils/data_repo/data_repo.py | 4 ++-- 8 files changed, 
8 insertions(+), 12 deletions(-) diff --git a/backend/models.py b/backend/models.py index cf20f1e3..f709c021 100644 --- a/backend/models.py +++ b/backend/models.py @@ -226,7 +226,6 @@ class Timing(BaseModel): primary_image = models.ForeignKey(InternalFileObject, related_name="primary_image", on_delete=models.DO_NOTHING, null=True) # variant number that is currently selected (among alternative images) NONE if none is present shot_id = models.ForeignKey(Shot, on_delete=models.CASCADE, null=True) custom_model_id_list = models.TextField(default=None, null=True, blank=True) - frame_number = models.IntegerField(default=None, null=True) alternative_images = models.TextField(default=None, null=True) custom_pipeline = models.CharField(max_length=255, default=None, null=True, blank=True) prompt = models.TextField(default='', blank=True) diff --git a/backend/serializers/dao.py b/backend/serializers/dao.py index 32c39dce..183da287 100644 --- a/backend/serializers/dao.py +++ b/backend/serializers/dao.py @@ -85,7 +85,6 @@ class CreateTimingDao(serializers.Serializer): preview_video_id = serializers.CharField(max_length=100, required=False) custom_model_id_list = serializers.CharField(max_length=100, required=False) shot_id = serializers.CharField(max_length=100) - frame_number = serializers.CharField(max_length=100, required=False) primary_image = serializers.CharField(max_length=100, required=False) alternative_images = serializers.CharField(max_length=100, required=False) custom_pipeline = serializers.CharField(max_length=100, required=False, allow_blank=True, default="") diff --git a/backend/serializers/dto.py b/backend/serializers/dto.py index 7a108112..85fbca64 100644 --- a/backend/serializers/dto.py +++ b/backend/serializers/dto.py @@ -98,7 +98,6 @@ class Meta: "canny_image", "preview_video", "custom_model_id_list", - "frame_number", "primary_image", "alternative_images", "custom_pipeline", diff --git a/ui_components/components/frame_styling_page.py 
b/ui_components/components/frame_styling_page.py index 15b62217..7f6e3e2f 100644 --- a/ui_components/components/frame_styling_page.py +++ b/ui_components/components/frame_styling_page.py @@ -40,10 +40,10 @@ def frame_styling_page(shot_uuid: str): st.session_state['num_inference_steps'] = project_settings.default_num_inference_steps st.session_state['transformation_stage'] = project_settings.default_stage - if f"{shot.uuid}__" + "current_frame_uuid" not in st.session_state: + if "current_frame_uuid" not in st.session_state: timing = data_repo.get_timing_list_from_shot(shot_uuid)[0] - st.session_state[f"{shot.uuid}__" + 'current_frame_uuid'] = timing.uuid - st.session_state[f"{shot.uuid}__" + 'current_frame_index'] = timing.aux_frame_index + 1 + st.session_state['current_frame_uuid'] = timing.uuid + st.session_state['current_frame_index'] = timing.aux_frame_index + 1 if 'frame_styling_view_type' not in st.session_state: st.session_state['frame_styling_view_type'] = "Individual" diff --git a/ui_components/models.py b/ui_components/models.py index 44f315d2..ba6e5e7c 100644 --- a/ui_components/models.py +++ b/ui_components/models.py @@ -126,7 +126,6 @@ def __init__(self, **kwargs): **kwargs["canny_image"]) if 'canny_image' in kwargs and kwargs["canny_image"] else None self.primary_image = InternalFileObject( **kwargs["primary_image"]) if 'primary_image' in kwargs and kwargs["primary_image"] else None - self.frame_number = kwargs['frame_number'] if 'frame_number' in kwargs else None self.alternative_images = kwargs['alternative_images'] if 'alternative_images' in kwargs and kwargs["alternative_images"] else [ ] self.custom_pipeline = kwargs['custom_pipeline'] if 'custom_pipeline' in kwargs and kwargs["custom_pipeline"] else None diff --git a/ui_components/widgets/image_carousal.py b/ui_components/widgets/image_carousal.py index 77be10f4..cfc3acf2 100644 --- a/ui_components/widgets/image_carousal.py +++ b/ui_components/widgets/image_carousal.py @@ -62,7 +62,7 @@ def 
carousal_of_images_element(shot_uuid, stage=WorkflowStageType.STYLED.value): header1, header2, header3, header4, header5 = st.columns([1, 1, 1, 1, 1]) - current_frame_uuid = st.session_state[f"{shot.uuid}__" + 'current_frame_uuid'] + current_frame_uuid = st.session_state['current_frame_uuid'] current_timing = data_repo.get_timing_from_uuid(current_frame_uuid) with header1: diff --git a/utils/data_repo/api_repo.py b/utils/data_repo/api_repo.py index 93ba7711..c18df25e 100644 --- a/utils/data_repo/api_repo.py +++ b/utils/data_repo/api_repo.py @@ -312,8 +312,8 @@ def get_timing_from_uuid(self, uuid): res = self.http_get(self.TIMING_URL, params={'uuid': uuid}) return InternalResponse(res['payload'], 'success', res['status']) - def get_timing_from_frame_number(self, project_uuid, frame_number): - res = self.http_get(self.PROJECT_TIMING_URL, params={'project_id': project_uuid, 'frame_number': frame_number}) + def get_timing_from_frame_number(self, shot_uuid, frame_number): + res = self.http_get(self.PROJECT_TIMING_URL, params={'project_id': shot_uuid, 'frame_number': frame_number}) return InternalResponse(res['payload'], 'success', res['status']) # this is based on the aux_frame_index and not the order in the db diff --git a/utils/data_repo/data_repo.py b/utils/data_repo/data_repo.py index 3cff1d0f..e8a8fed2 100644 --- a/utils/data_repo/data_repo.py +++ b/utils/data_repo/data_repo.py @@ -247,8 +247,8 @@ def get_timing_from_uuid(self, uuid, **kwargs): timing = self.db_repo.get_timing_from_uuid(uuid).data['data'] return InternalFrameTimingObject(**timing) if timing else None - def get_timing_from_frame_number(self, project_uuid, frame_number): - res = self.db_repo.get_timing_from_frame_number(project_uuid, frame_number) + def get_timing_from_frame_number(self, shot_uuid, frame_number): + res = self.db_repo.get_timing_from_frame_number(shot_uuid, frame_number) timing = res.data['data'] if res.status else None return InternalFrameTimingObject(**timing) if timing else None 
From 0b8b629558cc4f6e25b0f6634979d06b82e38f1d Mon Sep 17 00:00:00 2001 From: piyushK52 Date: Thu, 26 Oct 2023 06:11:36 +0530 Subject: [PATCH 120/164] wip: backend apis added (untested) --- backend/db_repo.py | 1 + utils/data_repo/api_repo.py | 50 +++++++++++++++++++++++++++++++++++++ 2 files changed, 51 insertions(+) diff --git a/backend/db_repo.py b/backend/db_repo.py index 8c536612..bbfb2b2f 100644 --- a/backend/db_repo.py +++ b/backend/db_repo.py @@ -1548,6 +1548,7 @@ def get_shot_from_uuid(self, shot_uuid): return InternalResponse(payload, 'shot fetched successfully', True) + # TODO: implement pagination if shot count gets too high def get_shot_list(self, project_uuid): project = Project.objects.filter(uuid=project_uuid, is_disabled=False).first() if not project: diff --git a/utils/data_repo/api_repo.py b/utils/data_repo/api_repo.py index c18df25e..8ec7a5a2 100644 --- a/utils/data_repo/api_repo.py +++ b/utils/data_repo/api_repo.py @@ -77,6 +77,13 @@ def _setup_urls(self): # payment self.STRIPE_PAYMENT_URL = '/v1/payment/stripe-link' + # lock + self.LOCK_URL = 'v1/data/lock' + + # shot + self.SHOT_URL = 'v1/data/shot' + self.SHOT_LIST_URL = 'v1/data/shot/list' + def logout(self): delete_url_param(AUTH_TOKEN) st.rerun() @@ -329,6 +336,10 @@ def get_timing_list_from_project(self, project_uuid=None): res = self.http_get(self.TIMING_LIST_URL, params={'project_id': project_uuid, 'page': 1}) return InternalResponse(res['payload'], 'success', res['status']) + def get_timing_list_from_shot(self, shot_uuid=None): + res = self.http_get(self.TIMING_LIST_URL, params={'shot_id': shot_uuid, 'page': 1}) + return InternalResponse(res['payload'], 'success', res['status']) + def create_timing(self, **kwargs): res = self.http_post(url=self.TIMING_URL, data=kwargs) return InternalResponse(res['payload'], 'success', res['status']) @@ -447,4 +458,43 @@ def acquire_lock(self, key): def release_lock(self, key): res = self.http_get(self.LOCK_URL, params={'key': key, 'action': 
'release'}) + return InternalResponse(res['payload'], 'success', res['status']) + + # shot + def get_shot_from_uuid(self, shot_uuid): + res = self.http_get(self.SHOT_URL, params={'uuid': shot_uuid}) + return InternalResponse(res['payload'], 'success', res['status']) + + def get_shot_from_number(self, project_uuid, shot_number=0): + res = self.http_get(self.SHOT_URL, params={'project_id': project_uuid, 'shot_idx': shot_number}) + return InternalResponse(res['payload'], 'success', res['status']) + + def get_shot_list(self, project_uuid): + res = self.http_get(self.SHOT_LIST_URL, params={'project_id': project_uuid}) + return InternalResponse(res['payload'], 'success', res['status']) + + def create_shot(self, project_uuid, name, duration, meta_data="", desc=""): + data = { + 'project_id': project_uuid, + 'name': name, + 'duration': duration, + 'meta_data': meta_data, + 'desc': desc + } + res = self.http_post(self.SHOT_URL, data=data) + return InternalResponse(res['payload'], 'success', res['status']) + + def update_shot(self, shot_uuid, name=None, duration=None, meta_data=None, desc=None): + update_data = { + 'uuid': shot_uuid, + 'name': name, + 'duration': duration, + 'meta_data': meta_data, + 'desc': desc + } + res = self.http_put(self.SHOT_URL, data=update_data) + return InternalResponse(res['payload'], 'success', res['status']) + + def delete_shot(self, shot_uuid): + res = self.http_delete(self.SHOT_URL, params={'uuid': shot_uuid}) return InternalResponse(res['payload'], 'success', res['status']) \ No newline at end of file From 09866b4867c48517f5bcf2bf9f50eb415da65e98 Mon Sep 17 00:00:00 2001 From: piyushK52 Date: Sat, 28 Oct 2023 11:25:49 +0530 Subject: [PATCH 121/164] wip: interpolated clip, timed clip and preview clip removed from the timing table --- backend/db_repo.py | 88 ++----------------- backend/models.py | 37 ++------ backend/serializers/dao.py | 3 - backend/serializers/dto.py | 17 ++-- banodoco_runner.py | 1 - shared/constants.py | 1 - 
.../components/frame_styling_page.py | 4 +- ui_components/constants.py | 1 - ui_components/methods/common_methods.py | 48 ++-------- ui_components/methods/video_methods.py | 28 ++---- ui_components/models.py | 28 +++--- .../widgets/animation_style_element.py | 9 +- .../widgets/frame_clip_generation_elements.py | 75 ---------------- ui_components/widgets/timeline_view.py | 13 +-- .../widgets/variant_comparison_grid.py | 17 +++- 15 files changed, 70 insertions(+), 300 deletions(-) delete mode 100644 ui_components/widgets/frame_clip_generation_elements.py diff --git a/backend/db_repo.py b/backend/db_repo.py index bbfb2b2f..7e42f6a1 100644 --- a/backend/db_repo.py +++ b/backend/db_repo.py @@ -834,24 +834,6 @@ def create_timing(self, **kwargs): attributes._data['source_image_id'] = source_image.id - if 'interpolated_clip_list' in attributes.data and attributes.data['interpolated_clip_list'] != None: - for clip_uuid in attributes.data['interpolated_clip_list']: - interpolated_clip: InternalFileObject = InternalFileObject.objects.filter(uuid=clip_uuid, is_disabled=False).first() - if not interpolated_clip: - return InternalResponse({}, 'invalid interpolated clip uuid', False) - - attributes._data['interpolated_clip_list'] = list(set(attributes._data['interpolated_clip_list'])) - - - if 'timed_clip_id' in attributes.data: - if attributes.data['timed_clip_id'] != None: - timed_clip: InternalFileObject = InternalFileObject.objects.filter(uuid=attributes.data['timed_clip_id'], is_disabled=False).first() - if not timed_clip: - return InternalResponse({}, 'invalid timed clip uuid', False) - - attributes._data['timed_clip_id'] = timed_clip.id - - if 'mask_id' in attributes.data: if attributes.data['mask_id'] != None: mask: InternalFileObject = InternalFileObject.objects.filter(uuid=attributes.data['mask_id'], is_disabled=False).first() @@ -870,15 +852,6 @@ def create_timing(self, **kwargs): attributes._data['canny_image_id'] = canny_image.id - if 'preview_video_id' in 
attributes.data: - if attributes.data['preview_video_id'] != None: - preview_video: InternalFileObject = InternalFileObject.objects.filter(uuid=attributes.data['preview_video_id'], is_disabled=False).first() - if not preview_video: - return InternalResponse({}, 'invalid preview video uuid', False) - - attributes._data['preview_video_id'] = preview_video.id - - if 'primay_image_id' in attributes.data: if attributes.data['primay_image_id'] != None: primay_image: InternalFileObject = InternalFileObject.objects.filter(uuid=attributes.data['primay_image_id'], is_disabled=False).first() @@ -908,17 +881,17 @@ def remove_existing_timing(self, project_uuid): return InternalResponse({}, 'timing removed successfully', True) def add_interpolated_clip(self, uuid, **kwargs): - timing = Timing.objects.filter(uuid=uuid, is_disabled=False).first() - if not timing: - return InternalResponse({}, 'invalid timing uuid', False) + shot = Shot.objects.filter(uuid=uuid, is_disabled=False).first() + if not shot: + return InternalResponse({}, 'invalid shot uuid', False) if 'interpolated_clip_id' in kwargs and kwargs['interpolated_clip_id'] != None: interpolated_clip: InternalFileObject = InternalFileObject.objects.filter(uuid=kwargs['interpolated_clip_id'], is_disabled=False).first() if not interpolated_clip: return InternalResponse({}, 'invalid interpolated clip uuid', False) - timing.add_interpolated_clip_list([interpolated_clip.uuid.hex]) - timing.save() + shot.add_interpolated_clip_list([interpolated_clip.uuid.hex]) + shot.save() return InternalResponse({}, 'success', True) @@ -961,26 +934,6 @@ def update_specific_timing(self, uuid, **kwargs): kwargs['source_image_id'] = source_image.id - if 'interpolated_clip_list' in kwargs and kwargs['interpolated_clip_list'] != None: - cur_list = [] - for clip_uuid in kwargs['interpolated_clip_list']: - interpolated_clip: InternalFileObject = InternalFileObject.objects.filter(uuid=clip_uuid, is_disabled=False).first() - if not interpolated_clip: - 
return InternalResponse({}, 'invalid interpolated clip uuid', False) - - cur_list.append(interpolated_clip.uuid) - kwargs['interpolated_clip_list'] = list(set(kwargs['interpolated_clip_list'])) - - - if 'timed_clip_id' in kwargs: - if kwargs['timed_clip_id'] != None: - timed_clip: InternalFileObject = InternalFileObject.objects.filter(uuid=kwargs['timed_clip_id'], is_disabled=False).first() - if not timed_clip: - return InternalResponse({}, 'invalid timed clip uuid', False) - - kwargs['timed_clip_id'] = timed_clip.id - - if 'mask_id' in kwargs: if kwargs['mask_id'] != None: mask: InternalFileObject = InternalFileObject.objects.filter(uuid=kwargs['mask_id'], is_disabled=False).first() @@ -999,15 +952,6 @@ def update_specific_timing(self, uuid, **kwargs): kwargs['canny_image_id'] = canny_image.id - if 'preview_video_id' in kwargs: - if kwargs['preview_video_id'] != None: - preview_video: InternalFileObject = InternalFileObject.objects.filter(uuid=kwargs['preview_video_id'], is_disabled=False).first() - if not preview_video: - return InternalResponse({}, 'invalid preview video uuid', False) - - kwargs['preview_video_id'] = preview_video.id - - if 'primay_image_id' in kwargs: if kwargs['primay_image_id'] != None: primay_image: InternalFileObject = InternalFileObject.objects.filter(uuid=kwargs['primay_image_id'], is_disabled=False).first() @@ -1340,21 +1284,12 @@ def create_backup(self, project_uuid, backup_name): if timing.source_image: file_uuid_list.add(timing.source_image.uuid) - if timing.interpolated_clip_list: - file_uuid_list.extend(json.loads(timing.interpolated_clip_list)) - - if timing.timed_clip: - file_uuid_list.add(timing.timed_clip.uuid) - if timing.mask: file_uuid_list.add(timing.mask.uuid) if timing.canny_image: file_uuid_list.add(timing.canny_image.uuid) - if timing.preview_video: - file_uuid_list.add(timing.preview_video.uuid) - if timing.primary_image: file_uuid_list.add(timing.primary_image.uuid) @@ -1382,22 +1317,12 @@ def create_backup(self, 
project_uuid, backup_name): timing['source_image_uuid'] = str(id_file_dict[timing['source_image_id']].uuid) if timing['source_image_id'] else None del timing['source_image_id'] - # TODO: fix this code using interpolated_clip_list - timing['interpolated_clip_uuid'] = str(id_file_dict[timing['interpolated_clip_id']].uuid) if timing['interpolated_clip_id'] else None - del timing['interpolated_clip_id'] - - timing['timed_clip_uuid'] = str(id_file_dict[timing['timed_clip_id']].uuid) if timing['timed_clip_id'] else None - del timing['timed_clip_id'] - timing['mask_uuid'] = str(id_file_dict[timing['mask_id']].uuid) if timing['mask_id'] else None del timing['mask_id'] timing['canny_image_uuid'] = str(id_file_dict[timing['canny_image_id']].uuid) if timing['canny_image_id'] else None del timing['canny_image_id'] - timing['preview_video_uuid'] = str(id_file_dict[timing['preview_video_id']].uuid) if timing['preview_video_id'] else None - del timing['preview_video_id'] - timing['primary_image_uuid'] = str(id_file_dict[timing['primary_image_id']].uuid) if timing['primary_image_id'] else None del timing['primary_image_id'] @@ -1475,11 +1400,8 @@ def restore_backup(self, backup_uuid: str): timing.uuid, model_uuid=backup_timing['model_uuid'], source_image_uuid=backup_timing['source_image_uuid'], - interpolated_clip_list=backup_timing['interpolated_clip_list'], - timed_clip=backup_timing['timed_clip_uuid'], mask=backup_timing['mask_uuid'], canny_image=backup_timing['canny_image_uuid'], - preview_video=backup_timing['preview_video_uuid'], primary_image=backup_timing['primary_image_uuid'], custom_model_id_list=backup_timing['custom_model_id_list'], frame_time=backup_timing['frame_time'], diff --git a/backend/models.py b/backend/models.py index f709c021..8ae95ada 100644 --- a/backend/models.py +++ b/backend/models.py @@ -208,8 +208,6 @@ def save(self, *args, **kwargs): else: shots_to_move = Shot.objects.filter(project_id=self.project_id, shot_idx__gte=self.shot_idx, \ 
shot_idx__lt=self.old_shot_idx, is_disabled=False).order_by('shot_idx') - # moving frames - shots_to_move.update(shot_idx=F('shot_idx') + 1, timed_clip=None, preview_video=None) super(Shot, self).save(*args, **kwargs) @@ -218,11 +216,8 @@ class Timing(BaseModel): project = models.ForeignKey(Project, on_delete=models.CASCADE, null=True) model = models.ForeignKey(AIModel, on_delete=models.DO_NOTHING, null=True) source_image = models.ForeignKey(InternalFileObject, related_name="source_image", on_delete=models.DO_NOTHING, null=True) - interpolated_clip_list = models.TextField(default=None, null=True) - timed_clip = models.ForeignKey(InternalFileObject, related_name="timed_clip", on_delete=models.DO_NOTHING, null=True) mask = models.ForeignKey(InternalFileObject, related_name="mask", on_delete=models.DO_NOTHING, null=True) canny_image = models.ForeignKey(InternalFileObject, related_name="canny_image", on_delete=models.DO_NOTHING, null=True) - preview_video = models.ForeignKey(InternalFileObject, related_name="preview_video", on_delete=models.DO_NOTHING, null=True) primary_image = models.ForeignKey(InternalFileObject, related_name="primary_image", on_delete=models.DO_NOTHING, null=True) # variant number that is currently selected (among alternative images) NONE if none is present shot_id = models.ForeignKey(Shot, on_delete=models.CASCADE, null=True) custom_model_id_list = models.TextField(default=None, null=True, blank=True) @@ -252,16 +247,14 @@ def __init__(self, *args, **kwargs): super(Timing, self).__init__(*args, **kwargs) self.old_is_disabled = self.is_disabled self.old_aux_frame_index = self.aux_frame_index - self.old_timed_clip = self.timed_clip def save(self, *args, **kwargs): # TODO: updating details of every frame this way can be slow - implement a better strategy # ------ handling aux_frame_index ------ - update_frame_duration = False # if the frame is being deleted (disabled) if self.old_is_disabled != self.is_disabled and self.is_disabled: - timing_list = 
Timing.objects.filter(project_id=self.project_id, \ + timing_list = Timing.objects.filter(shot_id=self.shot_id, \ aux_frame_index__gte=self.aux_frame_index, is_disabled=False).order_by('aux_frame_index') # shifting aux_frame_index of all frames after this frame one backwards @@ -275,44 +268,24 @@ def save(self, *args, **kwargs): if (not self.id or self.old_aux_frame_index != self.aux_frame_index) and not self.is_disabled: if not self.id: # shifting aux_frame_index of all frames after this frame one forward - if Timing.objects.filter(project_id=self.project_id, aux_frame_index=self.aux_frame_index, is_disabled=False).exists(): - timing_list = Timing.objects.filter(project_id=self.project_id, \ + if Timing.objects.filter(shot_id=self.shot_id, aux_frame_index=self.aux_frame_index, is_disabled=False).exists(): + timing_list = Timing.objects.filter(shot_id=self.shot_id, \ aux_frame_index__gte=self.aux_frame_index, is_disabled=False) timing_list.update(aux_frame_index=F('aux_frame_index') + 1) elif self.old_aux_frame_index != self.aux_frame_index: if self.aux_frame_index >= self.old_aux_frame_index: - timings_to_move = Timing.objects.filter(project_id=self.project_id, aux_frame_index__gt=self.old_aux_frame_index, \ + timings_to_move = Timing.objects.filter(shot_id=self.shot_id, aux_frame_index__gt=self.old_aux_frame_index, \ aux_frame_index__lte=self.aux_frame_index, is_disabled=False).order_by('aux_frame_index') # moving the frames between old and new index one step backwards timings_to_move.update(aux_frame_index=F('aux_frame_index') - 1) else: - timings_to_move = Timing.objects.filter(project_id=self.project_id, aux_frame_index__gte=self.aux_frame_index, \ + timings_to_move = Timing.objects.filter(shot_id=self.shot_id, aux_frame_index__gte=self.aux_frame_index, \ aux_frame_index__lt=self.old_aux_frame_index, is_disabled=False).order_by('aux_frame_index') - - # moving frames - timings_to_move.update(aux_frame_index=F('aux_frame_index') + 1, timed_clip=None, 
preview_video=None) - - - self.interpolated_video_id = None - self.timed_clip_id = None - - update_frame_duration = True - - # ------ handling timed_clip ------ - # if timed_clip is deleted/changed then preview_video will be deleted - if self.old_timed_clip and (not self.timed_clip or self.old_timed_clip != self.timed_clip): - self.preview_video = None super().save(*args, **kwargs) - def add_interpolated_clip_list(self, clip_uuid_list): - cur_list = json.loads(self.interpolated_clip_list) if self.interpolated_clip_list else [] - cur_list.extend(clip_uuid_list) - cur_list = list(set(cur_list)) - self.interpolated_clip_list = json.dumps(cur_list) - @property def alternative_images_list(self): image_id_list = json.loads(self.alternative_images) if self.alternative_images else [] diff --git a/backend/serializers/dao.py b/backend/serializers/dao.py index 183da287..4a19028b 100644 --- a/backend/serializers/dao.py +++ b/backend/serializers/dao.py @@ -78,11 +78,8 @@ class CreateTimingDao(serializers.Serializer): project_id = serializers.CharField(max_length=100) model_id = serializers.CharField(max_length=100, required=False) source_image_id = serializers.CharField(max_length=100, required=False) - interpolated_clip_list = serializers.CharField(max_length=None, required=False) - timed_clip_id = serializers.CharField(max_length=100, required=False) mask_id = serializers.CharField(max_length=100, required=False) canny_image_id = serializers.CharField(max_length=100, required=False) - preview_video_id = serializers.CharField(max_length=100, required=False) custom_model_id_list = serializers.CharField(max_length=100, required=False) shot_id = serializers.CharField(max_length=100) primary_image = serializers.CharField(max_length=100, required=False) diff --git a/backend/serializers/dto.py b/backend/serializers/dto.py index 85fbca64..7883f4ca 100644 --- a/backend/serializers/dto.py +++ b/backend/serializers/dto.py @@ -78,11 +78,8 @@ class TimingDto(serializers.ModelSerializer): 
project = ProjectDto() model = AIModelDto() source_image = InternalFileDto() - interpolated_clip_list = serializers.SerializerMethodField() - timed_clip = InternalFileDto() mask = InternalFileDto() canny_image = InternalFileDto() - preview_video = InternalFileDto() primary_image = InternalFileDto() class Meta: @@ -92,11 +89,8 @@ class Meta: "project", "model", "source_image", - "interpolated_clip_list", - "timed_clip", "mask", "canny_image", - "preview_video", "custom_model_id_list", "primary_image", "alternative_images", @@ -119,11 +113,6 @@ class Meta: "transformation_stage" ) - def get_interpolated_clip_list(self, obj): - id_list = json.loads(obj.interpolated_clip_list) if obj.interpolated_clip_list else [] - file_list = InternalFileObject.objects.filter(uuid__in=id_list, is_disabled=False).all() - return [InternalFileDto(file).data for file in file_list] - class AppSettingDto(serializers.ModelSerializer): @@ -207,6 +196,7 @@ class Meta: class ShotDto(serializers.ModelSerializer): timing_list = serializers.SerializerMethodField() + interpolated_clip_list = serializers.SerializerMethodField() main_clip = InternalFileDto() class Meta: @@ -226,4 +216,9 @@ class Meta: def get_timing_list(self, obj): timing_list = self.context.get("timing_list", []) return [TimingDto(timing).data for timing in timing_list] + + def get_interpolated_clip_list(self, obj): + id_list = json.loads(obj.interpolated_clip_list) if obj.interpolated_clip_list else [] + file_list = InternalFileObject.objects.filter(uuid__in=id_list, is_disabled=False).all() + return [InternalFileDto(file).data for file in file_list] \ No newline at end of file diff --git a/banodoco_runner.py b/banodoco_runner.py index 0ebc844e..0589011f 100644 --- a/banodoco_runner.py +++ b/banodoco_runner.py @@ -104,7 +104,6 @@ def check_and_update_db(): if origin_data['inference_type'] in [InferenceType.FRAME_INTERPOLATION.value, \ InferenceType.FRAME_TIMING_IMAGE_INFERENCE.value, \ - InferenceType.SINGLE_PREVIEW_VIDEO.value, 
\ InferenceType.FRAME_INPAINTING.value]: if str(log.project.uuid) not in timing_update_list: timing_update_list[str(log.project.uuid)] = [] diff --git a/shared/constants.py b/shared/constants.py index a3d60749..0d46e3ec 100644 --- a/shared/constants.py +++ b/shared/constants.py @@ -77,7 +77,6 @@ class ViewType(ExtendedEnum): class InferenceType(ExtendedEnum): FRAME_TIMING_IMAGE_INFERENCE = "frame_timing_inference" # for generating variants of a frame FRAME_TIMING_VIDEO_INFERENCE = "frame_timing_video_inference" # for generating variants of a video - SINGLE_PREVIEW_VIDEO = "single_preview_video" # for generating a single preview video FRAME_INTERPOLATION = "frame_interpolation" # for generating single/multiple interpolated videos GALLERY_IMAGE_GENERATION = "gallery_image_generation" # for generating gallery images FRAME_INPAINTING = "frame_inpainting" # for generating inpainted frames diff --git a/ui_components/components/frame_styling_page.py b/ui_components/components/frame_styling_page.py index 7f6e3e2f..2ff9ae46 100644 --- a/ui_components/components/frame_styling_page.py +++ b/ui_components/components/frame_styling_page.py @@ -70,11 +70,11 @@ def frame_styling_page(shot_uuid: str): frame_selector_widget() if st.session_state['page'] == CreativeProcessType.MOTION.value: - variant_comparison_grid(st.session_state['current_frame_uuid'], stage=CreativeProcessType.MOTION.value) + variant_comparison_grid(shot_uuid, stage=CreativeProcessType.MOTION.value) st.markdown("***") with st.expander("🎬 Choose Animation Style & Create Variants", expanded=True): - animation_style_element(st.session_state['current_frame_uuid'], shot_uuid) + animation_style_element(shot_uuid) elif st.session_state['page'] == CreativeProcessType.STYLING.value: variant_comparison_grid(st.session_state['current_frame_uuid'], stage=CreativeProcessType.STYLING.value) diff --git a/ui_components/constants.py b/ui_components/constants.py index c13192e3..99fb7347 100644 --- a/ui_components/constants.py +++ 
b/ui_components/constants.py @@ -10,7 +10,6 @@ class WorkflowStageType(ExtendedEnum): class VideoQuality(ExtendedEnum): HIGH = "High-Quality" - PREVIEW = "Preview" LOW = "Low" class CreativeProcessType(ExtendedEnum): diff --git a/ui_components/methods/common_methods.py b/ui_components/methods/common_methods.py index 210b41b8..281f3238 100644 --- a/ui_components/methods/common_methods.py +++ b/ui_components/methods/common_methods.py @@ -286,16 +286,16 @@ def promote_image_variant(timing_uuid, variant_to_promote_frame_number: str): _ = data_repo.get_timing_list_from_shot(timing.project.uuid) -def promote_video_variant(timing_uuid, variant_uuid): +def promote_video_variant(shot_uuid, variant_uuid): ''' this first changes the duration of the interpolated_clip to the frame clip_duration then adds the clip to the timed_clip (which is considered as the main variant) ''' data_repo = DataRepo() - timing = data_repo.get_timing_from_uuid(timing_uuid) + shot = data_repo.get_shot_from_uuid(shot_uuid) variant_to_promote = None - for variant in timing.interpolated_clip_list: + for variant in shot.interpolated_clip_list: if variant.uuid == variant_uuid: variant_to_promote = variant break @@ -309,18 +309,18 @@ def promote_video_variant(timing_uuid, variant_uuid): else: video = VideoFileClip(variant_to_promote.location) - if video.duration != timing.clip_duration: + if video.duration != shot.duration: video_bytes = VideoProcessor.update_video_speed( variant_to_promote.location, - timing.animation_style, - timing.clip_duration + shot.animation_style, + shot.duration ) hosted_url = save_or_host_file_bytes(video_bytes, variant_to_promote.local_path) if hosted_url: data_repo.update_file(video.uuid, hosted_url=hosted_url) - data_repo.update_specific_timing(timing.uuid, timed_clip_id=variant_to_promote.uuid) + data_repo.update_shot(shot.uuid, main_clip_id=variant_to_promote.uuid) @@ -741,40 +741,6 @@ def process_inference_output(**kwargs): del kwargs['log_uuid'] 
data_repo.update_inference_log_origin_data(log_uuid, **kwargs) - # --------------------- SINGLE PREVIEW VIDEO INFERENCE ------------------- - elif inference_type == InferenceType.SINGLE_PREVIEW_VIDEO.value: - output = kwargs.get('output') - file_bytes = None - if isinstance(output, str) and output.startswith('http'): - temp_output_file = generate_temp_file(output, '.mp4') - file_bytes = None - with open(temp_output_file.name, 'rb') as f: - file_bytes = f.read() - - os.remove(temp_output_file.name) - - if file_bytes: - file_data = { - "file_location_to_save": kwargs.get('file_location_to_save'), - "mime_type": kwargs.get('mime_type'), - "file_bytes": file_bytes, - "project_uuid": kwargs.get('project_uuid'), - "inference_log_id": kwargs.get('log_uuid') - } - - timing_uuid = kwargs.get('timing_uuid') - timing = data_repo.get_timing_from_uuid(timing_uuid) - if not timing: - return False - - video_fie = convert_bytes_to_file(**file_data) - data_repo.add_interpolated_clip(timing_uuid, interpolated_clip_id=video_fie.uuid) - - else: - log_uuid = kwargs.get('log_uuid') - del kwargs['log_uuid'] - data_repo.update_inference_log_origin_data(log_uuid, **kwargs) - # --------------------- MULTI VIDEO INFERENCE (INTERPOLATION + MORPHING) ------------------- elif inference_type == InferenceType.FRAME_INTERPOLATION.value: output = kwargs.get('output') diff --git a/ui_components/methods/video_methods.py b/ui_components/methods/video_methods.py index ccf27043..e57119a2 100644 --- a/ui_components/methods/video_methods.py +++ b/ui_components/methods/video_methods.py @@ -20,7 +20,7 @@ from utils.media_processor.video import VideoProcessor -def create_single_interpolated_clip(timing_uuid, quality, settings={}, variant_count=1): +def create_single_interpolated_clip(shot_uuid, quality, settings={}, variant_count=1): ''' - this includes all the animation styles [direct morphing, interpolation, image to video] - this stores the newly created video in the interpolated_clip_list and promotes 
them to @@ -31,33 +31,21 @@ def create_single_interpolated_clip(timing_uuid, quality, settings={}, variant_c from shared.constants import QUEUE_INFERENCE_QUERIES data_repo = DataRepo() - timing: InternalFrameTimingObject = data_repo.get_timing_from_uuid(timing_uuid) - next_timing: InternalFrameTimingObject = data_repo.get_next_timing(timing_uuid) - prev_timing: InternalFrameTimingObject = data_repo.get_prev_timing(timing_uuid) - - if not next_timing: - st.error('This is the last image. Please select images having both prev & next images') - time.sleep(0.5) - return None - - if not prev_timing: - st.error('This is the first image. Please select images having both prev & next images') - time.sleep(0.5) - return None + shot = data_repo.get_shot_from_uuid(shot_uuid) + timing_list = data_repo.get_timing_list_from_shot(shot_uuid) if quality == 'full': - interpolation_steps = VideoInterpolator.calculate_dynamic_interpolations_steps(timing.clip_duration) + interpolation_steps = VideoInterpolator.calculate_dynamic_interpolations_steps(shot.duration) elif quality == 'preview': interpolation_steps = 3 - timing.interpolated_steps = interpolation_steps - img_list = [prev_timing.primary_image.location, timing.primary_image.location, next_timing.primary_image.location] - settings.update(interpolation_steps=timing.interpolation_steps) + img_list = [t.primary_image for t in timing_list] + settings.update(interpolation_steps=interpolation_steps) # res is an array of tuples (video_bytes, log) res = VideoInterpolator.create_interpolated_clip( img_list, - timing.animation_style, + shot.animation_style, # TODO: fix this settings, variant_count, QUEUE_INFERENCE_QUERIES @@ -69,7 +57,7 @@ def create_single_interpolated_clip(timing_uuid, quality, settings={}, variant_c "output": output, "log_uuid": log.uuid, "settings": settings, - "timing_uuid": timing_uuid + "shot_uuid": shot_uuid } process_inference_output(**inference_data) diff --git a/ui_components/models.py b/ui_components/models.py 
index ba6e5e7c..14d38ad9 100644 --- a/ui_components/models.py +++ b/ui_components/models.py @@ -115,11 +115,6 @@ def __init__(self, **kwargs): **kwargs["project"]) if 'project' in kwargs and kwargs["project"] else None self.source_image = InternalFileObject( **kwargs["source_image"]) if 'source_image' in kwargs and kwargs["source_image"] else None - self.interpolated_clip_list = [InternalFileObject(**file) for file in kwargs["interpolated_clip_list"]] \ - if 'interpolated_clip_list' in kwargs and kwargs["interpolated_clip_list"] else [] - self.timed_clip = InternalFileObject( - **kwargs["timed_clip"]) if 'timed_clip' in kwargs and kwargs["timed_clip"] else None - self.preview_video = InternalFileObject(**kwargs['preview_video']) if 'preview_video' in kwargs and kwargs['preview_video'] else None self.mask = InternalFileObject( **kwargs["mask"]) if 'mask' in kwargs and kwargs["mask"] else None self.canny_image = InternalFileObject( @@ -168,17 +163,7 @@ def primary_variant_index(self): idx += 1 return -1 - - @property - def primary_interpolated_video_index(self): - if not (self.interpolated_clip_list and len(self.interpolated_clip_list)) or not self.timed_clip: - return -1 - - for idx, img in enumerate(self.interpolated_clip_list): - if img.uuid == self.timed_clip.uuid: - return idx - - return -1 + class InternalShotObject: def __init__(self, **kwargs): @@ -199,6 +184,17 @@ def __init__(self, **kwargs): @property def meta_data_dict(self): return json.loads(self.meta_data) if self.meta_data else {} + + @property + def primary_interpolated_video_index(self): + if not (self.interpolated_clip_list and len(self.interpolated_clip_list)) or not self.main_clip: + return -1 + + for idx, img in enumerate(self.interpolated_clip_list): + if img.uuid == self.main_clip.uuid: + return idx + + return -1 class InternalAppSettingObject: diff --git a/ui_components/widgets/animation_style_element.py b/ui_components/widgets/animation_style_element.py index 6122bb0e..1a0a9990 100644 --- 
a/ui_components/widgets/animation_style_element.py +++ b/ui_components/widgets/animation_style_element.py @@ -6,14 +6,13 @@ from utils.data_repo.data_repo import DataRepo from utils.ml_processor.motion_module import AnimateDiffCheckpoint -def animation_style_element(timing_uuid, shot_uuid): +def animation_style_element(shot_uuid): motion_modules = AnimateDiffCheckpoint.get_name_list() data_repo = DataRepo() shot = data_repo.get_shot_from_uuid(shot_uuid) project_settings = data_repo.get_project_setting(shot.project.uuid) - timing = data_repo.get_timing_from_uuid(timing_uuid) - current_animation_style = timing.animation_style variant_count = 1 + current_animation_style = AnimationStyleType.INTERPOLATION.value # setting a default value if current_animation_style == AnimationStyleType.INTERPOLATION.value: animation_tool = st.radio("Animation Tool:", options=AnimationToolType.value_list(), key="animation_tool", horizontal=True) @@ -22,7 +21,7 @@ def animation_style_element(timing_uuid, shot_uuid): settings = { "animation_tool": animation_tool } - timing.animation_tool = animation_tool + if animation_tool == AnimationToolType.ANIMATEDIFF.value: c1, c2 = st.columns([1,1]) with c1: @@ -114,7 +113,7 @@ def animation_style_element(timing_uuid, shot_uuid): vid_quality = "full" if video_resolution == "Full Resolution" else "preview" st.write("Generating animation clip...") create_single_interpolated_clip( - timing_uuid, + shot_uuid, vid_quality, settings, variant_count diff --git a/ui_components/widgets/frame_clip_generation_elements.py b/ui_components/widgets/frame_clip_generation_elements.py deleted file mode 100644 index f6001763..00000000 --- a/ui_components/widgets/frame_clip_generation_elements.py +++ /dev/null @@ -1,75 +0,0 @@ -import streamlit as st -from shared.constants import AnimationStyleType -from ui_components.methods.video_methods import create_single_interpolated_clip, update_speed_of_video_clip -from ui_components.models import InternalFrameTimingObject -from 
utils.data_repo.data_repo import DataRepo -from utils.media_processor.interpolator import VideoInterpolator - - -# get audio_bytes of correct duration for a given frame -def current_individual_clip_element(timing_uuid): - data_repo = DataRepo() - timing: InternalFrameTimingObject = data_repo.get_timing_from_uuid(timing_uuid) - idx = timing.aux_frame_index - - st.info(f"Individual Clip for #{idx+1}:") - if timing.timed_clip: - st.video(timing.timed_clip.location) - - if timing.interpolation_steps is not None: - if VideoInterpolator.calculate_dynamic_interpolations_steps(timing.clip_duration) > timing.interpolation_steps: - st.error("Low Resolution") - if st.button("Generate Full Resolution Clip", key=f"generate_full_resolution_video_{idx}"): - create_single_interpolated_clip(timing.uuid, 'full') - st.rerun() - else: - st.success("Full Resolution") - else: - st.error(''' - **----------------------------------------** - - --------- - - ================== - - **No Individual Clip Created Yet** - - ================== - - --------- - - **----------------------------------------** - - - ''') - gen1, gen2 = st.columns([1, 1]) - - with gen1: - if st.button("Generate Low-Resolution Clip", key=f"generate_preview_video_{idx}"): - create_single_interpolated_clip(timing.uuid, 'preview') - st.rerun() - with gen2: - if st.button("Generate Full Resolution Clip", key=f"generate_full_resolution_video_{idx}"): - create_single_interpolated_clip(timing.uuid, 'full') - st.rerun() - - -def update_animation_style_element(timing_uuid, horizontal=True): - data_repo = DataRepo() - timing = data_repo.get_timing_from_uuid(timing_uuid) - idx = timing.aux_frame_index - - animation_styles = AnimationStyleType.value_list() - - if f"animation_style_index_{idx}" not in st.session_state: - st.session_state[f"animation_style_index_{idx}"] = animation_styles.index( - timing.animation_style) - st.session_state[f"animation_style_{idx}"] = timing.animation_style - - 
st.session_state[f"animation_style_{idx}"] = st.radio( - "Animation style:", animation_styles, index=st.session_state[f"animation_style_index_{idx}"], key=f"animation_style_radio_{idx}", help="This is for the morph from the current frame to the next one.", horizontal=horizontal) - - if st.session_state[f"animation_style_{idx}"] != timing.animation_style: - st.session_state[f"animation_style_index_{idx}"] = animation_styles.index(st.session_state[f"animation_style_{idx}"]) - timing.animation_style = st.session_state[f"animation_style_{idx}"] - st.rerun() \ No newline at end of file diff --git a/ui_components/widgets/timeline_view.py b/ui_components/widgets/timeline_view.py index 1ac284a5..2553e727 100644 --- a/ui_components/widgets/timeline_view.py +++ b/ui_components/widgets/timeline_view.py @@ -43,7 +43,8 @@ def timeline_view_buttons(i, j, timing_list, shift_frames_setting, time_setter_t def timeline_view(shot_uuid, stage): data_repo = DataRepo() - timing_list = data_repo.get_timing_list_from_shot(shot_uuid) + shot = data_repo.get_shot_from_uuid(shot_uuid) + shot_list = data_repo.get_shot_list(shot.project.uuid) st.markdown("***") @@ -77,7 +78,7 @@ def timeline_view(shot_uuid, stage): st.markdown("***") - total_count = len(timing_list) + total_count = len(shot_list) for i in range(0, total_count, items_per_row): # Step of items_per_row for grid grid = st.columns(items_per_row) # Create items_per_row columns for grid for j in range(items_per_row): @@ -85,13 +86,13 @@ def timeline_view(shot_uuid, stage): with grid[j]: display_number = i + j + 1 if stage == 'Key Frames': - display_image(timing_uuid=timing_list[i + j].uuid, stage=WorkflowStageType.STYLED.value, clickable=False) + display_image(timing_uuid=shot_list[i + j].uuid, stage=WorkflowStageType.STYLED.value, clickable=False) elif stage == 'Videos': - if timing_list[i + j].timed_clip: - st.video(timing_list[i + j].timed_clip.location) + if shot_list[i + j].main_clip: + st.video(shot_list[i + 
j].main_clip.location) else: st.error("No video found for this frame.") with st.expander(f'Frame #{display_number}', True): - timeline_view_buttons(i, j, timing_list, shift_frames_setting, time_setter_toggle, replace_image_widget_toggle, duration_setter_toggle, copy_frame_toggle, move_frames_toggle, delete_frames_toggle, change_position_toggle, project_uuid) + timeline_view_buttons(i, j, shot_list, shift_frames_setting, time_setter_toggle, replace_image_widget_toggle, duration_setter_toggle, copy_frame_toggle, move_frames_toggle, delete_frames_toggle, change_position_toggle, project_uuid) diff --git a/ui_components/widgets/variant_comparison_grid.py b/ui_components/widgets/variant_comparison_grid.py index 88d349ab..b087b0d1 100644 --- a/ui_components/widgets/variant_comparison_grid.py +++ b/ui_components/widgets/variant_comparison_grid.py @@ -4,11 +4,22 @@ from utils.data_repo.data_repo import DataRepo -def variant_comparison_grid(timing_uuid, stage=CreativeProcessType.MOTION.value): +def variant_comparison_grid(ele_uuid, stage=CreativeProcessType.MOTION.value): + ''' + UI element which compares different variant of images/videos. For images ele_uuid has to be timing_uuid + and for videos it has to be shot_uuid. 
+ ''' data_repo = DataRepo() - timing = data_repo.get_timing_from_uuid(timing_uuid) - variants = timing.interpolated_clip_list if stage == CreativeProcessType.MOTION.value else timing.alternative_images_list + timing_uuid, shot_uuid = None, None + if stage == CreativeProcessType.MOTION.value: + shot_uuid = ele_uuid + shot = data_repo.get_shot_from_uuid(shot_uuid) + variants = shot.interpolated_clip_list + else: + timing_uuid = ele_uuid + timing = data_repo.get_timing_from_uuid(timing_uuid) + variants = timing.alternative_images_list st.markdown("***") From f4a6f7dc46896914137ae8b8939b45bfdb85c1bd Mon Sep 17 00:00:00 2001 From: piyushK52 Date: Sat, 28 Oct 2023 12:06:43 +0530 Subject: [PATCH 122/164] wip: project_id removed from timing --- backend/db_repo.py | 16 ++---- backend/models.py | 7 +-- backend/serializers/dao.py | 1 - backend/serializers/dto.py | 3 - banodoco_settings.py | 3 +- .../components/project_settings_page.py | 57 ------------------- ui_components/methods/common_methods.py | 14 ++--- ui_components/methods/ml_methods.py | 6 +- ui_components/methods/video_methods.py | 4 +- ui_components/models.py | 2 - ui_components/widgets/attach_audio_element.py | 2 - ui_components/widgets/cropping_element.py | 10 ++-- ui_components/widgets/drawing_element.py | 2 +- .../widgets/frame_movement_widgets.py | 4 +- ui_components/widgets/image_zoom_widgets.py | 2 +- ui_components/widgets/inpainting_element.py | 4 +- ui_components/widgets/styling_element.py | 4 +- utils/cache/cache_methods.py | 6 +- 18 files changed, 37 insertions(+), 110 deletions(-) diff --git a/backend/db_repo.py b/backend/db_repo.py index 7e42f6a1..9a9619ee 100644 --- a/backend/db_repo.py +++ b/backend/db_repo.py @@ -15,7 +15,7 @@ import subprocess from typing import List import uuid -from shared.constants import Colors, InternalFileType, SortOrder +from shared.constants import InternalFileType, SortOrder from backend.serializers.dto import AIModelDto, AppSettingDto, BackupDto, BackupListDto, 
InferenceLogDto, InternalFileDto, ProjectDto, SettingDto, ShotDto, TimingDto, UserDto from shared.constants import AUTOMATIC_FILE_HOSTING, LOCAL_DATABASE_NAME, SERVER, ServerType @@ -768,7 +768,8 @@ def get_timing_list_from_project(self, project_uuid=None): if not project: return InternalResponse({}, 'invalid project', False) - timing_list = Timing.objects.filter(project_id=project.id, is_disabled=False).order_by('aux_frame_index').all() + shot_list = Shot.objects.filter(project_id=project.id, is_disabled=False).all() + timing_list = Timing.objects.filter(shot_id__in=[s.id for s in shot_list], is_disabled=False).order_by('aux_frame_index').all() else: timing_list = Timing.objects.filter(is_disabled=False).order_by('aux_frame_index').all() @@ -798,14 +799,6 @@ def create_timing(self, **kwargs): print(attributes.data) - if 'project_id' in attributes.data and attributes.data['project_id']: - project = Project.objects.filter(uuid=attributes.data['project_id'], is_disabled=False).first() - if not project: - return InternalResponse({}, 'invalid project', False) - - print(attributes.data) - attributes._data['project_id'] = project.id - if 'shot_id' in attributes.data and attributes.data['shot_id']: shot = Shot.objects.filter(uuid=attributes.data['shot_id'], is_disabled=False).first() if not shot: @@ -876,7 +869,8 @@ def remove_existing_timing(self, project_uuid): project: Project = Project.objects.filter(is_disabled=False).first() if project: - Timing.objects.filter(project_id=project.id, is_disabled=False).update(is_disabled=True) + shot_list = Shot.objects.filter(project_id=project.id, is_disabled=False).all() + Timing.objects.filter(shot_id__in=[s.id for s in shot_list], is_disabled=False).update(is_disabled=True) return InternalResponse({}, 'timing removed successfully', True) diff --git a/backend/models.py b/backend/models.py index 8ae95ada..387f55f7 100644 --- a/backend/models.py +++ b/backend/models.py @@ -213,7 +213,6 @@ def save(self, *args, **kwargs): class 
Timing(BaseModel): - project = models.ForeignKey(Project, on_delete=models.CASCADE, null=True) model = models.ForeignKey(AIModel, on_delete=models.DO_NOTHING, null=True) source_image = models.ForeignKey(InternalFileObject, related_name="source_image", on_delete=models.DO_NOTHING, null=True) mask = models.ForeignKey(InternalFileObject, related_name="mask", on_delete=models.DO_NOTHING, null=True) @@ -298,15 +297,15 @@ def primary_variant_location(self): return "" - # gives the next entry in the project timings + # gives the next entry in the shot timings @property def next_timing(self): - next_timing = Timing.objects.filter(project=self.project, id__gt=self.id, is_disabled=False).order_by('id').first() + next_timing = Timing.objects.filter(shot=self.shot, id__gt=self.id, is_disabled=False).order_by('id').first() return next_timing @property def prev_timing(self): - prev_timing = Timing.objects.filter(project=self.project, id__lt=self.id, is_disabled=False).order_by('id').first() + prev_timing = Timing.objects.filter(shot=self.shot, id__lt=self.id, is_disabled=False).order_by('id').first() return prev_timing diff --git a/backend/serializers/dao.py b/backend/serializers/dao.py index 4a19028b..13419f05 100644 --- a/backend/serializers/dao.py +++ b/backend/serializers/dao.py @@ -75,7 +75,6 @@ class CreateAIModelParamMapDao(serializers.Serializer): class CreateTimingDao(serializers.Serializer): - project_id = serializers.CharField(max_length=100) model_id = serializers.CharField(max_length=100, required=False) source_image_id = serializers.CharField(max_length=100, required=False) mask_id = serializers.CharField(max_length=100, required=False) diff --git a/backend/serializers/dto.py b/backend/serializers/dto.py index 7883f4ca..c0cd0fbc 100644 --- a/backend/serializers/dto.py +++ b/backend/serializers/dto.py @@ -75,7 +75,6 @@ class Meta: class TimingDto(serializers.ModelSerializer): - project = ProjectDto() model = AIModelDto() source_image = InternalFileDto() mask = 
InternalFileDto() @@ -86,7 +85,6 @@ class Meta: model = Timing fields = ( "uuid", - "project", "model", "source_image", "mask", @@ -114,7 +112,6 @@ class Meta: ) - class AppSettingDto(serializers.ModelSerializer): user = UserDto() diff --git a/banodoco_settings.py b/banodoco_settings.py index eb4f66ba..f9f36749 100644 --- a/banodoco_settings.py +++ b/banodoco_settings.py @@ -119,7 +119,6 @@ def create_new_project(user: InternalUserObject, project_name: str, width=512, h source_image = data_repo.create_file(**file_data) timing_data = { - "project_id": project.uuid, "frame_time": 0.0, "animation_style": animation_style, "aux_frame_index": 0, @@ -156,7 +155,7 @@ def create_new_project(user: InternalUserObject, project_name: str, width=512, h "default_high_threshold" : 100 } - project_setting = data_repo.create_project_setting(**project_setting_data) + _ = data_repo.create_project_setting(**project_setting_data) create_working_assets(project.uuid) diff --git a/ui_components/components/project_settings_page.py b/ui_components/components/project_settings_page.py index 40cc898c..a842db9d 100644 --- a/ui_components/components/project_settings_page.py +++ b/ui_components/components/project_settings_page.py @@ -10,66 +10,9 @@ def project_settings_page(project_uuid): data_repo = DataRepo() - project = data_repo.get_project_from_uuid(project_uuid) project_settings = data_repo.get_project_setting(project_uuid) - # make a list of all the files in videos/{project_name}/assets/resources/music - - project_name = project.name attach_audio_element(project_uuid, True) - # with st.expander("Version History"): - # version_name = st.text_input( - # "What would you like to call this version?", key="version_name") - # version_name = version_name.replace(" ", "_") - - # if st.button("Make a copy of this project", key="copy_project"): - # # shutil.copyfile(f"videos/{project_name}/timings.csv", f"videos/{project_name}/timings_{version_name}.csv") - # data_repo.create_backup(project_uuid, 
version_name) - # st.success("Project backed up successfully!") - - # # list all the .csv files in that folder starting with timings_ - # # version_list = [list_of_files for list_of_files in os.listdir( - # # "videos/" + project_name) if list_of_files.startswith('timings_')] - # version_list = data_repo.get_backup_list(project_uuid) - - # header1, header2, header3, header4 = st.columns([1, 1, 1, 1]) - - # with header1: - # st.markdown("### Version Name") - # with header2: - # st.markdown("### Created On") - # with header3: - # st.markdown("### Restore Version") - # with header4: - # st.markdown("### Options") - - # for backup in version_list: - # col1, col2, col3, col4 = st.columns([1, 1, 1, 1]) - - # with col1: - # st.write(backup.name) - # with col2: - # st.write(backup.created_on.strftime("%Y-%m-%d %H:%M:%S")) - # with col3: - # if st.button("Restore this version", key=f"restore_version_{backup.name}"): - # # change timings.csv to last_timings.csv - # # os.rename(f"videos/{project_name}/timings.csv", - # # f"videos/{project_name}/timings_previous.csv") - # # rename i to timings.csv - # # make this copy the file instead using shutil os.rename(f"videos/{project_name}/{i}", f"videos/{project_name}/timings.csv") - # # shutil.copyfile( - # # f"videos/{project_name}/{i}", f"videos/{project_name}/timings.csv") - # data_repo.restore_backup(backup.uuid) - # st.success( - # "Version restored successfully! 
Just in case, the previous version has been saved as last_timings.csv") - # time.sleep(2) - # st.rerun() - # with col4: - # if st.button("Delete this version", key=f"delete_version_{backup.name}"): - # data_repo.delete_backup(backup.uuid) - # st.success("backup deleted successfully!") - # st.rerun() - with st.expander("Frame Size", expanded=True): st.write("Current Size = ", project_settings.width, "x", project_settings.height) diff --git a/ui_components/methods/common_methods.py b/ui_components/methods/common_methods.py index 281f3238..06481938 100644 --- a/ui_components/methods/common_methods.py +++ b/ui_components/methods/common_methods.py @@ -283,7 +283,7 @@ def promote_image_variant(timing_uuid, variant_to_promote_frame_number: str): # promoting variant variant_to_promote = timing.alternative_images_list[variant_to_promote_frame_number] data_repo.update_specific_timing(timing_uuid, primary_image_id=variant_to_promote.uuid) - _ = data_repo.get_timing_list_from_shot(timing.project.uuid) + _ = data_repo.get_timing_list_from_shot(timing.shot.uuid) def promote_video_variant(shot_uuid, variant_uuid): @@ -373,7 +373,7 @@ def create_or_update_mask(timing_uuid, image) -> InternalFileObject: timing = data_repo.get_timing_from_uuid(timing_uuid) unique_file_name = str(uuid.uuid4()) + ".png" - file_location = f"videos/{timing.project.uuid}/assets/resources/masks/{unique_file_name}" + file_location = f"videos/{timing.shot.project.uuid}/assets/resources/masks/{unique_file_name}" hosted_url = save_or_host_file(image, file_location) # if mask is not present than creating a new one @@ -509,7 +509,7 @@ def get_audio_bytes_for_slice(timing_uuid): timing: InternalFrameTimingObject = data_repo.get_timing_from_uuid( timing_uuid) project_settings: InternalSettingObject = data_repo.get_project_setting( - timing.project.uuid) + timing.shot.project.uuid) # TODO: add null check for the audio audio = AudioSegment.from_file(project_settings.audio.local_path) @@ -570,7 +570,7 @@ def 
execute_image_edit(type_of_mask_selection, type_of_mask_replacement, data_repo = DataRepo() timing: InternalFrameTimingObject = data_repo.get_timing_from_uuid( timing_uuid) - project = timing.project + project = timing.shot.project inference_log = None if type_of_mask_selection == "Automated Background Selection": @@ -765,12 +765,12 @@ def process_inference_output(**kwargs): if 'normalise_speed' in settings and settings['normalise_speed']: output = VideoProcessor.update_video_bytes_speed(output, AnimationStyleType.INTERPOLATION.value, timing.clip_duration) - video_location = "videos/" + str(timing.project.uuid) + "/assets/videos/0_raw/" + str(uuid.uuid4()) + ".mp4" + video_location = "videos/" + str(timing.shot.project.uuid) + "/assets/videos/0_raw/" + str(uuid.uuid4()) + ".mp4" video = convert_bytes_to_file( file_location_to_save=video_location, mime_type="video/mp4", file_bytes=output, - project_uuid=timing.project.uuid, + project_uuid=timing.shot.project.uuid, inference_log_id=log_uuid ) @@ -822,7 +822,7 @@ def process_inference_output(**kwargs): type=InternalFileType.IMAGE.value, hosted_url=output[0] if isinstance(output, list) else output, inference_log_id=str(log_uuid), - project_id=timing.project.uuid + project_id=timing.shot.project.uuid ) if stage == WorkflowStageType.SOURCE.value: diff --git a/ui_components/methods/ml_methods.py b/ui_components/methods/ml_methods.py index 8b9424d5..5e4c938c 100644 --- a/ui_components/methods/ml_methods.py +++ b/ui_components/methods/ml_methods.py @@ -23,7 +23,7 @@ def trigger_restyling_process(timing_uuid, update_inference_settings, \ data_repo = DataRepo() timing: InternalFrameTimingObject = data_repo.get_timing_from_uuid(timing_uuid) - project_settings: InternalSettingObject = data_repo.get_project_setting(timing.project.uuid) + project_settings: InternalSettingObject = data_repo.get_project_setting(timing.shot.project.uuid) source_image = timing.source_image if transformation_stage == ImageStage.SOURCE_IMAGE.value else 
\ timing.primary_image @@ -41,7 +41,7 @@ def trigger_restyling_process(timing_uuid, update_inference_settings, \ prompt = prompt.replace(",", ".") prompt = prompt.replace("\n", "") data_repo.update_project_setting( - timing.project.uuid, + timing.shot.project.uuid, default_prompt=prompt, default_strength=query_obj.strength, default_model_id=query_obj.model_uuid, @@ -198,7 +198,7 @@ def inpainting(input_image: str, prompt, negative_prompt, timing_uuid, mask_in_p if mask_in_project == False: mask = timing.mask.location else: - mask = timing.project.get_temp_mask_file(TEMP_MASK_FILE).location + mask = timing.shot.project.get_temp_mask_file(TEMP_MASK_FILE).location if not mask.startswith("http"): mask = open(mask, "rb") diff --git a/ui_components/methods/video_methods.py b/ui_components/methods/video_methods.py index e57119a2..8081d30e 100644 --- a/ui_components/methods/video_methods.py +++ b/ui_components/methods/video_methods.py @@ -80,7 +80,7 @@ def update_speed_of_video_clip(video_file: InternalFileObject, timing_uuid) -> I location_of_video = temp_video_file.name if temp_video_file else video_file.local_path new_file_name = ''.join(random.choices(string.ascii_lowercase + string.digits, k=16)) + ".mp4" - new_file_location = "videos/" + str(timing.project.uuid) + "/assets/videos/1_final/" + str(new_file_name) + new_file_location = "videos/" + str(timing.shot.project.uuid) + "/assets/videos/1_final/" + str(new_file_name) video_bytes = VideoProcessor.update_video_speed( location_of_video, @@ -92,7 +92,7 @@ def update_speed_of_video_clip(video_file: InternalFileObject, timing_uuid) -> I new_file_location, "video/mp4", video_bytes, - timing.project.uuid + timing.shot.project.uuid ) if temp_video_file: diff --git a/ui_components/models.py b/ui_components/models.py index 14d38ad9..6c8b1a25 100644 --- a/ui_components/models.py +++ b/ui_components/models.py @@ -111,8 +111,6 @@ def _get_training_image_list(self, training_image_list): class InternalFrameTimingObject: def 
__init__(self, **kwargs): self.uuid = kwargs['uuid'] if 'uuid' in kwargs else None - self.project = InternalProjectObject( - **kwargs["project"]) if 'project' in kwargs and kwargs["project"] else None self.source_image = InternalFileObject( **kwargs["source_image"]) if 'source_image' in kwargs and kwargs["source_image"] else None self.mask = InternalFileObject( diff --git a/ui_components/widgets/attach_audio_element.py b/ui_components/widgets/attach_audio_element.py index deb13664..56b186ac 100644 --- a/ui_components/widgets/attach_audio_element.py +++ b/ui_components/widgets/attach_audio_element.py @@ -6,8 +6,6 @@ def attach_audio_element(project_uuid, expanded): data_repo = DataRepo() - project: InternalProjectObject = data_repo.get_project_from_uuid( - uuid=project_uuid) project_setting: InternalSettingObject = data_repo.get_project_setting(project_uuid) with st.expander("Audio", expanded=expanded): diff --git a/ui_components/widgets/cropping_element.py b/ui_components/widgets/cropping_element.py index 495a7352..6226dac5 100644 --- a/ui_components/widgets/cropping_element.py +++ b/ui_components/widgets/cropping_element.py @@ -20,7 +20,7 @@ def cropping_selector_element(project_uuid): - selector1, selector2, selector3 = st.columns([1, 1, 1]) + selector1, selector2, _ = st.columns([1, 1, 1]) with selector1: which_stage = st_memory.radio("Which stage to work on?", ["Styled Key Frame", "Unedited Key Frame"], key="which_stage", horizontal=True) with selector2: @@ -86,7 +86,7 @@ def manual_cropping_element(stage, timing_uuid): data_repo = DataRepo() timing = data_repo.get_timing_from_uuid(timing_uuid) - project_uuid = timing.project.uuid + project_uuid = timing.shot.project.uuid if not timing.source_image: st.error("Please select a source image before cropping") @@ -115,7 +115,7 @@ def get_working_image(): get_working_image() st.rerun() - options1, options2, option3, option4 = st.columns([3, 1, 1, 1]) + options1, _, _, _ = st.columns([3, 1, 1, 1]) with options1: 
sub_options_1, sub_options_2 = st.columns(2) if 'degrees_rotated_to' not in st.session_state: @@ -140,7 +140,7 @@ def get_working_image(): st.rerun() project_settings: InternalProjectObject = data_repo.get_project_setting( - timing.project.uuid) + timing.shot.project.uuid) width = project_settings.width height = project_settings.height @@ -193,4 +193,4 @@ def get_working_image(): st.warning("Warning: This will overwrite the original image") inpaint_in_black_space_element( - cropped_img, timing.project.uuid, stage=stage) + cropped_img, timing.shot.project.uuid, stage=stage) diff --git a/ui_components/widgets/drawing_element.py b/ui_components/widgets/drawing_element.py index 32acad06..b8cc8832 100644 --- a/ui_components/widgets/drawing_element.py +++ b/ui_components/widgets/drawing_element.py @@ -225,7 +225,7 @@ def drawing_element(timing_details,project_settings,project_uuid,stage=WorkflowS "RGB") unique_file_name = str(uuid.uuid4()) + ".png" - file_location = f"videos/{timing.project.uuid}/assets/resources/masks/{unique_file_name}" + file_location = f"videos/{timing.shot.project.uuid}/assets/resources/masks/{unique_file_name}" hosted_url = save_or_host_file(new_canny_image, file_location) file_data = { "name": str(uuid.uuid4()) + ".png", diff --git a/ui_components/widgets/frame_movement_widgets.py b/ui_components/widgets/frame_movement_widgets.py index 718776fb..dc75d513 100644 --- a/ui_components/widgets/frame_movement_widgets.py +++ b/ui_components/widgets/frame_movement_widgets.py @@ -151,7 +151,7 @@ def replace_image_widget(timing_uuid, stage, options=["Uploaded Frame","Other Fr if st.button("Upload Source Image"): if uploaded_file: timing = data_repo.get_timing_from_uuid(timing.uuid) - if save_and_promote_image(uploaded_file, timing.project.uuid, timing.uuid, "source"): + if save_and_promote_image(uploaded_file, timing.shot.project.uuid, timing.uuid, "source"): time.sleep(1.5) st.rerun() else: @@ -161,7 +161,7 @@ def replace_image_widget(timing_uuid, stage, 
options=["Uploaded Frame","Other Fr if st.button("Replace frame", disabled=False): timing = data_repo.get_timing_from_uuid(timing.uuid) if replacement_frame: - save_and_promote_image(replacement_frame, timing.project.uuid, timing.uuid, "styled") + save_and_promote_image(replacement_frame, timing.shot.project.uuid, timing.uuid, "styled") st.success("Replaced") time.sleep(1) st.rerun() diff --git a/ui_components/widgets/image_zoom_widgets.py b/ui_components/widgets/image_zoom_widgets.py index 54cf2fec..392570d2 100644 --- a/ui_components/widgets/image_zoom_widgets.py +++ b/ui_components/widgets/image_zoom_widgets.py @@ -35,7 +35,7 @@ def zoom_inputs(position='in-frame', horizontal=False): def save_zoomed_image(image, timing_uuid, stage, promote=False): data_repo = DataRepo() timing = data_repo.get_timing_from_uuid(timing_uuid) - project_uuid = timing.project.uuid + project_uuid = timing.shot.project.uuid file_name = str(uuid.uuid4()) + ".png" diff --git a/ui_components/widgets/inpainting_element.py b/ui_components/widgets/inpainting_element.py index f0cd962b..ba01980f 100644 --- a/ui_components/widgets/inpainting_element.py +++ b/ui_components/widgets/inpainting_element.py @@ -34,7 +34,7 @@ def inpainting_element(timing_uuid): timing_details: List[InternalFrameTimingObject] = data_repo.get_timing_list_from_shot( timing.shot.uuid) project_settings: InternalSettingObject = data_repo.get_project_setting( - timing.project.uuid) + timing.shot.project.uuid) if "type_of_mask_replacement" not in st.session_state: st.session_state["type_of_mask_replacement"] = "Replace With Image" @@ -214,7 +214,7 @@ def inpainting_element(timing_uuid): if st.session_state["type_of_mask_replacement"] == "Replace With Image": data_repo = DataRepo() - project: InternalProjectObject = data_repo.get_project_from_uuid(timing.project.uuid) + project: InternalProjectObject = data_repo.get_project_from_uuid(timing.shot.project.uuid) prompt = "" negative_prompt = "" diff --git 
a/ui_components/widgets/styling_element.py b/ui_components/widgets/styling_element.py index 257acba1..129900c1 100644 --- a/ui_components/widgets/styling_element.py +++ b/ui_components/widgets/styling_element.py @@ -13,14 +13,14 @@ def styling_element(timing_uuid, view_type=ViewType.SINGLE.value): data_repo = DataRepo() timing: InternalFrameTimingObject = data_repo.get_timing_from_uuid(timing_uuid) timing_list: List[InternalFrameTimingObject] = data_repo.get_timing_list_from_shot(timing.shot.uuid) - project_settings: InternalSettingObject = data_repo.get_project_setting(timing.project.uuid) + project_settings: InternalSettingObject = data_repo.get_project_setting(timing.shot.project.uuid) # -------------------- Transfomation Stage -------------------- # stages = ImageStage.value_list() if view_type == ViewType.SINGLE.value: append_to_item_name = f"{timing_uuid}" elif view_type == ViewType.LIST.value: - append_to_item_name = str(timing.project.uuid) + append_to_item_name = str(timing.shot.project.uuid) st.markdown("## Batch queries") if project_settings.default_stage: diff --git a/utils/cache/cache_methods.py b/utils/cache/cache_methods.py index c7396a07..60663969 100644 --- a/utils/cache/cache_methods.py +++ b/utils/cache/cache_methods.py @@ -262,7 +262,7 @@ def _cache_get_timing_list_from_project(self, *args, **kwargs): if timing_list and len(timing_list) and len(args) > 0: project_specific_list = [] for timing in timing_list: - if timing.project.uuid == args[0]: + if timing.shot.project.uuid == args[0]: project_specific_list.append(timing) # if there are any timings for the project, return them @@ -301,9 +301,9 @@ def _cache_update_specific_timing(self, *args, **kwargs): # updating the timing list timing_func = getattr(cls, '_original_get_timing_from_uuid') timing = timing_func(self, args[0]) - if timing and timing.project: + if timing and timing.shot.project: original_func = getattr(cls, '_original_get_timing_list_from_project') - timing_list = 
original_func(self, timing.project.uuid) + timing_list = original_func(self, timing.shot.project.uuid) if timing_list and len(timing_list): StCache.add_all(timing_list, CacheKey.TIMING_DETAILS.value) From 833670d5b9695568e04ecff87f0272e2ac2b40ed Mon Sep 17 00:00:00 2001 From: piyushK52 Date: Sat, 28 Oct 2023 19:50:48 +0530 Subject: [PATCH 123/164] wip: animation_style removed --- backend/db_repo.py | 1 - backend/models.py | 2 - backend/serializers/dao.py | 2 - backend/serializers/dto.py | 2 - banodoco_settings.py | 1 - ui_components/methods/common_methods.py | 1 - ui_components/methods/video_methods.py | 11 +--- .../widgets/add_key_frame_element.py | 2 - .../widgets/animation_style_element.py | 1 + utils/media_processor/video.py | 57 +++++++++---------- 10 files changed, 32 insertions(+), 48 deletions(-) diff --git a/backend/db_repo.py b/backend/db_repo.py index 9a9619ee..f652d1e6 100644 --- a/backend/db_repo.py +++ b/backend/db_repo.py @@ -1412,7 +1412,6 @@ def restore_backup(self, backup_uuid: str): adapter_type=backup_timing['adapter_type'], clip_duration=backup_timing['clip_duration'], animation_style=backup_timing['animation_style'], - interpolation_steps=backup_timing['interpolation_steps'], low_threshold=backup_timing['low_threshold'], high_threshold=backup_timing['high_threshold'], aux_frame_index=backup_timing['aux_frame_index'] diff --git a/backend/models.py b/backend/models.py index 387f55f7..bf8d355b 100644 --- a/backend/models.py +++ b/backend/models.py @@ -231,8 +231,6 @@ class Timing(BaseModel): notes = models.TextField(default="", blank=True) adapter_type = models.CharField(max_length=255, default=None, null=True, blank=True) clip_duration = models.FloatField(default=None, null=True) # clip duration of the timed_clip - animation_style = models.CharField(max_length=255, default=None, null=True) - interpolation_steps = models.IntegerField(default=0) low_threshold = models.FloatField(default=0) high_threshold = models.FloatField(default=0) 
aux_frame_index = models.IntegerField(default=0) # starts with 0 # TODO: udpate this diff --git a/backend/serializers/dao.py b/backend/serializers/dao.py index 13419f05..3673cd85 100644 --- a/backend/serializers/dao.py +++ b/backend/serializers/dao.py @@ -93,8 +93,6 @@ class CreateTimingDao(serializers.Serializer): notes = serializers.CharField(max_length=1024, required=False) adapter_type = serializers.CharField(max_length=255, required=False) clip_duration = serializers.FloatField(default=0, required=False) - animation_style = serializers.CharField(max_length=100, default=AnimationStyleType.INTERPOLATION.value, required=False) - interpolation_steps = serializers.IntegerField(required=False) low_threshold = serializers.FloatField(default=100, required=False) high_threshold = serializers.FloatField(default=200, required=False) aux_frame_index = serializers.IntegerField(required=False) diff --git a/backend/serializers/dto.py b/backend/serializers/dto.py index c0cd0fbc..565b4dbd 100644 --- a/backend/serializers/dto.py +++ b/backend/serializers/dto.py @@ -102,8 +102,6 @@ class Meta: "notes", "adapter_type", "clip_duration", - "animation_style", - "interpolation_steps", "low_threshold", "high_threshold", "aux_frame_index", diff --git a/banodoco_settings.py b/banodoco_settings.py index f9f36749..a7c810b1 100644 --- a/banodoco_settings.py +++ b/banodoco_settings.py @@ -120,7 +120,6 @@ def create_new_project(user: InternalUserObject, project_name: str, width=512, h timing_data = { "frame_time": 0.0, - "animation_style": animation_style, "aux_frame_index": 0, "source_image_id": source_image.uuid, "shot_id": shot.uuid, diff --git a/ui_components/methods/common_methods.py b/ui_components/methods/common_methods.py index 06481938..b9ee1f28 100644 --- a/ui_components/methods/common_methods.py +++ b/ui_components/methods/common_methods.py @@ -312,7 +312,6 @@ def promote_video_variant(shot_uuid, variant_uuid): if video.duration != shot.duration: video_bytes = 
VideoProcessor.update_video_speed( variant_to_promote.location, - shot.animation_style, shot.duration ) diff --git a/ui_components/methods/video_methods.py b/ui_components/methods/video_methods.py index 8081d30e..cc92a2fc 100644 --- a/ui_components/methods/video_methods.py +++ b/ui_components/methods/video_methods.py @@ -6,15 +6,12 @@ from typing import List import ffmpeg import streamlit as st -import uuid -from moviepy.editor import concatenate_videoclips, TextClip, VideoFileClip, AudioFileClip -from moviepy.video.compositing.CompositeVideoClip import CompositeVideoClip +from moviepy.editor import concatenate_videoclips, VideoFileClip, AudioFileClip from backend.models import InternalFileObject from shared.constants import InferenceType, InternalFileTag from shared.file_upload.s3 import is_s3_image_url -from ui_components.methods.file_methods import convert_bytes_to_file -from ui_components.models import InternalFrameTimingObject, InternalSettingObject, InternalShotObject +from ui_components.models import InternalFrameTimingObject, InternalShotObject from utils.data_repo.data_repo import DataRepo from utils.media_processor.interpolator import VideoInterpolator from utils.media_processor.video import VideoProcessor @@ -45,7 +42,7 @@ def create_single_interpolated_clip(shot_uuid, quality, settings={}, variant_cou # res is an array of tuples (video_bytes, log) res = VideoInterpolator.create_interpolated_clip( img_list, - shot.animation_style, # TODO: fix this + settings['animation_style'], settings, variant_count, QUEUE_INFERENCE_QUERIES @@ -71,7 +68,6 @@ def update_speed_of_video_clip(video_file: InternalFileObject, timing_uuid) -> I timing_uuid) desired_duration = timing.clip_duration - animation_style = timing.animation_style temp_video_file = None if video_file.hosted_url and is_s3_image_url(video_file.hosted_url): @@ -84,7 +80,6 @@ def update_speed_of_video_clip(video_file: InternalFileObject, timing_uuid) -> I video_bytes = VideoProcessor.update_video_speed( 
location_of_video, - animation_style, desired_duration ) diff --git a/ui_components/widgets/add_key_frame_element.py b/ui_components/widgets/add_key_frame_element.py index 91022ec8..1e11234c 100644 --- a/ui_components/widgets/add_key_frame_element.py +++ b/ui_components/widgets/add_key_frame_element.py @@ -116,8 +116,6 @@ def add_key_frame(selected_image, inherit_styling_settings, target_frame_positio if inherit_styling_settings == "Yes": clone_styling_settings(index_of_current_item - 1, timing_list[index_of_current_item].uuid) - timing_list[index_of_current_item].animation_style = project_settings.default_animation_style - if len(timing_list) == 1: st.session_state['current_frame_index'] = 1 st.session_state['current_frame_uuid'] = timing_list[0].uuid diff --git a/ui_components/widgets/animation_style_element.py b/ui_components/widgets/animation_style_element.py index 1a0a9990..5f6aa3d3 100644 --- a/ui_components/widgets/animation_style_element.py +++ b/ui_components/widgets/animation_style_element.py @@ -112,6 +112,7 @@ def animation_style_element(shot_uuid): if st.button("Generate Animation Clip", key="generate_animation_clip"): vid_quality = "full" if video_resolution == "Full Resolution" else "preview" st.write("Generating animation clip...") + settings.update(animation_style=current_animation_style) create_single_interpolated_clip( shot_uuid, vid_quality, diff --git a/utils/media_processor/video.py b/utils/media_processor/video.py index f1c9e2a9..6b20554c 100644 --- a/utils/media_processor/video.py +++ b/utils/media_processor/video.py @@ -7,13 +7,13 @@ class VideoProcessor: @staticmethod - def update_video_speed(video_location, animation_style, desired_duration): + def update_video_speed(video_location, desired_duration): clip = VideoFileClip(video_location) - return VideoProcessor.update_clip_speed(clip, animation_style, desired_duration) + return VideoProcessor.update_clip_speed(clip, desired_duration) @staticmethod - def 
update_video_bytes_speed(video_bytes, animation_style, desired_duration): + def update_video_bytes_speed(video_bytes, desired_duration): # video_io = BytesIO(video_bytes) temp_file = tempfile.NamedTemporaryFile(delete=False, suffix=".mp4", mode='wb') with open(temp_file.name, 'wb') as out_file: @@ -21,43 +21,42 @@ def update_video_bytes_speed(video_bytes, animation_style, desired_duration): clip = VideoFileClip(temp_file.name) os.remove(temp_file.name) - return VideoProcessor.update_clip_speed(clip, animation_style, desired_duration) + return VideoProcessor.update_clip_speed(clip, desired_duration) @staticmethod - def update_clip_speed(clip: VideoFileClip, animation_style, desired_duration): + def update_clip_speed(clip: VideoFileClip, desired_duration): temp_output_file = tempfile.NamedTemporaryFile(delete=False, suffix=".mp4", mode='wb') - if animation_style == AnimationStyleType.DIRECT_MORPHING.value: - clip = clip.set_fps(120) + # if animation_style == AnimationStyleType.DIRECT_MORPHING.value: + # clip = clip.set_fps(120) - # Calculate the number of frames to keep - input_duration = clip.duration - total_frames = len(list(clip.iter_frames())) - target_frames = int(total_frames * (desired_duration / input_duration)) + # # Calculate the number of frames to keep + # input_duration = clip.duration + # total_frames = len(list(clip.iter_frames())) + # target_frames = int(total_frames * (desired_duration / input_duration)) - # Determine which frames to keep - keep_every_n_frames = total_frames / target_frames - frames_to_keep = [int(i * keep_every_n_frames) - for i in range(target_frames)] + # # Determine which frames to keep + # keep_every_n_frames = total_frames / target_frames + # frames_to_keep = [int(i * keep_every_n_frames) + # for i in range(target_frames)] - # Create a new video clip with the selected frames - output_clip = concatenate_videoclips( - [clip.subclip(i/clip.fps, (i+1)/clip.fps) for i in frames_to_keep]) + # # Create a new video clip with the 
selected frames + # output_clip = concatenate_videoclips( + # [clip.subclip(i/clip.fps, (i+1)/clip.fps) for i in frames_to_keep]) - output_clip.write_videofile(filename=temp_output_file.name, codec="libx265") + # output_clip.write_videofile(filename=temp_output_file.name, codec="libx265") - # modifying speed for any other animation method - else: - input_video_duration = clip.duration - desired_speed_change = float( - input_video_duration) / float(desired_duration) + # changing the video speed + input_video_duration = clip.duration + desired_speed_change = float( + input_video_duration) / float(desired_duration) - print("Desired Speed Change: " + str(desired_speed_change)) + print("Desired Speed Change: " + str(desired_speed_change)) - # Apply the speed change using moviepy - output_clip = clip.fx(vfx.speedx, desired_speed_change) - - output_clip.write_videofile(filename=temp_output_file.name, codec="libx264", preset="fast") + # Apply the speed change using moviepy + output_clip = clip.fx(vfx.speedx, desired_speed_change) + + output_clip.write_videofile(filename=temp_output_file.name, codec="libx264", preset="fast") with open(temp_output_file.name, 'rb') as f: video_bytes = f.read() From 71116b31a9f8ea1871f779c10699c600ef0f55cb Mon Sep 17 00:00:00 2001 From: piyushK52 Date: Sat, 28 Oct 2023 21:39:19 +0530 Subject: [PATCH 124/164] wip: removing redundant fields --- backend/db_repo.py | 25 ------- backend/models.py | 39 +--------- backend/serializers/dao.py | 39 +--------- backend/serializers/dto.py | 27 ------- banodoco_settings.py | 24 ++----- .../components/frame_styling_page.py | 15 ++-- ui_components/methods/ml_methods.py | 3 - ui_components/models.py | 56 ++------------- .../widgets/animation_style_element.py | 5 +- ui_components/widgets/cropping_element.py | 29 +++----- ui_components/widgets/drawing_element.py | 7 +- .../widgets/frame_movement_widgets.py | 10 +-- ui_components/widgets/inpainting_element.py | 22 +++--- ui_components/widgets/styling_element.py | 
72 +++++-------------- ui_components/widgets/timeline_view.py | 9 +-- utils/common_utils.py | 2 +- utils/media_processor/interpolator.py | 4 +- 17 files changed, 80 insertions(+), 308 deletions(-) diff --git a/backend/db_repo.py b/backend/db_repo.py index f652d1e6..b343f2b0 100644 --- a/backend/db_repo.py +++ b/backend/db_repo.py @@ -1139,13 +1139,6 @@ def create_project_setting(self, **kwargs): return InternalResponse({}, 'invalid audio', False) attributes._data["audio_id"] = audio.id - - if "input_video_id" in attributes.data and attributes.data["input_video_id"]: - video = InternalFileObject.objects.filter(uuid=attributes.data["input_video_id"], is_disabled=False).first() - if not video: - return InternalResponse({}, 'invalid video', False) - - attributes._data["input_video_id"] = video.id setting = Setting.objects.create(**attributes.data) @@ -1184,13 +1177,6 @@ def update_project_setting(self, **kwargs): return InternalResponse({}, 'invalid audio', False) attributes._data["audio_id"] = audio.id - - if "input_video_id" in attributes.data and attributes.data["input_video_id"]: - video = InternalFileObject.objects.filter(uuid=attributes.data["input_video_id"], is_disabled=False).first() - if not video: - return InternalResponse({}, 'invalid video', False) - - attributes._data["input_video_id"] = video.id if 'model_id' in attributes.data and attributes.data['model_id']: model = AIModel.objects.filter(uuid=attributes.data['model_id'], is_disabled=False).first() @@ -1242,13 +1228,6 @@ def bulk_update_project_setting(self, **kwargs): attributes._data['audio_id'] = audio.id - if 'input_video_id' in attributes.data and attributes.data['input_video_id']: - video = InternalFileObject.objects.filter(uuid=attributes.data['input_video_id'], is_disabled=False).first() - if not video: - return InternalResponse({}, 'invalid video', False) - - attributes._data['input_video_id'] = video.id - for attr, value in attributes.data.items(): setattr(setting, attr, value) setting.save() 
@@ -1397,22 +1376,18 @@ def restore_backup(self, backup_uuid: str): mask=backup_timing['mask_uuid'], canny_image=backup_timing['canny_image_uuid'], primary_image=backup_timing['primary_image_uuid'], - custom_model_id_list=backup_timing['custom_model_id_list'], frame_time=backup_timing['frame_time'], frame_number=backup_timing['frame_number'], alternative_images=backup_timing['alternative_images'], - custom_pipeline=backup_timing['custom_pipeline'], prompt=backup_timing['prompt'], negative_prompt=backup_timing['negative_prompt'], guidance_scale=backup_timing['guidance_scale'], seed=backup_timing['seed'], - num_inteference_steps=backup_timing['num_inteference_steps'], strength=backup_timing['strength'], notes=backup_timing['notes'], adapter_type=backup_timing['adapter_type'], clip_duration=backup_timing['clip_duration'], animation_style=backup_timing['animation_style'], - low_threshold=backup_timing['low_threshold'], high_threshold=backup_timing['high_threshold'], aux_frame_index=backup_timing['aux_frame_index'] ) diff --git a/backend/models.py b/backend/models.py index bf8d355b..5ac42964 100644 --- a/backend/models.py +++ b/backend/models.py @@ -1,4 +1,3 @@ -from django.utils import timezone from django.db import models import uuid import json @@ -218,23 +217,11 @@ class Timing(BaseModel): mask = models.ForeignKey(InternalFileObject, related_name="mask", on_delete=models.DO_NOTHING, null=True) canny_image = models.ForeignKey(InternalFileObject, related_name="canny_image", on_delete=models.DO_NOTHING, null=True) primary_image = models.ForeignKey(InternalFileObject, related_name="primary_image", on_delete=models.DO_NOTHING, null=True) # variant number that is currently selected (among alternative images) NONE if none is present - shot_id = models.ForeignKey(Shot, on_delete=models.CASCADE, null=True) - custom_model_id_list = models.TextField(default=None, null=True, blank=True) + shot_id = models.ForeignKey(Shot, on_delete=models.CASCADE, null=True) alternative_images 
= models.TextField(default=None, null=True) - custom_pipeline = models.CharField(max_length=255, default=None, null=True, blank=True) - prompt = models.TextField(default='', blank=True) - negative_prompt = models.TextField(default="", blank=True) - guidance_scale = models.FloatField(default=7.5) - seed = models.IntegerField(default=0) - num_inteference_steps = models.IntegerField(default=50) - strength = models.FloatField(default=1) notes = models.TextField(default="", blank=True) - adapter_type = models.CharField(max_length=255, default=None, null=True, blank=True) - clip_duration = models.FloatField(default=None, null=True) # clip duration of the timed_clip - low_threshold = models.FloatField(default=0) - high_threshold = models.FloatField(default=0) - aux_frame_index = models.IntegerField(default=0) # starts with 0 # TODO: udpate this - transformation_stage = models.CharField(max_length=255, default=None, null=True) + clip_duration = models.FloatField(default=None, null=True) + aux_frame_index = models.IntegerField(default=0) class Meta: app_label = 'backend' @@ -379,29 +366,9 @@ class Setting(BaseModel): project = models.ForeignKey(Project, on_delete=models.CASCADE) default_model = models.ForeignKey(AIModel, on_delete=models.DO_NOTHING, null=True) audio = models.ForeignKey(InternalFileObject, related_name="audio", on_delete=models.DO_NOTHING, null=True) - input_video = models.ForeignKey(InternalFileObject, related_name="input_video", on_delete=models.DO_NOTHING, null=True) - default_prompt = models.TextField(default="") - default_strength = models.FloatField(default=0.7) - default_custom_pipeline = models.CharField(max_length=255, default="", blank=True) input_type = models.CharField(max_length=255) # video, image, audio - extraction_type = models.CharField(max_length=255) # Extract manually width = models.IntegerField(default=512) height = models.IntegerField(default=512) - default_negative_prompt = models.TextField(default="") - default_guidance_scale = 
models.FloatField(default=7.5) - default_seed = models.IntegerField(default=0) - default_num_inference_steps = models.IntegerField(default=50) - default_stage = models.CharField(max_length=255) # extracted_key_frames - default_custom_model_uuid_list = models.TextField(default=None, null=True, blank=True) - default_adapter_type = models.CharField(max_length=255, default="", blank=True) - guidance_type = models.CharField(max_length=255) # "Drawing", "Images", "Video" - default_animation_style = models.CharField(max_length=255) # "Interpolation", "Direct Morphing" - default_low_threshold = models.FloatField(default=0) - default_high_threshold = models.FloatField(default=0) - zoom_level = models.IntegerField(default=100) - x_shift = models.IntegerField(default=0) - y_shift = models.IntegerField(default=0) - rotation_angle_value = models.FloatField(default=0.0) class Meta: app_label = 'backend' diff --git a/backend/serializers/dao.py b/backend/serializers/dao.py index 3673cd85..26ff23ee 100644 --- a/backend/serializers/dao.py +++ b/backend/serializers/dao.py @@ -79,22 +79,17 @@ class CreateTimingDao(serializers.Serializer): source_image_id = serializers.CharField(max_length=100, required=False) mask_id = serializers.CharField(max_length=100, required=False) canny_image_id = serializers.CharField(max_length=100, required=False) - custom_model_id_list = serializers.CharField(max_length=100, required=False) shot_id = serializers.CharField(max_length=100) primary_image = serializers.CharField(max_length=100, required=False) alternative_images = serializers.CharField(max_length=100, required=False) - custom_pipeline = serializers.CharField(max_length=100, required=False, allow_blank=True, default="") prompt = serializers.CharField(max_length=1024, required=False) negative_prompt = serializers.CharField(max_length=1024, required=False) guidance_scale = serializers.FloatField(required=False) seed = serializers.IntegerField(required=False) - num_inteference_steps = 
serializers.IntegerField(required=False) strength = serializers.FloatField(required=False) notes = serializers.CharField(max_length=1024, required=False) adapter_type = serializers.CharField(max_length=255, required=False) clip_duration = serializers.FloatField(default=0, required=False) - low_threshold = serializers.FloatField(default=100, required=False) - high_threshold = serializers.FloatField(default=200, required=False) aux_frame_index = serializers.IntegerField(required=False) @@ -120,47 +115,15 @@ class CreateSettingDao(serializers.Serializer): project_id = serializers.CharField(max_length=255) default_model_id = serializers.CharField(max_length=255, required=False) audio_id = serializers.CharField(max_length=255, required=False) - input_video_id = serializers.CharField(max_length=255, required=False) - default_prompt = serializers.CharField(max_length=255, required=False) - default_strength = serializers.FloatField(default=0.63, required=False) - default_custom_pipeline = serializers.CharField(max_length=255, default="", allow_blank=True, required=False) input_type = serializers.CharField(max_length=255, required=False) - extraction_type = serializers.CharField(max_length=255, required=False) width = serializers.IntegerField(default=512, required=False) height = serializers.IntegerField(default=512, required=False) - default_negative_prompt = serializers.CharField(max_length=1024, default="", allow_blank=True, required=False) - default_guidance_scale = serializers.FloatField(default=7.5, required=False) - default_seed = serializers.IntegerField(default=255512, required=False) - default_num_inference_steps = serializers.IntegerField(default=40, required=False) - default_stage = serializers.CharField(max_length=255) - default_custom_model_uuid_list = serializers.CharField(default="[]", max_length=1024, required=False) - default_adapter_type = serializers.CharField(default="", max_length=255, required=False) - guidance_type = 
serializers.CharField(default=GuidanceType.IMAGE.value, max_length=255, required=False) - default_animation_style = serializers.CharField(default="", max_length=255, required=False) - default_low_threshold = serializers.IntegerField(default=0, required=False) - default_high_threshold = serializers.IntegerField(default=100, required=False) class UpdateSettingDao(serializers.Serializer): project_id = serializers.CharField(max_length=255) default_model_id = serializers.CharField(max_length=255, required=False) audio_id = serializers.CharField(max_length=255, required=False) - input_video_id = serializers.CharField(max_length=255, required=False) - default_prompt = serializers.CharField(max_length=255, required=False) - default_strength = serializers.CharField(max_length=255, required=False) - default_custom_pipeline = serializers.CharField(max_length=255, required=False, allow_blank=True) input_type = serializers.CharField(max_length=255, required=False) - extraction_type = serializers.CharField(max_length=255, required=False) width = serializers.IntegerField(required=False) - height = serializers.IntegerField(required=False) - default_negative_prompt = serializers.CharField(max_length=1024, default="", allow_blank=True, required=False) - default_guidance_scale = serializers.FloatField(required=False) - default_seed = serializers.IntegerField(required=False) - default_num_inference_steps = serializers.IntegerField(required=False) - default_stage = serializers.CharField(max_length=255, required=False) - default_custom_model_id_list = serializers.CharField(max_length=1024, required=False) - default_adapter_type = serializers.CharField(max_length=255, required=False) - guidance_type = serializers.CharField(max_length=255, required=False) - default_animation_style = serializers.CharField(max_length=255, required=False) - default_low_threshold = serializers.IntegerField(required=False) - default_high_threshold = serializers.IntegerField(required=False) \ No newline at end 
of file + height = serializers.IntegerField(required=False) \ No newline at end of file diff --git a/backend/serializers/dto.py b/backend/serializers/dto.py index 565b4dbd..24ade5a0 100644 --- a/backend/serializers/dto.py +++ b/backend/serializers/dto.py @@ -89,24 +89,18 @@ class Meta: "source_image", "mask", "canny_image", - "custom_model_id_list", "primary_image", "alternative_images", - "custom_pipeline", "prompt", "negative_prompt", "guidance_scale", "seed", - "num_inteference_steps", "strength", "notes", "adapter_type", "clip_duration", - "low_threshold", - "high_threshold", "aux_frame_index", "created_on", - "transformation_stage" ) @@ -129,7 +123,6 @@ class SettingDto(serializers.ModelSerializer): project = ProjectDto() default_model = AIModelDto() audio = InternalFileDto() - input_video = InternalFileDto() class Meta: model = Setting fields = ( @@ -137,30 +130,10 @@ class Meta: "project", "default_model", "audio", - "input_video", - "default_prompt", - "default_strength", - "default_custom_pipeline", "input_type", - "extraction_type", "width", "height", - "default_negative_prompt", - "default_guidance_scale", - "default_seed", - "default_num_inference_steps", - "default_stage", - "default_custom_model_uuid_list", - "default_adapter_type", - "guidance_type", - "default_animation_style", - "default_low_threshold", - "default_high_threshold", "created_on", - "zoom_level", - "x_shift", - "y_shift", - "rotation_angle_value" ) diff --git a/banodoco_settings.py b/banodoco_settings.py index a7c810b1..826d69f1 100644 --- a/banodoco_settings.py +++ b/banodoco_settings.py @@ -3,7 +3,7 @@ import streamlit as st from PIL import Image -from shared.constants import SERVER, AIModelCategory, AIModelType, GuidanceType, InternalFileType, ServerType +from shared.constants import SERVER, AIModelCategory, GuidanceType, InternalFileType, ServerType from shared.logging.constants import LoggingType from shared.logging.logging import AppLogger from shared.constants import 
AnimationStyleType @@ -11,9 +11,8 @@ from ui_components.methods.file_methods import save_or_host_file from ui_components.models import InternalAppSettingObject, InternalFrameTimingObject, InternalProjectObject, InternalUserObject from utils.common_utils import create_working_assets -from utils.constants import ML_MODEL_LIST, ImageStage +from utils.constants import ML_MODEL_LIST from utils.data_repo.data_repo import DataRepo -from utils.ml_processor.replicate.constants import REPLICATE_MODEL logger = AppLogger() @@ -74,8 +73,7 @@ def create_new_user_data(user: InternalUserObject): create_new_project(user, 'my_first_project') -def create_new_project(user: InternalUserObject, project_name: str, width=512, height=512,\ - guidance_type=GuidanceType.DRAWING.value, animation_style=AnimationStyleType.INTERPOLATION.value): +def create_new_project(user: InternalUserObject, project_name: str, width=512, height=512): data_repo = DataRepo() # creating a new project for this user @@ -135,23 +133,9 @@ def create_new_project(user: InternalUserObject, project_name: str, width=512, h project_setting_data = { "project_id" : project.uuid, "input_type" : "video", - "default_strength": 1, - "extraction_type" : "Extract manually", "width" : width, "height" : height, - "default_prompt": "an oil painting", - "default_model_id": model_list[0].uuid, - "default_negative_prompt" : "", - "default_guidance_scale" : 7.5, - "default_seed" : 1234, - "default_num_inference_steps" : 30, - "default_stage" : ImageStage.SOURCE_IMAGE.value, - "default_custom_model_id_list" : "[]", - "default_adapter_type" : "N", - "guidance_type" : guidance_type, - "default_animation_style" : animation_style, - "default_low_threshold" : 50, - "default_high_threshold" : 100 + "default_model_id": model_list[0].uuid } _ = data_repo.create_project_setting(**project_setting_data) diff --git a/ui_components/components/frame_styling_page.py b/ui_components/components/frame_styling_page.py index 2ff9ae46..22344a28 100644 --- 
a/ui_components/components/frame_styling_page.py +++ b/ui_components/components/frame_styling_page.py @@ -18,7 +18,7 @@ from ui_components.widgets.variant_comparison_grid import variant_comparison_grid from utils import st_memory -from ui_components.constants import CreativeProcessType +from ui_components.constants import CreativeProcessType, DefaultProjectSettingParams, DefaultTimingStyleParams from utils.data_repo.data_repo import DataRepo @@ -30,15 +30,14 @@ def frame_styling_page(shot_uuid: str): project_settings = data_repo.get_project_setting(shot.project.uuid) if "strength" not in st.session_state: - st.session_state['strength'] = project_settings.default_strength + st.session_state['strength'] = DefaultProjectSettingParams.batch_strength st.session_state['prompt_value'] = project_settings.default_prompt st.session_state['model'] = project_settings.default_model.uuid - st.session_state['custom_pipeline'] = project_settings.default_custom_pipeline - st.session_state['negative_prompt_value'] = project_settings.default_negative_prompt - st.session_state['guidance_scale'] = project_settings.default_guidance_scale - st.session_state['seed'] = project_settings.default_seed - st.session_state['num_inference_steps'] = project_settings.default_num_inference_steps - st.session_state['transformation_stage'] = project_settings.default_stage + st.session_state['negative_prompt_value'] = DefaultProjectSettingParams.batch_negative_prompt + st.session_state['guidance_scale'] = DefaultProjectSettingParams.batch_guidance_scale + st.session_state['seed'] = DefaultProjectSettingParams.batch_seed + st.session_state['num_inference_steps'] = DefaultProjectSettingParams.batch_num_inference_steps + st.session_state['transformation_stage'] = DefaultProjectSettingParams.batch_transformation_stage if "current_frame_uuid" not in st.session_state: timing = data_repo.get_timing_list_from_shot(shot_uuid)[0] diff --git a/ui_components/methods/ml_methods.py 
b/ui_components/methods/ml_methods.py index 5e4c938c..7f38988d 100644 --- a/ui_components/methods/ml_methods.py +++ b/ui_components/methods/ml_methods.py @@ -49,11 +49,8 @@ def trigger_restyling_process(timing_uuid, update_inference_settings, \ default_guidance_scale=query_obj.guidance_scale, default_seed=query_obj.seed, default_num_inference_steps=query_obj.num_inference_steps, - default_which_stage_to_run_on=transformation_stage, default_custom_models=query_obj.data.get('custom_models', []), default_adapter_type=query_obj.adapter_type, - default_low_threshold=query_obj.low_threshold, - default_high_threshold=query_obj.high_threshold, add_image_in_params=st.session_state['add_image_in_params'], ) diff --git a/ui_components/models.py b/ui_components/models.py index 6c8b1a25..07fb12d7 100644 --- a/ui_components/models.py +++ b/ui_components/models.py @@ -111,19 +111,14 @@ def _get_training_image_list(self, training_image_list): class InternalFrameTimingObject: def __init__(self, **kwargs): self.uuid = kwargs['uuid'] if 'uuid' in kwargs else None - self.source_image = InternalFileObject( - **kwargs["source_image"]) if 'source_image' in kwargs and kwargs["source_image"] else None - self.mask = InternalFileObject( - **kwargs["mask"]) if 'mask' in kwargs and kwargs["mask"] else None - self.canny_image = InternalFileObject( - **kwargs["canny_image"]) if 'canny_image' in kwargs and kwargs["canny_image"] else None - self.primary_image = InternalFileObject( - **kwargs["primary_image"]) if 'primary_image' in kwargs and kwargs["primary_image"] else None - self.alternative_images = kwargs['alternative_images'] if 'alternative_images' in kwargs and kwargs["alternative_images"] else [ - ] - self.custom_pipeline = kwargs['custom_pipeline'] if 'custom_pipeline' in kwargs and kwargs["custom_pipeline"] else None + self.source_image = InternalFileObject(**kwargs["source_image"]) if key_present('source_image', kwargs) else None + self.shot = InternalShotObject(**kwargs['shot']) if 
key_present('shot', kwargs) else None + self.mask = InternalFileObject(**kwargs["mask"]) if key_present('mask', kwargs) else None + self.canny_image = InternalFileObject( **kwargs["canny_image"]) if key_present('canny_image', kwargs) else None + self.primary_image = InternalFileObject(**kwargs["primary_image"]) if key_present('primary_image', kwargs) else None + self.alternative_images = kwargs['alternative_images'] if key_present('alternative_images', kwargs) else [] self.notes = kwargs['notes'] if 'notes' in kwargs and kwargs["notes"] else "" - self.clip_duration = kwargs['clip_duration'] if 'clip_duration' in kwargs and kwargs["clip_duration"] else 0 + self.clip_duration = kwargs['clip_duration'] if key_present('clip_duration', kwargs) else 0 self.aux_frame_index = kwargs['aux_frame_index'] if 'aux_frame_index' in kwargs else 0 @property @@ -219,48 +214,11 @@ def __init__(self, **kwargs): **kwargs["default_model"]) if key_present('default_model', kwargs) else None self.audio = InternalFileObject( **kwargs["audio"]) if key_present('audio', kwargs) else None - self.input_video = InternalFileObject( - **kwargs["input_video"]) if key_present('input_video', kwargs) else None - self.default_prompt = kwargs['default_prompt'] if key_present( - 'default_prompt', kwargs) else None - self.default_strength = kwargs['default_strength'] if key_present( - 'default_strength', kwargs) else None - self.default_custom_pipeline = kwargs['default_custom_pipeline'] if key_present( - 'default_custom_pipeline', kwargs) else None self.input_type = kwargs['input_type'] if key_present( 'input_type', kwargs) else None - self.extraction_type = kwargs['extraction_type'] if key_present( - 'extraction_type', kwargs) else None self.width = kwargs['width'] if key_present('width', kwargs) else None self.height = kwargs['height'] if key_present( 'height', kwargs) else None - self.default_negative_prompt = kwargs['default_negative_prompt'] if key_present( - 'default_negative_prompt', kwargs) else 
None - self.default_guidance_scale = kwargs['default_guidance_scale'] if key_present( - 'default_guidance_scale', kwargs) else None - self.default_seed = kwargs['default_seed'] if key_present( - 'default_seed', kwargs) else None - self.default_num_inference_steps = kwargs['default_num_inference_steps'] if key_present( - 'default_num_inference_steps', kwargs) else None - self.default_stage = kwargs['default_stage'] if key_present( - 'default_stage', kwargs) else None - self.default_custom_model_uuid_list = kwargs['default_custom_model_uuid_list'] if key_present( - 'default_custom_model_uuid_list', kwargs) else [] - self.default_adapter_type = kwargs['default_adapter_type'] if key_present( - 'default_adapter_type', kwargs) else None - self.guidance_type = kwargs['guidance_type'] if key_present( - 'guidance_type', kwargs) else None - self.default_animation_style = kwargs['default_animation_style'] if key_present( - 'default_animation_style', kwargs) else None - self.default_low_threshold = kwargs['default_low_threshold'] if key_present( - 'default_low_threshold', kwargs) else None - self.default_high_threshold = kwargs['default_high_threshold'] if key_present( - 'default_high_threshold', kwargs) else None - self.zoom_level = kwargs['zoom_level'] if key_present( - 'zoom_level', kwargs) else None - self.x_shift = kwargs['x_shift'] if key_present('x_shift', kwargs) else None - self.y_shift = kwargs['y_shift'] if key_present('y_shift', kwargs) else None - self.rotation_angle_value = kwargs['rotation_angle_value'] if key_present('rotation_angle_value', kwargs) else None class InternalBackupObject: diff --git a/ui_components/widgets/animation_style_element.py b/ui_components/widgets/animation_style_element.py index 5f6aa3d3..49a271d9 100644 --- a/ui_components/widgets/animation_style_element.py +++ b/ui_components/widgets/animation_style_element.py @@ -2,6 +2,7 @@ import streamlit as st from typing import List from shared.constants import AnimationStyleType, 
AnimationToolType +from ui_components.constants import DefaultProjectSettingParams from ui_components.methods.video_methods import create_single_interpolated_clip from utils.data_repo.data_repo import DataRepo from utils.ml_processor.motion_module import AnimateDiffCheckpoint @@ -39,10 +40,10 @@ def animation_style_element(shot_uuid): prompt_column_1, prompt_column_2 = st.columns([1, 1]) with prompt_column_1: - positive_prompt = st.text_area("Positive Prompt:", value=project_settings.default_prompt, key="positive_prompt") + positive_prompt = st.text_area("Positive Prompt:", value=DefaultProjectSettingParams.batch_prompt, key="positive_prompt") with prompt_column_2: - negative_prompt = st.text_area("Negative Prompt:", value=project_settings.default_negative_prompt, key="negative_prompt") + negative_prompt = st.text_area("Negative Prompt:", value=DefaultProjectSettingParams.batch_negative_prompt, key="negative_prompt") animate_col_1, animate_col_2, _ = st.columns([1, 1, 2]) diff --git a/ui_components/widgets/cropping_element.py b/ui_components/widgets/cropping_element.py index 6226dac5..557468b9 100644 --- a/ui_components/widgets/cropping_element.py +++ b/ui_components/widgets/cropping_element.py @@ -1,5 +1,4 @@ from math import gcd -import os import time import uuid import streamlit as st @@ -11,7 +10,7 @@ from ui_components.methods.common_methods import apply_image_transformations, fetch_image_by_stage from ui_components.constants import WorkflowStageType from ui_components.methods.file_methods import generate_pil_image, save_or_host_file -from ui_components.models import InternalProjectObject, InternalSettingObject +from ui_components.models import InternalProjectObject from ui_components.widgets.image_zoom_widgets import reset_zoom_element, save_zoomed_image, zoom_inputs from ui_components.widgets.inpainting_element import inpaint_in_black_space_element from utils.data_repo.data_repo import DataRepo @@ -19,30 +18,27 @@ from utils import st_memory -def 
cropping_selector_element(project_uuid): +def cropping_selector_element(shot_uuid): selector1, selector2, _ = st.columns([1, 1, 1]) with selector1: - which_stage = st_memory.radio("Which stage to work on?", ["Styled Key Frame", "Unedited Key Frame"], key="which_stage", horizontal=True) + crop_stage = st_memory.radio("Which stage to work on?", ["Styled Key Frame", "Unedited Key Frame"], key="crop_stage", horizontal=True) with selector2: how_to_crop = st_memory.radio("How to crop:", options=["Precision Cropping","Manual Cropping"], key="how_to_crop",horizontal=True) - if which_stage == "Styled Key Frame": + if crop_stage == "Styled Key Frame": stage_name = WorkflowStageType.STYLED.value - elif which_stage == "Unedited Key Frame": + elif crop_stage == "Unedited Key Frame": stage_name = WorkflowStageType.SOURCE.value if how_to_crop == "Manual Cropping": manual_cropping_element(stage_name, st.session_state['current_frame_uuid']) elif how_to_crop == "Precision Cropping": - precision_cropping_element(stage_name, project_uuid) + precision_cropping_element(stage_name, shot_uuid) -def precision_cropping_element(stage, project_uuid): +def precision_cropping_element(stage, shot_uuid): data_repo = DataRepo() - project_settings: InternalSettingObject = data_repo.get_project_setting( - project_uuid) - - - input_image = fetch_image_by_stage(project_uuid, stage, st.session_state['current_frame_index'] - 1) + shot = data_repo.get_shot_from_uuid(shot_uuid) + input_image = fetch_image_by_stage(shot_uuid, stage, st.session_state['current_frame_index'] - 1) # TODO: CORRECT-CODE check if this code works if not input_image: @@ -78,12 +74,10 @@ def precision_cropping_element(stage, project_uuid): time.sleep(1) st.rerun() - inpaint_in_black_space_element( - output_image, project_settings.project.uuid, stage) + inpaint_in_black_space_element(output_image, shot.project.uuid, stage) def manual_cropping_element(stage, timing_uuid): - data_repo = DataRepo() timing = 
data_repo.get_timing_from_uuid(timing_uuid) project_uuid = timing.shot.project.uuid @@ -192,5 +186,4 @@ def get_working_image(): with cropbtn2: st.warning("Warning: This will overwrite the original image") - inpaint_in_black_space_element( - cropped_img, timing.shot.project.uuid, stage=stage) + inpaint_in_black_space_element(cropped_img, project_uuid, stage=stage) diff --git a/ui_components/widgets/drawing_element.py b/ui_components/widgets/drawing_element.py index b8cc8832..8ae96e05 100644 --- a/ui_components/widgets/drawing_element.py +++ b/ui_components/widgets/drawing_element.py @@ -16,12 +16,11 @@ def drawing_element(timing_details,project_settings,project_uuid,stage=WorkflowStageType.STYLED.value): + drawing_stage = st_memory.radio("Which stage to work on?", ["Styled Image", "Guidance Image"], horizontal=True, key="drawing_stage") - which_stage_to_draw_on = st_memory.radio("Which stage to work on?", ["Styled Image", "Guidance Image"], horizontal=True, key="which_stage_drawing") - - if which_stage_to_draw_on == "Styled Image": + if drawing_stage == "Styled Image": stage=WorkflowStageType.STYLED.value - elif which_stage_to_draw_on == "Guidance Image": + elif drawing_stage == "Guidance Image": stage=WorkflowStageType.SOURCE.value if stage == WorkflowStageType.SOURCE.value: diff --git a/ui_components/widgets/frame_movement_widgets.py b/ui_components/widgets/frame_movement_widgets.py index dc75d513..066c4485 100644 --- a/ui_components/widgets/frame_movement_widgets.py +++ b/ui_components/widgets/frame_movement_widgets.py @@ -114,17 +114,17 @@ def replace_image_widget(timing_uuid, stage, options=["Uploaded Frame","Other Fr timing = data_repo.get_timing_from_uuid(timing_uuid) timing_list = data_repo.get_timing_list_from_shot(timing.shot.uuid) - replace_with = options[0] if len(options) == 1 else st.radio("Replace with:", options, horizontal=True, key=f"replace_with_what_{stage}_{timing_uuid}") + replace_with = options[0] if len(options) == 1 else st.radio("Replace 
with:", options, horizontal=True, key=f"replacement_entity_{stage}_{timing_uuid}") if replace_with == "Other Frame": - which_stage_to_use_for_replacement = st.radio("Select stage to use:", [ - ImageStage.MAIN_VARIANT.value, ImageStage.SOURCE_IMAGE.value], key=f"which_stage_to_use_for_replacement_{stage}_{timing_uuid}", horizontal=True) + image_replacement_stage = st.radio("Select stage to use:", [ + ImageStage.MAIN_VARIANT.value, ImageStage.SOURCE_IMAGE.value], key=f"image_replacement_stage_{stage}_{timing_uuid}", horizontal=True) which_image_to_use_for_replacement = st.number_input("Select image to use:", min_value=0, max_value=len( timing_list)-1, value=0, key=f"which_image_to_use_for_replacement_{stage}") - if which_stage_to_use_for_replacement == ImageStage.SOURCE_IMAGE.value: + if image_replacement_stage == ImageStage.SOURCE_IMAGE.value: selected_image = timing_list[which_image_to_use_for_replacement].source_image - elif which_stage_to_use_for_replacement == ImageStage.MAIN_VARIANT.value: + elif image_replacement_stage == ImageStage.MAIN_VARIANT.value: selected_image = timing_list[which_image_to_use_for_replacement].primary_image st.image(selected_image.local_path, use_column_width=True) diff --git a/ui_components/widgets/inpainting_element.py b/ui_components/widgets/inpainting_element.py index ba01980f..9d183b9b 100644 --- a/ui_components/widgets/inpainting_element.py +++ b/ui_components/widgets/inpainting_element.py @@ -11,7 +11,7 @@ import streamlit as st from streamlit_drawable_canvas import st_canvas from shared.constants import InferenceType, InternalFileType, ProjectMetaData -from ui_components.constants import CROPPED_IMG_LOCAL_PATH, MASK_IMG_LOCAL_PATH, TEMP_MASK_FILE, WorkflowStageType +from ui_components.constants import CROPPED_IMG_LOCAL_PATH, MASK_IMG_LOCAL_PATH, TEMP_MASK_FILE, DefaultProjectSettingParams, WorkflowStageType from ui_components.methods.file_methods import add_temp_file_to_project, save_or_host_file from utils.data_repo.data_repo 
import DataRepo @@ -23,11 +23,11 @@ def inpainting_element(timing_uuid): - which_stage_to_inpaint = st_memory.radio("Which stage to work on?", ["Styled Key Frame", "Unedited Key Frame"], horizontal=True, key="which_stage_inpainting") + inpainting_stage = st_memory.radio("Which stage to work on?", ["Styled Key Frame", "Unedited Key Frame"], horizontal=True, key="inpainting_stage") - if which_stage_to_inpaint == "Styled Key Frame": + if inpainting_stage == "Styled Key Frame": stage = WorkflowStageType.STYLED.value - elif which_stage_to_inpaint == "Unedited Key Frame": + elif inpainting_stage == "Unedited Key Frame": stage = WorkflowStageType.SOURCE.value data_repo = DataRepo() timing = data_repo.get_timing_from_uuid(timing_uuid) @@ -282,12 +282,12 @@ def inpainting_element(timing_uuid): elif source_of_image == "From Other Frame": btn1, btn2 = st.columns([1, 1]) with btn1: - which_stage_to_use = st.radio("Select stage to use:", WorkflowStageType.value_list()) + inpainting_other_frame_stage = st.radio("Select stage to use:", WorkflowStageType.value_list()) which_image_to_use = st.number_input("Select image to use:", min_value=0, max_value=len(timing_details)-1, value=0) - if which_stage_to_use == WorkflowStageType.SOURCE.value: + if inpainting_other_frame_stage == WorkflowStageType.SOURCE.value: background_image = timing_details[which_image_to_use].source_image - elif which_stage_to_use == WorkflowStageType.STYLED.value: + elif inpainting_other_frame_stage == WorkflowStageType.STYLED.value: background_image = timing_details[which_image_to_use].primary_image with btn2: @@ -297,10 +297,10 @@ def inpainting_element(timing_uuid): btn1, btn2 = st.columns([1, 1]) with btn1: prompt = st.text_area("Prompt:", help="Describe the whole image, but focus on the details you want changed!", - value=project_settings.default_prompt) + value=DefaultProjectSettingParams.batch_prompt) with btn2: negative_prompt = st.text_area( - "Negative Prompt:", help="Enter any things you want to make 
the model avoid!", value=project_settings.default_negative_prompt) + "Negative Prompt:", help="Enter any things you want to make the model avoid!", value=DefaultProjectSettingParams.batch_negative_prompt) edit1, edit2 = st.columns(2) @@ -372,9 +372,9 @@ def inpaint_in_black_space_element(cropped_img, project_uuid, stage=WorkflowStag st.markdown("##### Inpaint in black space:") inpaint_prompt = st.text_area( - "Prompt", value=project_settings.default_prompt) + "Prompt", value=DefaultProjectSettingParams.batch_prompt) inpaint_negative_prompt = st.text_input( - "Negative Prompt", value='edge,branches, frame, fractals, text' + project_settings.default_negative_prompt) + "Negative Prompt", value='edge,branches, frame, fractals, text' + DefaultProjectSettingParams.batch_negative_prompt) if 'precision_cropping_inpainted_image_uuid' not in st.session_state: st.session_state['precision_cropping_inpainted_image_uuid'] = "" diff --git a/ui_components/widgets/styling_element.py b/ui_components/widgets/styling_element.py index 129900c1..e75918e8 100644 --- a/ui_components/widgets/styling_element.py +++ b/ui_components/widgets/styling_element.py @@ -3,6 +3,7 @@ from typing import List from shared.constants import AIModelCategory, AIModelType, ViewType +from ui_components.constants import DefaultProjectSettingParams, DefaultTimingStyleParams from ui_components.methods.ml_methods import trigger_restyling_process from ui_components.models import InternalAIModelObject, InternalFrameTimingObject, InternalSettingObject from utils.constants import ImageStage @@ -23,15 +24,8 @@ def styling_element(timing_uuid, view_type=ViewType.SINGLE.value): append_to_item_name = str(timing.shot.project.uuid) st.markdown("## Batch queries") - if project_settings.default_stage: - if f'index_of_which_stage_to_run_on_{append_to_item_name}' not in st.session_state: - st.session_state["transformation_stage"] = project_settings.default_stage - 
st.session_state[f'index_of_which_stage_to_run_on_{append_to_item_name}'] = stages.index( - st.session_state["transformation_stage"]) - else: - st.session_state["transformation_stage"] = ImageStage.SOURCE_IMAGE.value - st.session_state[f'index_of_which_stage_to_run_on_{append_to_item_name}'] = 0 - + st.session_state["transformation_stage"] = ImageStage.SOURCE_IMAGE.value + st.session_state[f'frame_styling_stage_index_{append_to_item_name}'] = 0 stages1, stages2 = st.columns([1, 1]) with stages1: @@ -40,7 +34,7 @@ def styling_element(timing_uuid, view_type=ViewType.SINGLE.value): options=stages, horizontal=True, key=f"image_stage_selector_{append_to_item_name}", - index=st.session_state[f'index_of_which_stage_to_run_on_{append_to_item_name}'], + index=st.session_state[f'frame_styling_stage_index_{append_to_item_name}'], help="Extracted frames means the original frames from the video." ) @@ -57,8 +51,8 @@ def styling_element(timing_uuid, view_type=ViewType.SINGLE.value): elif not image and st.session_state["transformation_stage"] in [ImageStage.SOURCE_IMAGE.value, ImageStage.MAIN_VARIANT.value]: st.error(f"No {st.session_state['transformation_stage']} image found for this variant") - if stages.index(st.session_state["transformation_stage"]) != st.session_state[f'index_of_which_stage_to_run_on_{append_to_item_name}']: - st.session_state[f'index_of_which_stage_to_run_on_{append_to_item_name}'] = stages.index(st.session_state["transformation_stage"]) + if stages.index(st.session_state["transformation_stage"]) != st.session_state[f'frame_styling_stage_index_{append_to_item_name}']: + st.session_state[f'frame_styling_stage_index_{append_to_item_name}'] = stages.index(st.session_state["transformation_stage"]) st.rerun() @@ -128,15 +122,8 @@ def styling_element(timing_uuid, view_type=ViewType.SINGLE.value): if st.session_state['adapter_type'] in ["canny", "pose"]: canny1, canny2 = st.columns(2) if view_type == ViewType.LIST.value: - if project_settings.default_low_threshold 
!= "": - low_threshold_value = project_settings.default_low_threshold - else: - low_threshold_value = 50 - - if project_settings.default_high_threshold != "": - high_threshold_value = project_settings.default_high_threshold - else: - high_threshold_value = 150 + low_threshold_value = 50 + high_threshold_value = 150 elif view_type == ViewType.SINGLE.value: if timing.low_threshold != "": @@ -307,30 +294,19 @@ def styling_element(timing_uuid, view_type=ViewType.SINGLE.value): st.info("In our experience, setting the seed to 87870, and the guidance scale to 7.5 gets consistently good results. You can set this in advanced settings.") if view_type == ViewType.LIST.value: - if project_settings.default_strength: - st.session_state['strength'] = project_settings.default_strength - else: - st.session_state['strength'] = 0.5 + st.session_state['strength'] = DefaultProjectSettingParams.batch_strength elif view_type == ViewType.SINGLE.value: - if timing.strength: - st.session_state['strength'] = timing.strength - else: - st.session_state['strength'] = 0.5 + st.session_state['strength'] = DefaultTimingStyleParams.strength st.session_state['strength'] = st.slider(f"Strength", value=float( st.session_state['strength']), min_value=0.0, max_value=1.0, step=0.01) if view_type == ViewType.LIST.value: - if project_settings.default_guidance_scale: - st.session_state['guidance_scale'] = project_settings.default_guidance_scale - else: - st.session_state['guidance_scale'] = 7.5 + st.session_state['guidance_scale'] = DefaultProjectSettingParams.batch_guidance_scale + elif view_type == ViewType.SINGLE.value: - if timing.guidance_scale != "": - st.session_state['guidance_scale'] = timing.guidance_scale - else: - st.session_state['guidance_scale'] = 7.5 + st.session_state['guidance_scale'] = DefaultTimingStyleParams.guidance_scale if not ('negative_prompt_value' in st.session_state and st.session_state['negative_prompt_value']) and timing.negative_prompt: 
st.session_state['negative_prompt_value'] = timing.negative_prompt @@ -346,30 +322,20 @@ def styling_element(timing_uuid, view_type=ViewType.SINGLE.value): f"Guidance scale", value=float(st.session_state['guidance_scale'])) if view_type == ViewType.LIST.value: - if project_settings.default_seed != "": - st.session_state['seed'] = project_settings.default_seed - else: - st.session_state['seed'] = 0 + st.session_state['seed'] = DefaultProjectSettingParams.batch_seed elif view_type == ViewType.SINGLE.value: - if timing.seed != "": - st.session_state['seed'] = timing.seed - else: - st.session_state['seed'] = 0 + st.session_state['seed'] = DefaultTimingStyleParams.seed st.session_state['seed'] = st.number_input( f"Seed", value=int(st.session_state['seed'])) if view_type == ViewType.LIST.value: - if project_settings.default_num_inference_steps: - st.session_state['num_inference_steps'] = project_settings.default_num_inference_steps - else: - st.session_state['num_inference_steps'] = 50 + st.session_state['num_inference_steps'] = DefaultProjectSettingParams.batch_num_inference_steps + elif view_type == ViewType.SINGLE.value: - if timing.num_inference_steps: - st.session_state['num_inference_steps'] = timing.num_inference_steps - else: - st.session_state['num_inference_steps'] = 50 + st.session_state['num_inference_steps'] = DefaultTimingStyleParams.num_inference_steps + st.session_state['num_inference_steps'] = st.number_input( f"Inference steps", value=int(st.session_state['num_inference_steps'])) diff --git a/ui_components/widgets/timeline_view.py b/ui_components/widgets/timeline_view.py index 2553e727..b015ab64 100644 --- a/ui_components/widgets/timeline_view.py +++ b/ui_components/widgets/timeline_view.py @@ -8,7 +8,7 @@ from ui_components.widgets.add_key_frame_element import add_key_frame -def timeline_view_buttons(i, j, timing_list, shift_frames_setting, time_setter_toggle, replace_image_widget_toggle, duration_setter_toggle, copy_frame_toggle, move_frames_toggle, 
delete_frames_toggle, change_position_toggle, project_uuid): +def timeline_view_buttons(i, j, timing_list, time_setter_toggle, replace_image_widget_toggle, duration_setter_toggle, copy_frame_toggle, move_frames_toggle, delete_frames_toggle, change_position_toggle, project_uuid): if replace_image_widget_toggle: replace_image_widget(timing_list[i + j].uuid, stage=WorkflowStageType.STYLED.value,options=["Uploaded Frame"]) @@ -48,11 +48,8 @@ def timeline_view(shot_uuid, stage): st.markdown("***") - header_col_1, header_col_2, header_col_3 = st.columns([1.5,4,1.5]) + _, header_col_2, header_col_3 = st.columns([1.5,4,1.5]) - with header_col_1: - shift_frames_setting = st.toggle("Shift Frames", help="If set to True, it will shift the frames after your adjustment forward by the amount of time you move.") - with header_col_2: col1, col2, col3 = st.columns(3) @@ -93,6 +90,6 @@ def timeline_view(shot_uuid, stage): else: st.error("No video found for this frame.") with st.expander(f'Frame #{display_number}', True): - timeline_view_buttons(i, j, shot_list, shift_frames_setting, time_setter_toggle, replace_image_widget_toggle, duration_setter_toggle, copy_frame_toggle, move_frames_toggle, delete_frames_toggle, change_position_toggle, project_uuid) + timeline_view_buttons(i, j, shot_list, time_setter_toggle, replace_image_widget_toggle, duration_setter_toggle, copy_frame_toggle, move_frames_toggle, delete_frames_toggle, change_position_toggle, project_uuid) diff --git a/utils/common_utils.py b/utils/common_utils.py index b42915a3..d6c8fd1a 100644 --- a/utils/common_utils.py +++ b/utils/common_utils.py @@ -157,7 +157,7 @@ def reset_project_state(): def reset_styling_settings(timing_uuid): keys_to_delete = [ - f"index_of_which_stage_to_run_on_{timing_uuid}", + f"frame_styling_stage_index_{timing_uuid}", "index_of_default_model", "index_of_controlnet_adapter_type", "index_of_dreambooth_model", diff --git a/utils/media_processor/interpolator.py b/utils/media_processor/interpolator.py 
index 590f5de5..5adf9d54 100644 --- a/utils/media_processor/interpolator.py +++ b/utils/media_processor/interpolator.py @@ -4,6 +4,7 @@ import requests as r import numpy as np from shared.constants import AnimationStyleType, AnimationToolType +from ui_components.constants import DefaultTimingStyleParams from ui_components.methods.file_methods import generate_temp_file from ui_components.models import InferenceLogObject @@ -36,8 +37,7 @@ def calculate_dynamic_interpolations_steps(clip_duration): def create_interpolated_clip(img_location_list, animation_style, settings, variant_count=1, queue_inference=False): data_repo = DataRepo() if not animation_style: - project_setting = data_repo.get_project_setting(st.session_state["project_uuid"]) - animation_style = project_setting.default_animation_style + animation_style = DefaultTimingStyleParams.animation_style if animation_style == AnimationStyleType.INTERPOLATION.value: return VideoInterpolator.video_through_frame_interpolation( From 3e03fd3de0a6fc07a92539c1357e49dc977b28d8 Mon Sep 17 00:00:00 2001 From: piyushK52 Date: Sun, 29 Oct 2023 00:18:24 +0530 Subject: [PATCH 125/164] wip: fixing startup bugs + migrations added --- backend/db_repo.py | 6 - ...shot_added_and_redundant_fields_removed.py | 201 ++++++++++++++++++ backend/models.py | 6 +- backend/serializers/dao.py | 7 - backend/serializers/dto.py | 28 ++- banodoco_settings.py | 3 +- .../components/frame_styling_page.py | 4 +- ui_components/models.py | 2 +- ui_components/setup.py | 6 +- ui_components/widgets/sidebar_logger.py | 2 +- 10 files changed, 235 insertions(+), 30 deletions(-) create mode 100644 backend/migrations/0012_shot_added_and_redundant_fields_removed.py diff --git a/backend/db_repo.py b/backend/db_repo.py index b343f2b0..41ec6f95 100644 --- a/backend/db_repo.py +++ b/backend/db_repo.py @@ -816,7 +816,6 @@ def create_timing(self, **kwargs): return InternalResponse({}, 'invalid model uuid', False) attributes._data['model_id'] = model.id - if 
'source_image_id' in attributes.data: if attributes.data['source_image_id'] != None: @@ -825,7 +824,6 @@ def create_timing(self, **kwargs): return InternalResponse({}, 'invalid source image uuid', False) attributes._data['source_image_id'] = source_image.id - if 'mask_id' in attributes.data: if attributes.data['mask_id'] != None: @@ -834,7 +832,6 @@ def create_timing(self, **kwargs): return InternalResponse({}, 'invalid mask uuid', False) attributes._data['mask_id'] = mask.id - if 'canny_image_id' in attributes.data: if attributes.data['canny_image_id'] != None: @@ -843,7 +840,6 @@ def create_timing(self, **kwargs): return InternalResponse({}, 'invalid canny image uuid', False) attributes._data['canny_image_id'] = canny_image.id - if 'primay_image_id' in attributes.data: if attributes.data['primay_image_id'] != None: @@ -853,9 +849,7 @@ def create_timing(self, **kwargs): attributes._data['primay_image_id'] = primay_image.id - timing = Timing.objects.create(**attributes.data) - payload = { 'data': TimingDto(timing).data } diff --git a/backend/migrations/0012_shot_added_and_redundant_fields_removed.py b/backend/migrations/0012_shot_added_and_redundant_fields_removed.py new file mode 100644 index 00000000..f7b7dedd --- /dev/null +++ b/backend/migrations/0012_shot_added_and_redundant_fields_removed.py @@ -0,0 +1,201 @@ +# Generated by Django 4.2.1 on 2023-10-28 13:15 + +from django.db import migrations, models +import django.db.models.deletion +import uuid + + +class Migration(migrations.Migration): + + dependencies = [ + ('backend', '0011_lock_added'), + ] + + operations = [ + migrations.RemoveField( + model_name='setting', + name='default_adapter_type', + ), + migrations.RemoveField( + model_name='setting', + name='default_animation_style', + ), + migrations.RemoveField( + model_name='setting', + name='default_custom_model_uuid_list', + ), + migrations.RemoveField( + model_name='setting', + name='default_custom_pipeline', + ), + migrations.RemoveField( + 
model_name='setting', + name='default_guidance_scale', + ), + migrations.RemoveField( + model_name='setting', + name='default_high_threshold', + ), + migrations.RemoveField( + model_name='setting', + name='default_low_threshold', + ), + migrations.RemoveField( + model_name='setting', + name='default_negative_prompt', + ), + migrations.RemoveField( + model_name='setting', + name='default_num_inference_steps', + ), + migrations.RemoveField( + model_name='setting', + name='default_prompt', + ), + migrations.RemoveField( + model_name='setting', + name='default_seed', + ), + migrations.RemoveField( + model_name='setting', + name='default_stage', + ), + migrations.RemoveField( + model_name='setting', + name='default_strength', + ), + migrations.RemoveField( + model_name='setting', + name='extraction_type', + ), + migrations.RemoveField( + model_name='setting', + name='guidance_type', + ), + migrations.RemoveField( + model_name='setting', + name='input_video', + ), + migrations.RemoveField( + model_name='setting', + name='rotation_angle_value', + ), + migrations.RemoveField( + model_name='setting', + name='x_shift', + ), + migrations.RemoveField( + model_name='setting', + name='y_shift', + ), + migrations.RemoveField( + model_name='setting', + name='zoom_level', + ), + migrations.RemoveField( + model_name='timing', + name='adapter_type', + ), + migrations.RemoveField( + model_name='timing', + name='animation_style', + ), + migrations.RemoveField( + model_name='timing', + name='custom_model_id_list', + ), + migrations.RemoveField( + model_name='timing', + name='custom_pipeline', + ), + migrations.RemoveField( + model_name='timing', + name='frame_number', + ), + migrations.RemoveField( + model_name='timing', + name='frame_time', + ), + migrations.RemoveField( + model_name='timing', + name='guidance_scale', + ), + migrations.RemoveField( + model_name='timing', + name='high_threshold', + ), + migrations.RemoveField( + model_name='timing', + name='interpolated_clip_list', + ), 
+ migrations.RemoveField( + model_name='timing', + name='interpolation_steps', + ), + migrations.RemoveField( + model_name='timing', + name='low_threshold', + ), + migrations.RemoveField( + model_name='timing', + name='negative_prompt', + ), + migrations.RemoveField( + model_name='timing', + name='num_inteference_steps', + ), + migrations.RemoveField( + model_name='timing', + name='preview_video', + ), + migrations.RemoveField( + model_name='timing', + name='project', + ), + migrations.RemoveField( + model_name='timing', + name='prompt', + ), + migrations.RemoveField( + model_name='timing', + name='seed', + ), + migrations.RemoveField( + model_name='timing', + name='strength', + ), + migrations.RemoveField( + model_name='timing', + name='timed_clip', + ), + migrations.RemoveField( + model_name='timing', + name='transformation_stage', + ), + migrations.CreateModel( + name='Shot', + fields=[ + ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), + ('uuid', models.UUIDField(default=uuid.uuid4)), + ('created_on', models.DateTimeField(auto_now_add=True)), + ('updated_on', models.DateTimeField(auto_now=True)), + ('is_disabled', models.BooleanField(default=False)), + ('name', models.CharField(blank=True, default='', max_length=255)), + ('desc', models.TextField(blank=True, default='')), + ('shot_idx', models.IntegerField()), + ('duration', models.FloatField(default=2.5)), + ('meta_data', models.TextField(blank=True, default='')), + ('interpolated_clip_list', models.TextField(default=None, null=True)), + ('main_clip', models.ForeignKey(default=None, null=True, on_delete=django.db.models.deletion.DO_NOTHING, to='backend.internalfileobject')), + ('project', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='backend.project')), + ], + options={ + 'db_table': 'shot', + }, + ), + migrations.AddField( + model_name='timing', + name='shot', + field=models.ForeignKey(null=True, 
on_delete=django.db.models.deletion.CASCADE, to='backend.shot'), + ), + ] diff --git a/backend/models.py b/backend/models.py index 5ac42964..3fe4f5ce 100644 --- a/backend/models.py +++ b/backend/models.py @@ -149,8 +149,8 @@ def data_dump_dict(self): class Shot(BaseModel): name = models.CharField(max_length=255, default="", blank=True) - project_id = models.ForeignKey(Project, on_delete=models.CASCADE) - main_clip_id = models.ForeignKey(InternalFileObject, default=None, null=True) # main clip has the correct duration + project = models.ForeignKey(Project, on_delete=models.CASCADE) + main_clip = models.ForeignKey(InternalFileObject, default=None, null=True, on_delete=models.DO_NOTHING) # main clip has the correct duration desc = models.TextField(default="", blank=True) shot_idx = models.IntegerField() duration = models.FloatField(default=2.5) @@ -217,7 +217,7 @@ class Timing(BaseModel): mask = models.ForeignKey(InternalFileObject, related_name="mask", on_delete=models.DO_NOTHING, null=True) canny_image = models.ForeignKey(InternalFileObject, related_name="canny_image", on_delete=models.DO_NOTHING, null=True) primary_image = models.ForeignKey(InternalFileObject, related_name="primary_image", on_delete=models.DO_NOTHING, null=True) # variant number that is currently selected (among alternative images) NONE if none is present - shot_id = models.ForeignKey(Shot, on_delete=models.CASCADE, null=True) + shot = models.ForeignKey(Shot, on_delete=models.CASCADE, null=True) alternative_images = models.TextField(default=None, null=True) notes = models.TextField(default="", blank=True) clip_duration = models.FloatField(default=None, null=True) diff --git a/backend/serializers/dao.py b/backend/serializers/dao.py index 26ff23ee..2231a725 100644 --- a/backend/serializers/dao.py +++ b/backend/serializers/dao.py @@ -82,14 +82,7 @@ class CreateTimingDao(serializers.Serializer): shot_id = serializers.CharField(max_length=100) primary_image = serializers.CharField(max_length=100, 
required=False) alternative_images = serializers.CharField(max_length=100, required=False) - prompt = serializers.CharField(max_length=1024, required=False) - negative_prompt = serializers.CharField(max_length=1024, required=False) - guidance_scale = serializers.FloatField(required=False) - seed = serializers.IntegerField(required=False) - strength = serializers.FloatField(required=False) notes = serializers.CharField(max_length=1024, required=False) - adapter_type = serializers.CharField(max_length=255, required=False) - clip_duration = serializers.FloatField(default=0, required=False) aux_frame_index = serializers.IntegerField(required=False) diff --git a/backend/serializers/dto.py b/backend/serializers/dto.py index 24ade5a0..aa2da671 100644 --- a/backend/serializers/dto.py +++ b/backend/serializers/dto.py @@ -74,12 +74,30 @@ class Meta: fields = ('uuid', 'name', 'local_path', 'type', 'hosted_url', 'created_on', 'inference_log', 'project') +class BasicShotDto(serializers.ModelSerializer): + project = ProjectDto() + + class Meta: + model = Shot + fields = ( + "uuid", + "name", + "project", + "desc", + "shot_idx", + "project", + "duration", + "meta_data", + ) + + class TimingDto(serializers.ModelSerializer): model = AIModelDto() source_image = InternalFileDto() mask = InternalFileDto() canny_image = InternalFileDto() primary_image = InternalFileDto() + shot = BasicShotDto() class Meta: model = Timing @@ -91,16 +109,10 @@ class Meta: "canny_image", "primary_image", "alternative_images", - "prompt", - "negative_prompt", - "guidance_scale", - "seed", - "strength", "notes", - "adapter_type", - "clip_duration", "aux_frame_index", "created_on", + "shot" ) @@ -166,6 +178,7 @@ class ShotDto(serializers.ModelSerializer): timing_list = serializers.SerializerMethodField() interpolated_clip_list = serializers.SerializerMethodField() main_clip = InternalFileDto() + project = ProjectDto() class Meta: model = Shot @@ -174,6 +187,7 @@ class Meta: "name", "desc", "shot_idx", + 
"project", "duration", "meta_data", "timing_list", diff --git a/banodoco_settings.py b/banodoco_settings.py index 826d69f1..7b1d6241 100644 --- a/banodoco_settings.py +++ b/banodoco_settings.py @@ -88,9 +88,8 @@ def create_new_project(user: InternalUserObject, project_name: str, width=512, h # create a default first shot shot_data = { "name": "Shot 1", - "project_id": project.uuid, + "project_uuid": project.uuid, "desc": "", - "shot_idx": 0, "duration": 2 } diff --git a/ui_components/components/frame_styling_page.py b/ui_components/components/frame_styling_page.py index 22344a28..98817400 100644 --- a/ui_components/components/frame_styling_page.py +++ b/ui_components/components/frame_styling_page.py @@ -31,8 +31,8 @@ def frame_styling_page(shot_uuid: str): if "strength" not in st.session_state: st.session_state['strength'] = DefaultProjectSettingParams.batch_strength - st.session_state['prompt_value'] = project_settings.default_prompt - st.session_state['model'] = project_settings.default_model.uuid + st.session_state['prompt_value'] = DefaultProjectSettingParams.batch_prompt + st.session_state['model'] = None st.session_state['negative_prompt_value'] = DefaultProjectSettingParams.batch_negative_prompt st.session_state['guidance_scale'] = DefaultProjectSettingParams.batch_guidance_scale st.session_state['seed'] = DefaultProjectSettingParams.batch_seed diff --git a/ui_components/models.py b/ui_components/models.py index 07fb12d7..d3cf0346 100644 --- a/ui_components/models.py +++ b/ui_components/models.py @@ -162,7 +162,7 @@ class InternalShotObject: def __init__(self, **kwargs): self.uuid = kwargs['uuid'] if key_present('uuid', kwargs) else None self.name = kwargs['name'] if key_present('name', kwargs) else "" - self.project_id = kwargs['project_id'] if key_present('project_id', kwargs) else None + self.project = InternalProjectObject(**kwargs['project']) if key_present('project', kwargs) else None self.desc = kwargs['desc'] if key_present('desc', kwargs) else "" 
self.shot_idx = kwargs['shot_idx'] if key_present('shot_idx', kwargs) else 0 self.duration = kwargs['duration'] if key_present('duration', kwargs) else 0 diff --git a/ui_components/setup.py b/ui_components/setup.py index e0b43bb3..56333eb0 100644 --- a/ui_components/setup.py +++ b/ui_components/setup.py @@ -87,6 +87,10 @@ def setup_app_ui(): st.session_state["project_uuid"] = project_list[selected_index].uuid check_project_meta_data(st.session_state["project_uuid"]) + if 'shot_uuid' not in st.session_state: + shot_list = data_repo.get_shot_list(st.session_state["project_uuid"]) + st.session_state['shot_uuid'] = shot_list[0].uuid + if "current_frame_index" not in st.session_state: st.session_state['current_frame_index'] = 1 @@ -164,7 +168,7 @@ def setup_app_ui(): st.session_state['page'] = option_menu(None, pages, icons=['palette', 'camera-reels', "hourglass", 'stopwatch'], menu_icon="cast", orientation="horizontal", key="secti2on_selector", styles={ "nav-link": {"font-size": "15px", "margin": "0px", "--hover-color": "#eee"}, "nav-link-selected": {"background-color": "orange"}}, manual_select=st.session_state["manual_select"]) - frame_styling_page(mainheader2, st.session_state["project_uuid"]) + frame_styling_page(st.session_state["shot_uuid"]) elif st.session_state["main_view_type"] == "Tools & Settings": diff --git a/ui_components/widgets/sidebar_logger.py b/ui_components/widgets/sidebar_logger.py index 09a304d0..8ffb107e 100644 --- a/ui_components/widgets/sidebar_logger.py +++ b/ui_components/widgets/sidebar_logger.py @@ -35,7 +35,7 @@ def sidebar_logger(shot_uuid): page_number = b1.number_input('Page number', min_value=1, max_value=project_setting.total_log_pages, value=1, step=1) items_per_page = b2.slider("Items per page", min_value=1, max_value=20, value=5, step=1) log_list, total_page_count = data_repo.get_all_inference_log_list( - project_id=shot_uuid, + project_id=shot.project.uuid, page=page_number, data_per_page=items_per_page, status_list=status_list 
From af8f43461a3bc42f8fd0798361292b91a1ea9af1 Mon Sep 17 00:00:00 2001 From: piyushK52 Date: Sun, 29 Oct 2023 14:22:49 +0530 Subject: [PATCH 126/164] wip: basic ui setup --- .../components/frame_styling_page.py | 4 +- .../widgets/add_key_frame_element.py | 4 +- ui_components/widgets/shot_view.py | 73 +++++++++++++++ .../widgets/style_explorer_element.py | 37 ++++---- ui_components/widgets/timeline_view.py | 92 +++++++------------ 5 files changed, 128 insertions(+), 82 deletions(-) create mode 100644 ui_components/widgets/shot_view.py diff --git a/ui_components/components/frame_styling_page.py b/ui_components/components/frame_styling_page.py index 98817400..59a7439c 100644 --- a/ui_components/components/frame_styling_page.py +++ b/ui_components/components/frame_styling_page.py @@ -61,7 +61,7 @@ def frame_styling_page(shot_uuid: str): project_settings = data_repo.get_project_setting(shot.project.uuid) if st.session_state['frame_styling_view_type'] == "Explorer": - style_explorer_element(shot_uuid) + style_explorer_element(shot.project.uuid) # -------------------- INDIVIDUAL VIEW ---------------------- elif st.session_state['frame_styling_view_type'] == "Individual": @@ -151,7 +151,7 @@ def frame_styling_page(shot_uuid: str): with st.expander("➕ Add Key Frame", expanded=True): selected_image, inherit_styling_settings, _ = add_key_frame_element(shot_uuid) if st.button(f"Add key frame",type="primary",use_container_width=True): - add_key_frame(selected_image, inherit_styling_settings) + add_key_frame(selected_image, inherit_styling_settings, shot_uuid) st.rerun() # -------------------- TIMELINE VIEW -------------------------- diff --git a/ui_components/widgets/add_key_frame_element.py b/ui_components/widgets/add_key_frame_element.py index 1e11234c..06f1e866 100644 --- a/ui_components/widgets/add_key_frame_element.py +++ b/ui_components/widgets/add_key_frame_element.py @@ -90,11 +90,9 @@ def add_key_frame_element(shot_uuid): return selected_image, 
inherit_styling_settings, transformation_stage -def add_key_frame(selected_image, inherit_styling_settings, target_frame_position=None, refresh_state=True): +def add_key_frame(selected_image, inherit_styling_settings, shot_uuid, target_frame_position=None, refresh_state=True): data_repo = DataRepo() - shot_uuid = st.session_state['shot_uuid'] timing_list = data_repo.get_timing_list_from_shot(shot_uuid) - project_settings = data_repo.get_project_setting(shot_uuid) if len(timing_list) == 0: index_of_current_item = 1 diff --git a/ui_components/widgets/shot_view.py b/ui_components/widgets/shot_view.py new file mode 100644 index 00000000..851446bb --- /dev/null +++ b/ui_components/widgets/shot_view.py @@ -0,0 +1,73 @@ +from typing import List +import streamlit as st +from ui_components.constants import WorkflowStageType +from ui_components.methods.file_methods import generate_pil_image + +from ui_components.models import InternalFrameTimingObject, InternalShotObject +from ui_components.widgets.add_key_frame_element import add_key_frame +from ui_components.widgets.frame_movement_widgets import change_frame_position_input, delete_frame_button, jump_to_single_frame_view_button, move_frame_back_button, move_frame_forward_button, replace_image_widget +from utils.data_repo.data_repo import DataRepo + +def shot_keyframe_element(shot_uuid, items_per_row, **kwargs): + data_repo = DataRepo() + shot: InternalShotObject = data_repo.get_shot_from_uuid(shot_uuid) + + st.title(shot.name) + timing_list: List[InternalFrameTimingObject] = shot.timing_list + + grid = st.columns(items_per_row) + for idx, timing in enumerate(timing_list): + with grid[idx%items_per_row]: + if timing.primary_image and timing.primary_image.location: + st.image(timing.primary_image.location, use_column_width=True) + timeline_view_buttons(idx, shot_uuid, **kwargs) + else: + st.warning("No primary image present") + +def shot_video_element(shot_uuid, idx, items_per_row): + data_repo = DataRepo() + shot: 
InternalShotObject = data_repo.get_shot_from_uuid(shot_uuid) + + grid = st.columns(items_per_row) + with grid[idx%items_per_row]: + st.title(shot.name) + if shot.main_clip and shot.main_clip.location: + st.video(shot.main_clip.location) + else: + st.warning("No video present") + + if st.button("Generate video", key=shot.uuid): + pass + +def timeline_view_buttons(idx, shot_uuid, replace_image_widget_toggle, copy_frame_toggle, move_frames_toggle, delete_frames_toggle, change_position_toggle): + data_repo = DataRepo() + shot = data_repo.get_shot_from_uuid(shot_uuid) + timing_list = shot.timing_list + + if replace_image_widget_toggle: + replace_image_widget(timing_list[idx].uuid, stage=WorkflowStageType.STYLED.value, options=["Uploaded Frame"]) + + btn1, btn2, btn3, btn4 = st.columns([1, 1, 1, 1]) + + if move_frames_toggle: + with btn1: + move_frame_back_button(timing_list[idx].uuid, "side-to-side") + with btn2: + move_frame_forward_button(timing_list[idx].uuid, "side-to-side") + + if copy_frame_toggle: + with btn3: + if st.button("🔁", key=f"copy_frame_{timing_list[idx].uuid}"): + pil_image = generate_pil_image(timing_list[idx].primary_image.location) + add_key_frame(pil_image, False, st.session_state['shot_uuid'], timing_list[idx].aux_frame_index+1, refresh_state=False) + st.rerun() + + if delete_frames_toggle: + with btn4: + delete_frame_button(timing_list[idx].uuid) + + if change_position_toggle: + change_frame_position_input(timing_list[idx].uuid, "side-to-side") + + jump_to_single_frame_view_button(idx + 1, timing_list, 'timeline_btn_'+str(timing_list[idx].uuid)) + diff --git a/ui_components/widgets/style_explorer_element.py b/ui_components/widgets/style_explorer_element.py index fa49c369..fb6f5849 100644 --- a/ui_components/widgets/style_explorer_element.py +++ b/ui_components/widgets/style_explorer_element.py @@ -13,11 +13,11 @@ from utils.ml_processor.replicate.constants import REPLICATE_MODEL -def style_explorer_element(shot_uuid): +def 
style_explorer_element(project_uuid): st.markdown("***") data_repo = DataRepo() - shot = data_repo.get_shot_from_uuid(shot_uuid) - project_settings = data_repo.get_project_setting(shot.project.uuid) + shot_list = data_repo.get_shot_list(project_uuid) + project_settings = data_repo.get_project_setting(project_uuid) _, a2, a3,_= st.columns([0.5, 1, 0.5,0.5]) with a2: @@ -85,8 +85,7 @@ def style_explorer_element(shot_uuid): negative_prompt="bad image, worst image, bad anatomy, washed out colors", height=project_settings.height, width=project_settings.width, - project_uuid=shot.project.uuid, - shot_uuid=shot.uuid + project_uuid=project_uuid ) replicate_model = REPLICATE_MODEL.get_model_by_db_obj(model_dict[model_name]) @@ -96,12 +95,11 @@ def style_explorer_element(shot_uuid): "inference_type": InferenceType.GALLERY_IMAGE_GENERATION.value, "output": output, "log_uuid": log.uuid, - "project_uuid": shot.project.uuid, - "shot_uuid": shot.uuid + "project_uuid": project_uuid } process_inference_output(**inference_data) - project_setting = data_repo.get_project_setting(shot.project.uuid) + project_setting = data_repo.get_project_setting(project_uuid) st.markdown("***") page_number = st.radio("Select page", options=range(1, project_setting.total_gallery_pages + 1), horizontal=True) @@ -112,7 +110,7 @@ def style_explorer_element(shot_uuid): gallery_image_list, res_payload = data_repo.get_all_file_list( file_type=InternalFileType.IMAGE.value, tag=InternalFileTag.GALLERY_IMAGE.value, - project_id=shot.project.uuid, + project_id=project_uuid, page=page_number, data_per_page=num_items_per_page, sort_order=SortOrder.DESCENDING.value # newly created images appear first @@ -147,14 +145,19 @@ def style_explorer_element(shot_uuid): else: st.warning("No data found") - if st.button(f"Add to timeline", key=f"{gallery_image_list[i + j].uuid}", help="Promote this variant to the primary image", use_container_width=True): - pil_image = generate_pil_image(gallery_image_list[i + j].location) - 
add_key_frame(pil_image, False, len(data_repo.get_timing_list_from_shot(shot_uuid)), refresh_state=False) - - # removing this from the gallery view - data_repo.update_file(gallery_image_list[i + j].uuid, tag="") - - st.rerun() + with st.expander('Add to shot', False): + shot_number = st.number_input(f"Shot # (out of {len(shot_list)})", 1, + len(shot_list), value=1, + step=1, key=f"shot_frame_{gallery_image_list[i + j].uuid}") + if st.button(f"Add to shot", key=f"{gallery_image_list[i + j].uuid}", help="Promote this variant to the primary image", use_container_width=True): + pil_image = generate_pil_image(gallery_image_list[i + j].location) + shot_uuid = shot_list[shot_number - 1].uuid + add_key_frame(pil_image, False, shot_uuid, len(data_repo.get_timing_list_from_shot(shot_uuid)), refresh_state=False) + + # removing this from the gallery view + data_repo.update_file(gallery_image_list[i + j].uuid, tag="") + + st.rerun() st.markdown("***") else: diff --git a/ui_components/widgets/timeline_view.py b/ui_components/widgets/timeline_view.py index b015ab64..9c3c69ef 100644 --- a/ui_components/widgets/timeline_view.py +++ b/ui_components/widgets/timeline_view.py @@ -1,44 +1,7 @@ import streamlit as st -from ui_components.widgets.frame_movement_widgets import change_frame_position_input, delete_frame_button, jump_to_single_frame_view_button, move_frame_back_button, move_frame_forward_button, replace_image_widget -from ui_components.widgets.image_carousal import display_image +from ui_components.widgets.shot_view import shot_keyframe_element, shot_video_element from utils.data_repo.data_repo import DataRepo -from ui_components.constants import WorkflowStageType from utils import st_memory -from ui_components.methods.file_methods import generate_pil_image -from ui_components.widgets.add_key_frame_element import add_key_frame - - -def timeline_view_buttons(i, j, timing_list, time_setter_toggle, replace_image_widget_toggle, duration_setter_toggle, copy_frame_toggle, 
move_frames_toggle, delete_frames_toggle, change_position_toggle, project_uuid): - if replace_image_widget_toggle: - replace_image_widget(timing_list[i + j].uuid, stage=WorkflowStageType.STYLED.value,options=["Uploaded Frame"]) - - btn1, btn2, btn3, btn4 = st.columns([1, 1, 1, 1]) - - if move_frames_toggle: - with btn1: - move_frame_back_button(timing_list[i + j].uuid, "side-to-side") - with btn2: - move_frame_forward_button(timing_list[i + j].uuid, "side-to-side") - - if copy_frame_toggle: - with btn3: - if st.button("🔁", key=f"copy_frame_{timing_list[i + j].uuid}"): - pil_image = generate_pil_image(timing_list[i + j].primary_image.location) - add_key_frame(pil_image, False, timing_list[i + j].aux_frame_index+1, refresh_state=False) - - st.rerun() - - if delete_frames_toggle: - with btn4: - delete_frame_button(timing_list[i + j].uuid) - - if change_position_toggle: - change_frame_position_input(timing_list[i + j].uuid, "side-to-side") - - if time_setter_toggle or duration_setter_toggle or replace_image_widget_toggle or move_frames_toggle or delete_frames_toggle or change_position_toggle: - st.caption("--") - - jump_to_single_frame_view_button(i + j + 1, timing_list, 'timeline_btn_'+str(timing_list[i+j].uuid)) def timeline_view(shot_uuid, stage): @@ -60,10 +23,8 @@ def timeline_view(shot_uuid, stage): time_setter_toggle = replace_image_widget_toggle = duration_setter_toggle = copy_frame_toggle = move_frames_toggle = delete_frames_toggle = change_position_toggle = True else: - with col2: - time_setter_toggle = st_memory.toggle("Time Setter", value=True, key="time_setter_toggle") - delete_frames_toggle = st_memory.toggle("Delete Frames", value=True, key="delete_frames_toggle") - duration_setter_toggle = st_memory.toggle("Duration Setter", value=False, key="duration_setter_toggle") + with col2: + delete_frames_toggle = st_memory.toggle("Delete Frames", value=True, key="delete_frames_toggle") copy_frame_toggle = st_memory.toggle("Copy Frame", value=False, 
key="copy_frame_toggle") with col3: move_frames_toggle = st_memory.toggle("Move Frames", value=True, key="move_frames_toggle") @@ -72,24 +33,35 @@ def timeline_view(shot_uuid, stage): with header_col_3: items_per_row = st_memory.slider("How many frames per row?", min_value=1, max_value=10, value=5, step=1, key="items_per_row_slider") - - st.markdown("***") - total_count = len(shot_list) - for i in range(0, total_count, items_per_row): # Step of items_per_row for grid - grid = st.columns(items_per_row) # Create items_per_row columns for grid - for j in range(items_per_row): - if i + j < total_count: # Check if index is within range - with grid[j]: - display_number = i + j + 1 - if stage == 'Key Frames': - display_image(timing_uuid=shot_list[i + j].uuid, stage=WorkflowStageType.STYLED.value, clickable=False) - elif stage == 'Videos': - if shot_list[i + j].main_clip: - st.video(shot_list[i + j].main_clip.location) - else: - st.error("No video found for this frame.") - with st.expander(f'Frame #{display_number}', True): - timeline_view_buttons(i, j, shot_list, time_setter_toggle, replace_image_widget_toggle, duration_setter_toggle, copy_frame_toggle, move_frames_toggle, delete_frames_toggle, change_position_toggle, project_uuid) + btn_data = { + "replace_image_widget_toggle": replace_image_widget_toggle, + "copy_frame_toggle": copy_frame_toggle, + "move_frames_toggle": move_frames_toggle, + "delete_frames_toggle": delete_frames_toggle, + "change_position_toggle": change_position_toggle + } + if stage == 'Key Frames': + for shot in shot_list: + shot_keyframe_element(shot.uuid, items_per_row, **btn_data) + else: + for idx, shot in enumerate(shot_list): + shot_video_element(shot.uuid, idx, items_per_row) + + # for i in range(0, total_count, items_per_row): # Step of items_per_row for grid + # grid = st.columns(items_per_row) # Create items_per_row columns for grid + # for j in range(items_per_row): + # if i + j < total_count: # Check if index is within range + # with 
grid[j]: + # display_number = i + j + 1 + # if stage == 'Key Frames': + # display_image(timing_uuid=shot_list[i + j].uuid, stage=WorkflowStageType.STYLED.value, clickable=False) + # elif stage == 'Videos': + # if shot_list[i + j].main_clip: + # st.video(shot_list[i + j].main_clip.location) + # else: + # st.error("No video found for this frame.") + # with st.expander(f'Frame #{display_number}', True): + # timeline_view_buttons(i, j, shot_list, time_setter_toggle, replace_image_widget_toggle, duration_setter_toggle, copy_frame_toggle, move_frames_toggle, delete_frames_toggle, change_position_toggle, project_uuid) From ccd5cc0021db92b6f70080c78eded3953449983c Mon Sep 17 00:00:00 2001 From: piyushK52 Date: Sun, 29 Oct 2023 16:53:49 +0530 Subject: [PATCH 127/164] wip: frame movement fixed --- backend/db_repo.py | 8 +++++--- backend/models.py | 2 ++ backend/serializers/dto.py | 4 +++- banodoco_settings.py | 1 - ui_components/methods/common_methods.py | 11 +++++++++++ ui_components/widgets/add_key_frame_element.py | 6 +++--- ui_components/widgets/shot_view.py | 17 ++++++++++------- ui_components/widgets/timeline_view.py | 10 ++++++++-- utils/data_repo/data_repo.py | 4 ++-- 9 files changed, 44 insertions(+), 19 deletions(-) diff --git a/backend/db_repo.py b/backend/db_repo.py index 41ec6f95..07822255 100644 --- a/backend/db_repo.py +++ b/backend/db_repo.py @@ -807,7 +807,7 @@ def create_timing(self, **kwargs): attributes._data['shot_id'] = shot.id if 'aux_frame_index' not in attributes.data or attributes.data['aux_frame_index'] == None: - attributes._data['aux_frame_index'] = Timing.objects.filter(project_id=attributes.data['project_id'], is_disabled=False).count() + attributes._data['aux_frame_index'] = Timing.objects.filter(shot_id=attributes.data['shot_id'], is_disabled=False).count() if 'model_id' in attributes.data: if attributes.data['model_id'] != None: @@ -1448,13 +1448,15 @@ def get_shot_list(self, project_uuid): return InternalResponse(payload, 'shot list fetched 
successfully', True) - def create_shot(self, project_uuid, name, duration, meta_data="", desc=""): + def create_shot(self, project_uuid, duration, name=None, meta_data="", desc=""): project = Project.objects.filter(uuid=project_uuid, is_disabled=False).first() if not project: return InternalResponse({}, 'invalid project uuid', False) shot_number = Shot.objects.filter(project_id=project.id, is_disabled=False).count() + 1 - + if not name: + name = "Shot " + str(shot_number) + shot_data = { "name" : name, "desc" : desc, diff --git a/backend/models.py b/backend/models.py index 3fe4f5ce..975f4314 100644 --- a/backend/models.py +++ b/backend/models.py @@ -207,6 +207,7 @@ def save(self, *args, **kwargs): else: shots_to_move = Shot.objects.filter(project_id=self.project_id, shot_idx__gte=self.shot_idx, \ shot_idx__lt=self.old_shot_idx, is_disabled=False).order_by('shot_idx') + shots_to_move.update(shot_idx=F('shot_idx') + 1) super(Shot, self).save(*args, **kwargs) @@ -266,6 +267,7 @@ def save(self, *args, **kwargs): else: timings_to_move = Timing.objects.filter(shot_id=self.shot_id, aux_frame_index__gte=self.aux_frame_index, \ aux_frame_index__lt=self.old_aux_frame_index, is_disabled=False).order_by('aux_frame_index') + timings_to_move.update(aux_frame_index=F('aux_frame_index') + 1) super().save(*args, **kwargs) diff --git a/backend/serializers/dto.py b/backend/serializers/dto.py index aa2da671..4fac24df 100644 --- a/backend/serializers/dto.py +++ b/backend/serializers/dto.py @@ -197,7 +197,9 @@ class Meta: def get_timing_list(self, obj): timing_list = self.context.get("timing_list", []) - return [TimingDto(timing).data for timing in timing_list] + timing_list = [TimingDto(timing).data for timing in timing_list if str(timing.shot.uuid) == str(obj.uuid)] + timing_list.sort(key=lambda x: x['aux_frame_index']) + return timing_list def get_interpolated_clip_list(self, obj): id_list = json.loads(obj.interpolated_clip_list) if obj.interpolated_clip_list else [] diff --git 
a/banodoco_settings.py b/banodoco_settings.py index 7b1d6241..aba57f22 100644 --- a/banodoco_settings.py +++ b/banodoco_settings.py @@ -87,7 +87,6 @@ def create_new_project(user: InternalUserObject, project_name: str, width=512, h # create a default first shot shot_data = { - "name": "Shot 1", "project_uuid": project.uuid, "desc": "", "duration": 2 diff --git a/ui_components/methods/common_methods.py b/ui_components/methods/common_methods.py index b9ee1f28..a828558c 100644 --- a/ui_components/methods/common_methods.py +++ b/ui_components/methods/common_methods.py @@ -399,6 +399,17 @@ def create_or_update_mask(timing_uuid, image) -> InternalFileObject: timing = data_repo.get_timing_from_uuid(timing_uuid) return timing.mask.location +def add_new_shot(project_uuid): + data_repo = DataRepo() + + shot_data = { + "project_uuid": project_uuid, + "desc": "", + "duration": 2 + } + + _ = data_repo.create_shot(**shot_data) + # adds the image file in variant (alternative images) list def add_image_variant(image_file_uuid: str, timing_uuid: str): data_repo = DataRepo() diff --git a/ui_components/widgets/add_key_frame_element.py b/ui_components/widgets/add_key_frame_element.py index 06f1e866..1cbeeb1a 100644 --- a/ui_components/widgets/add_key_frame_element.py +++ b/ui_components/widgets/add_key_frame_element.py @@ -108,11 +108,11 @@ def add_key_frame(selected_image, inherit_styling_settings, shot_uuid, target_fr timing_list = data_repo.get_timing_list_from_shot(shot_uuid) if selected_image: - save_uploaded_image(selected_image, shot_uuid, timing_list[index_of_current_item].uuid, WorkflowStageType.SOURCE.value) - save_uploaded_image(selected_image, shot_uuid, timing_list[index_of_current_item].uuid, WorkflowStageType.STYLED.value) + save_uploaded_image(selected_image, shot_uuid, timing_list[index_of_current_item - 1].uuid, WorkflowStageType.SOURCE.value) + save_uploaded_image(selected_image, shot_uuid, timing_list[index_of_current_item - 1].uuid, WorkflowStageType.STYLED.value) 
if inherit_styling_settings == "Yes": - clone_styling_settings(index_of_current_item - 1, timing_list[index_of_current_item].uuid) + clone_styling_settings(index_of_current_item - 1, timing_list[index_of_current_item - 1].uuid) if len(timing_list) == 1: st.session_state['current_frame_index'] = 1 diff --git a/ui_components/widgets/shot_view.py b/ui_components/widgets/shot_view.py index 851446bb..6ac07885 100644 --- a/ui_components/widgets/shot_view.py +++ b/ui_components/widgets/shot_view.py @@ -16,13 +16,16 @@ def shot_keyframe_element(shot_uuid, items_per_row, **kwargs): timing_list: List[InternalFrameTimingObject] = shot.timing_list grid = st.columns(items_per_row) - for idx, timing in enumerate(timing_list): - with grid[idx%items_per_row]: - if timing.primary_image and timing.primary_image.location: - st.image(timing.primary_image.location, use_column_width=True) - timeline_view_buttons(idx, shot_uuid, **kwargs) - else: - st.warning("No primary image present") + if timing_list and len(timing_list): + for idx, timing in enumerate(timing_list): + with grid[idx%items_per_row]: + if timing.primary_image and timing.primary_image.location: + st.image(timing.primary_image.location, use_column_width=True) + timeline_view_buttons(idx, shot_uuid, **kwargs) + else: + st.warning("No primary image present") + else: + st.warning("No keyframes present") def shot_video_element(shot_uuid, idx, items_per_row): data_repo = DataRepo() diff --git a/ui_components/widgets/timeline_view.py b/ui_components/widgets/timeline_view.py index 9c3c69ef..4ef841ea 100644 --- a/ui_components/widgets/timeline_view.py +++ b/ui_components/widgets/timeline_view.py @@ -1,4 +1,5 @@ import streamlit as st +from ui_components.methods.common_methods import add_new_shot from ui_components.widgets.shot_view import shot_keyframe_element, shot_video_element from utils.data_repo.data_repo import DataRepo from utils import st_memory @@ -11,8 +12,13 @@ def timeline_view(shot_uuid, stage): st.markdown("***") - 
_, header_col_2, header_col_3 = st.columns([1.5,4,1.5]) + header_col_1, header_col_2, header_col_3 = st.columns([1.5,4,1.5]) + with header_col_1: + if st.button('Add new shot'): + add_new_shot(shot.project.uuid) + st.rerun() + with header_col_2: col1, col2, col3 = st.columns(3) @@ -20,7 +26,7 @@ def timeline_view(shot_uuid, stage): expand_all = st_memory.toggle("Expand All", key="expand_all",value=False) if expand_all: - time_setter_toggle = replace_image_widget_toggle = duration_setter_toggle = copy_frame_toggle = move_frames_toggle = delete_frames_toggle = change_position_toggle = True + replace_image_widget_toggle = copy_frame_toggle = move_frames_toggle = delete_frames_toggle = change_position_toggle = True else: with col2: diff --git a/utils/data_repo/data_repo.py b/utils/data_repo/data_repo.py index e8a8fed2..194ca99c 100644 --- a/utils/data_repo/data_repo.py +++ b/utils/data_repo/data_repo.py @@ -414,8 +414,8 @@ def get_shot_list(self, project_uuid): shot_list = res.data['data'] if res.status else None return [InternalShotObject(**shot) for shot in shot_list] if shot_list else [] - def create_shot(self, project_uuid, name, duration, meta_data="", desc=""): - res = self.db_repo.create_shot(project_uuid, name, duration, meta_data, desc) + def create_shot(self, project_uuid, duration, name="", meta_data="", desc=""): + res = self.db_repo.create_shot(project_uuid, duration, name, meta_data, desc) shot = res.data['data'] if res.status else None return InternalShotObject(**shot) if shot else None From ebf391583ab42465f65c348575f10a7f3206d963 Mon Sep 17 00:00:00 2001 From: piyushK52 Date: Sun, 29 Oct 2023 21:00:42 +0530 Subject: [PATCH 128/164] frame addition fix --- ui_components/methods/common_methods.py | 4 +-- .../widgets/add_key_frame_element.py | 26 +++++++------------ .../widgets/animation_style_element.py | 3 --- .../widgets/frame_movement_widgets.py | 1 + ui_components/widgets/shot_view.py | 26 ++++++++++++------- ui_components/widgets/timeline_view.py | 4 
++- 6 files changed, 31 insertions(+), 33 deletions(-) diff --git a/ui_components/methods/common_methods.py b/ui_components/methods/common_methods.py index a828558c..05b405d0 100644 --- a/ui_components/methods/common_methods.py +++ b/ui_components/methods/common_methods.py @@ -533,13 +533,13 @@ def get_audio_bytes_for_slice(timing_uuid): return audio_bytes -def create_frame_inside_shot(shot_uuid, index_of_frame): +def create_frame_inside_shot(shot_uuid, aux_frame_index): data_repo = DataRepo() timing_data = { "shot_id": shot_uuid, "animation_style": AnimationStyleType.INTERPOLATION.value, - "aux_frame_index": index_of_frame + "aux_frame_index": aux_frame_index } timing: InternalFrameTimingObject = data_repo.create_timing(**timing_data) diff --git a/ui_components/widgets/add_key_frame_element.py b/ui_components/widgets/add_key_frame_element.py index 1cbeeb1a..a83380bb 100644 --- a/ui_components/widgets/add_key_frame_element.py +++ b/ui_components/widgets/add_key_frame_element.py @@ -94,32 +94,24 @@ def add_key_frame(selected_image, inherit_styling_settings, shot_uuid, target_fr data_repo = DataRepo() timing_list = data_repo.get_timing_list_from_shot(shot_uuid) - if len(timing_list) == 0: - index_of_current_item = 1 - else: - target_frame_position = st.session_state['current_frame_index'] if target_frame_position is None else target_frame_position - index_of_current_item = min(len(timing_list), target_frame_position) - - - if len(timing_list) == 0: - _ = create_frame_inside_shot(shot_uuid, 0) - else: - _ = create_frame_inside_shot(shot_uuid, index_of_current_item) + target_frame_position = st.session_state['current_frame_index'] if target_frame_position is None else target_frame_position + target_aux_frame_index = min(len(timing_list), target_frame_position) + _ = create_frame_inside_shot(shot_uuid, target_aux_frame_index) timing_list = data_repo.get_timing_list_from_shot(shot_uuid) if selected_image: - save_uploaded_image(selected_image, shot_uuid, 
timing_list[index_of_current_item - 1].uuid, WorkflowStageType.SOURCE.value) - save_uploaded_image(selected_image, shot_uuid, timing_list[index_of_current_item - 1].uuid, WorkflowStageType.STYLED.value) + save_uploaded_image(selected_image, shot_uuid, timing_list[target_aux_frame_index].uuid, WorkflowStageType.SOURCE.value) + save_uploaded_image(selected_image, shot_uuid, timing_list[target_aux_frame_index].uuid, WorkflowStageType.STYLED.value) - if inherit_styling_settings == "Yes": - clone_styling_settings(index_of_current_item - 1, timing_list[index_of_current_item - 1].uuid) + if inherit_styling_settings == "Yes" and st.session_state['current_frame_index']: + clone_styling_settings(st.session_state['current_frame_index'] - 1, timing_list[target_aux_frame_index].uuid) if len(timing_list) == 1: st.session_state['current_frame_index'] = 1 st.session_state['current_frame_uuid'] = timing_list[0].uuid else: - st.session_state['prev_frame_index'] = min(len(timing_list), index_of_current_item + 1) - st.session_state['current_frame_index'] = min(len(timing_list), index_of_current_item + 1) + st.session_state['prev_frame_index'] = min(len(timing_list), target_aux_frame_index + 1) + st.session_state['current_frame_index'] = min(len(timing_list), target_aux_frame_index + 1) st.session_state['current_frame_uuid'] = timing_list[st.session_state['current_frame_index'] - 1].uuid st.session_state['page'] = CreativeProcessType.STYLING.value diff --git a/ui_components/widgets/animation_style_element.py b/ui_components/widgets/animation_style_element.py index 49a271d9..4a1a214e 100644 --- a/ui_components/widgets/animation_style_element.py +++ b/ui_components/widgets/animation_style_element.py @@ -9,9 +9,6 @@ def animation_style_element(shot_uuid): motion_modules = AnimateDiffCheckpoint.get_name_list() - data_repo = DataRepo() - shot = data_repo.get_shot_from_uuid(shot_uuid) - project_settings = data_repo.get_project_setting(shot.project.uuid) variant_count = 1 
current_animation_style = AnimationStyleType.INTERPOLATION.value # setting a default value diff --git a/ui_components/widgets/frame_movement_widgets.py b/ui_components/widgets/frame_movement_widgets.py index 066c4485..57578b06 100644 --- a/ui_components/widgets/frame_movement_widgets.py +++ b/ui_components/widgets/frame_movement_widgets.py @@ -92,6 +92,7 @@ def delete_frame(timing_uuid): if len(timing_list) == 1: st.error("can't delete the only image present in the shot") + time.sleep(0.3) return data_repo.delete_timing_from_uuid(timing.uuid) diff --git a/ui_components/widgets/shot_view.py b/ui_components/widgets/shot_view.py index 6ac07885..b2574a5c 100644 --- a/ui_components/widgets/shot_view.py +++ b/ui_components/widgets/shot_view.py @@ -1,3 +1,4 @@ +import time from typing import List import streamlit as st from ui_components.constants import WorkflowStageType @@ -27,20 +28,25 @@ def shot_keyframe_element(shot_uuid, items_per_row, **kwargs): else: st.warning("No keyframes present") -def shot_video_element(shot_uuid, idx, items_per_row): +def shot_video_element(shot_uuid): data_repo = DataRepo() shot: InternalShotObject = data_repo.get_shot_from_uuid(shot_uuid) - grid = st.columns(items_per_row) - with grid[idx%items_per_row]: - st.title(shot.name) - if shot.main_clip and shot.main_clip.location: - st.video(shot.main_clip.location) - else: - st.warning("No video present") + st.title(shot.name) + if shot.main_clip and shot.main_clip.location: + st.video(shot.main_clip.location) + else: + st.warning("No video present") + + if st.button("Generate video", key=shot.uuid): + if not (shot.timing_list and len(shot.timing_list) and len(shot.timing_list) > 1): + st.error("Atleast two frames are required") + time.sleep(0.3) + return + + # NOTE: @peter let me know if you want me to complete this + - if st.button("Generate video", key=shot.uuid): - pass def timeline_view_buttons(idx, shot_uuid, replace_image_widget_toggle, copy_frame_toggle, move_frames_toggle, 
delete_frames_toggle, change_position_toggle): data_repo = DataRepo() diff --git a/ui_components/widgets/timeline_view.py b/ui_components/widgets/timeline_view.py index 4ef841ea..64675145 100644 --- a/ui_components/widgets/timeline_view.py +++ b/ui_components/widgets/timeline_view.py @@ -51,8 +51,10 @@ def timeline_view(shot_uuid, stage): for shot in shot_list: shot_keyframe_element(shot.uuid, items_per_row, **btn_data) else: + grid = st.columns(items_per_row) for idx, shot in enumerate(shot_list): - shot_video_element(shot.uuid, idx, items_per_row) + with grid[idx%items_per_row]: + shot_video_element(shot.uuid) # for i in range(0, total_count, items_per_row): # Step of items_per_row for grid # grid = st.columns(items_per_row) # Create items_per_row columns for grid From 944776a9bd8aba13b6260f584eb89cd11e13ca47 Mon Sep 17 00:00:00 2001 From: piyushK52 Date: Sun, 29 Oct 2023 21:29:05 +0530 Subject: [PATCH 129/164] shot movement added --- backend/db_repo.py | 6 ++++-- ui_components/widgets/shot_view.py | 17 +++++++++++++++++ utils/data_repo/api_repo.py | 11 ++--------- utils/data_repo/data_repo.py | 4 ++-- 4 files changed, 25 insertions(+), 13 deletions(-) diff --git a/backend/db_repo.py b/backend/db_repo.py index 07822255..e1d08a0f 100644 --- a/backend/db_repo.py +++ b/backend/db_repo.py @@ -1438,7 +1438,7 @@ def get_shot_list(self, project_uuid): if not project: return InternalResponse({}, 'invalid project uuid', False) - shot_list: List[Shot] = Shot.objects.filter(project_id=project.id, is_disabled=False).all() + shot_list: List[Shot] = Shot.objects.filter(project_id=project.id, is_disabled=False).order_by('shot_idx').all() timing_list = Timing.objects.filter(is_disabled=False).all() context = {'timing_list': timing_list} @@ -1477,7 +1477,7 @@ def create_shot(self, project_uuid, duration, name=None, meta_data="", desc=""): return InternalResponse(payload, 'shot created successfully', True) - def update_shot(self, shot_uuid, name=None, duration=None, 
meta_data=None, desc=None): + def update_shot(self, shot_uuid, shot_idx=None, name=None, duration=None, meta_data=None, desc=None): shot: Shot = Shot.objects.filter(uuid=shot_uuid, is_disabled=False).first() if not shot: return InternalResponse({}, 'invalid shot uuid', False) @@ -1491,6 +1491,8 @@ def update_shot(self, shot_uuid, name=None, duration=None, meta_data=None, desc= update_data['meta_data'] = meta_data if desc != None: update_data['desc'] = desc + if shot_idx != None: + update_data['shot_idx'] = shot_idx for k,v in update_data.items(): setattr(shot, k, v) diff --git a/ui_components/widgets/shot_view.py b/ui_components/widgets/shot_view.py index b2574a5c..7adb4039 100644 --- a/ui_components/widgets/shot_view.py +++ b/ui_components/widgets/shot_view.py @@ -16,6 +16,23 @@ def shot_keyframe_element(shot_uuid, items_per_row, **kwargs): st.title(shot.name) timing_list: List[InternalFrameTimingObject] = shot.timing_list + if st.button("Move shot down", key=f'shot_down_movement_{shot.uuid}'): + shot_list = data_repo.get_shot_list(shot.project.uuid) + if shot.shot_idx < len(shot_list): + data_repo.update_shot(shot_uuid, shot_idx=shot.shot_idx+1) + else: + st.error("this is the last shot") + time.sleep(0.3) + st.rerun() + + if st.button("Move shot up", key=f'shot_up_movement_{shot.uuid}'): + if shot.shot_idx > 0: + data_repo.update_shot(shot_uuid, shot_idx=shot.shot_idx-1) + else: + st.error("this is the first shot") + time.sleep(0.3) + st.rerun() + grid = st.columns(items_per_row) if timing_list and len(timing_list): for idx, timing in enumerate(timing_list): diff --git a/utils/data_repo/api_repo.py b/utils/data_repo/api_repo.py index 8ec7a5a2..39e3fd0f 100644 --- a/utils/data_repo/api_repo.py +++ b/utils/data_repo/api_repo.py @@ -484,15 +484,8 @@ def create_shot(self, project_uuid, name, duration, meta_data="", desc=""): res = self.http_post(self.SHOT_URL, data=data) return InternalResponse(res['payload'], 'success', res['status']) - def update_shot(self, 
shot_uuid, name=None, duration=None, meta_data=None, desc=None): - update_data = { - 'uuid': shot_uuid, - 'name': name, - 'duration': duration, - 'meta_data': meta_data, - 'desc': desc - } - res = self.http_put(self.SHOT_URL, data=update_data) + def update_shot(self, shot_uuid, **kwargs): + res = self.http_put(self.SHOT_URL, data=kwargs) return InternalResponse(res['payload'], 'success', res['status']) def delete_shot(self, shot_uuid): diff --git a/utils/data_repo/data_repo.py b/utils/data_repo/data_repo.py index 194ca99c..90823d17 100644 --- a/utils/data_repo/data_repo.py +++ b/utils/data_repo/data_repo.py @@ -419,8 +419,8 @@ def create_shot(self, project_uuid, duration, name="", meta_data="", desc=""): shot = res.data['data'] if res.status else None return InternalShotObject(**shot) if shot else None - def update_shot(self, shot_uuid, name=None, duration=None, meta_data=None, desc=None): - res = self.db_repo.update_shot(shot_uuid, name, duration, meta_data, desc) + def update_shot(self, shot_uuid, shot_idx=None, name=None, duration=None, meta_data=None, desc=None): + res = self.db_repo.update_shot(shot_uuid, shot_idx=shot_idx, name=name, duration=duration, meta_data=meta_data, desc=desc) return res.status def delete_shot(self, shot_uuid): From 0948cd384605ebe8aa944730cc86b339d739622f Mon Sep 17 00:00:00 2001 From: piyushK52 Date: Mon, 30 Oct 2023 18:38:01 +0530 Subject: [PATCH 130/164] shot selection added --- ui_components/widgets/frame_selector.py | 31 ++++++++++++++++++++++++- 1 file changed, 30 insertions(+), 1 deletion(-) diff --git a/ui_components/widgets/frame_selector.py b/ui_components/widgets/frame_selector.py index 5f0e185e..0e8a9c61 100644 --- a/ui_components/widgets/frame_selector.py +++ b/ui_components/widgets/frame_selector.py @@ -10,15 +10,28 @@ def frame_selector_widget(): time1, time2 = st.columns([1,1]) timing_list = data_repo.get_timing_list_from_shot(shot_uuid=st.session_state["shot_uuid"]) + shot = 
data_repo.get_shot_from_uuid(st.session_state["shot_uuid"]) + shot_list = data_repo.get_shot_list(shot.project.uuid) len_timing_list = len(timing_list) if len(timing_list) > 0 else 1.0 st.progress(st.session_state['current_frame_index'] / len_timing_list) + with time1: + if 'prev_shot_index' not in st.session_state: + st.session_state['prev_shot_index'] = shot.shot_idx + + st.session_state['current_shot_index'] = st.number_input(f"Shot # (out of {len(shot_list)})", 1, + len(shot_list), value=st.session_state['prev_shot_index'], + step=1, key="current_shot_sidebar_selector") + + update_current_shot_index(st.session_state['current_shot_index']) + + with time2: if 'prev_frame_index' not in st.session_state: st.session_state['prev_frame_index'] = 1 st.session_state['current_frame_index'] = st.number_input(f"Key frame # (out of {len(timing_list)})", 1, len(timing_list), value=st.session_state['prev_frame_index'], - step=1, key="which_image_selector") + step=1, key="current_frame_sidebar_selector") update_current_frame_index(st.session_state['current_frame_index']) @@ -63,4 +76,20 @@ def update_current_frame_index(index): st.session_state['frame_styling_view_type_index'] = 0 st.session_state['frame_styling_view_type'] = "Individual View" + st.rerun() + + +def update_current_shot_index(index): + data_repo = DataRepo() + shot_list = data_repo.get_shot_list(project_uuid=st.session_state["project_uuid"]) + + st.session_state['shot_uuid'] = shot_list[index - 1].uuid + + if st.session_state['prev_shot_index'] != index: + st.session_state['prev_shot_index'] = index + st.session_state['shot_uuid'] = shot_list[index - 1].uuid + st.session_state['reset_canvas'] = True + st.session_state['frame_styling_view_type_index'] = 0 + st.session_state['frame_styling_view_type'] = "Individual View" + st.rerun() From 9940d2a5b7151e5cb51d507cb29449533b3a81d1 Mon Sep 17 00:00:00 2001 From: peter942 Date: Mon, 30 Oct 2023 18:34:24 +0000 Subject: [PATCH 131/164] Restructing UX for shots --- 
.../components/frame_styling_page.py | 11 +- ui_components/constants.py | 2 +- ui_components/methods/common_methods.py | 6 +- .../widgets/frame_movement_widgets.py | 1 + ui_components/widgets/frame_selector.py | 87 ++++++++----- ui_components/widgets/shot_view.py | 122 +++++++++++++----- ui_components/widgets/timeline_view.py | 30 +++-- 7 files changed, 171 insertions(+), 88 deletions(-) diff --git a/ui_components/components/frame_styling_page.py b/ui_components/components/frame_styling_page.py index 59a7439c..a1b25238 100644 --- a/ui_components/components/frame_styling_page.py +++ b/ui_components/components/frame_styling_page.py @@ -55,8 +55,11 @@ def frame_styling_page(shot_uuid: str): st.markdown( f"#### :red[{st.session_state['main_view_type']}] > **:green[{st.session_state['frame_styling_view_type']}]** > :orange[{st.session_state['page']}]") else: - st.markdown( - f"#### :red[{st.session_state['main_view_type']}] > **:green[{st.session_state['frame_styling_view_type']}]** > :orange[{st.session_state['page']}] > :blue[Frame #{st.session_state['current_frame_index']}]") + if st.session_state['page'] == "Key Frames": + st.markdown( + f"#### :red[{st.session_state['main_view_type']}] > **:green[{st.session_state['frame_styling_view_type']}]** > :orange[{st.session_state['page']}] > :blue[Frame #{st.session_state['current_frame_index']}]") + else: + st.markdown(f"#### :red[{st.session_state['main_view_type']}] > **:green[{st.session_state['frame_styling_view_type']}]** > :orange[{st.session_state['page']}] > :blue[Shot #{st.session_state['current_frame_index']}]") project_settings = data_repo.get_project_setting(shot.project.uuid) @@ -162,8 +165,8 @@ def frame_styling_page(shot_uuid: str): with st.expander("🌀 Batch Styling", expanded=False): styling_element(st.session_state['current_frame_uuid'], view_type=ViewType.LIST.value) timeline_view(shot_uuid, "Key Frames") - elif st.session_state['page'] == "Videos": - timeline_view(shot_uuid, "Videos") + elif 
st.session_state['page'] == "Shots": + timeline_view(shot_uuid, "Shots") # -------------------- SIDEBAR NAVIGATION -------------------------- with st.sidebar: diff --git a/ui_components/constants.py b/ui_components/constants.py index 99fb7347..9bcf836d 100644 --- a/ui_components/constants.py +++ b/ui_components/constants.py @@ -14,7 +14,7 @@ class VideoQuality(ExtendedEnum): class CreativeProcessType(ExtendedEnum): STYLING = "Key Frames" - MOTION = "Videos" + MOTION = "Shots" class DefaultTimingStyleParams: prompt = "" diff --git a/ui_components/methods/common_methods.py b/ui_components/methods/common_methods.py index 05b405d0..00e663dc 100644 --- a/ui_components/methods/common_methods.py +++ b/ui_components/methods/common_methods.py @@ -78,10 +78,10 @@ def save_new_image(img: Union[Image.Image, str, np.ndarray, io.BytesIO], shot_uu def save_and_promote_image(image, project_uuid, frame_uuid, save_type): data_repo = DataRepo() - + + try: - saved_image = save_new_image(image, project_uuid) - + saved_image = save_new_image(image, project_uuid) # Update records based on save_type if save_type == "source": data_repo.update_specific_timing(frame_uuid, source_image_id=saved_image.uuid) diff --git a/ui_components/widgets/frame_movement_widgets.py b/ui_components/widgets/frame_movement_widgets.py index 57578b06..e5810caa 100644 --- a/ui_components/widgets/frame_movement_widgets.py +++ b/ui_components/widgets/frame_movement_widgets.py @@ -158,6 +158,7 @@ def replace_image_widget(timing_uuid, stage, options=["Uploaded Frame","Other Fr else: replacement_frame = st.file_uploader("Upload Styled Image", type=[ "png", "jpeg"], accept_multiple_files=False, key=f"replacement_frame_upload_{stage}_{timing_uuid}") + st.write(replacement_frame) if replacement_frame != None: if st.button("Replace frame", disabled=False): timing = data_repo.get_timing_from_uuid(timing.uuid) diff --git a/ui_components/widgets/frame_selector.py b/ui_components/widgets/frame_selector.py index 
0e8a9c61..c301d8f2 100644 --- a/ui_components/widgets/frame_selector.py +++ b/ui_components/widgets/frame_selector.py @@ -1,6 +1,7 @@ import streamlit as st from ui_components.widgets.frame_movement_widgets import delete_frame, replace_image_widget from ui_components.widgets.image_carousal import display_image +from ui_components.models import InternalFrameTimingObject, InternalShotObject from utils.data_repo.data_repo import DataRepo from ui_components.constants import WorkflowStageType @@ -24,43 +25,63 @@ def frame_selector_widget(): step=1, key="current_shot_sidebar_selector") update_current_shot_index(st.session_state['current_shot_index']) + if st.session_state['page'] == "Key Frames": + with time2: + if 'prev_frame_index' not in st.session_state: + st.session_state['prev_frame_index'] = 1 + + st.session_state['current_frame_index'] = st.number_input(f"Key frame # (out of {len(timing_list)})", 1, + len(timing_list), value=st.session_state['prev_frame_index'], + step=1, key="current_frame_sidebar_selector") + + update_current_frame_index(st.session_state['current_frame_index']) + + with st.expander(f"🖼️ Frame #{st.session_state['current_frame_index']} Details"): + a1, a2 = st.columns([1,1]) + with a1: + st.warning(f"Guidance Image:") + display_image(st.session_state['current_frame_uuid'], stage=WorkflowStageType.SOURCE.value, clickable=False) - with time2: - if 'prev_frame_index' not in st.session_state: - st.session_state['prev_frame_index'] = 1 + with a2: + st.success(f"Main Styled Image:") + display_image(st.session_state['current_frame_uuid'], stage=WorkflowStageType.STYLED.value, clickable=False) - st.session_state['current_frame_index'] = st.number_input(f"Key frame # (out of {len(timing_list)})", 1, - len(timing_list), value=st.session_state['prev_frame_index'], - step=1, key="current_frame_sidebar_selector") - - update_current_frame_index(st.session_state['current_frame_index']) - - with st.expander(f"🖼️ Frame #{st.session_state['current_frame_index']} 
Details"): - a1, a2 = st.columns([1,1]) - with a1: - st.warning(f"Guidance Image:") - display_image(st.session_state['current_frame_uuid'], stage=WorkflowStageType.SOURCE.value, clickable=False) - - with a2: - st.success(f"Main Styled Image:") - display_image(st.session_state['current_frame_uuid'], stage=WorkflowStageType.STYLED.value, clickable=False) - - st.markdown("---") - - b1, b2 = st.columns([1,1]) - with b1: - st.caption("Replace guidance image") - replace_image_widget(st.session_state['current_frame_uuid'], stage=WorkflowStageType.SOURCE.value) - - with b2: - st.caption("Replace styled image") - replace_image_widget(st.session_state['current_frame_uuid'], stage=WorkflowStageType.STYLED.value) + st.markdown("---") + + b1, b2 = st.columns([1,1]) + with b1: + st.caption("Replace guidance image") + replace_image_widget(st.session_state['current_frame_uuid'], stage=WorkflowStageType.SOURCE.value) + + with b2: + st.caption("Replace styled image") + replace_image_widget(st.session_state['current_frame_uuid'], stage=WorkflowStageType.STYLED.value) + + st.markdown("---") + + if st.button("Delete key frame"): + delete_frame(st.session_state['current_frame_uuid']) + st.rerun() + + else: + shot_list = data_repo.get_shot_list(shot.project.uuid) + shot: InternalShotObject = data_repo.get_shot_from_uuid(st.session_state["shot_uuid"]) + with st.expander(f"🎬 {shot.name} Details"): - st.markdown("---") + # shot = data_repo.get_shot_from_uuid(st.session_state["shot_uuid"]) - if st.button("Delete key frame"): - delete_frame(st.session_state['current_frame_uuid']) - st.rerun() + timing_list: List[InternalFrameTimingObject] = shot.timing_list + + if timing_list and len(timing_list): + grid = st.columns(3) # Change to 3 columns + for idx, timing in enumerate(timing_list): + with grid[idx % 3]: # Change to 3 columns + if timing.primary_image and timing.primary_image.location: + st.image(timing.primary_image.location, use_column_width=True) + else: + st.warning("No primary image 
present") + else: + st.warning("No keyframes present") def update_current_frame_index(index): diff --git a/ui_components/widgets/shot_view.py b/ui_components/widgets/shot_view.py index 7adb4039..a52ab425 100644 --- a/ui_components/widgets/shot_view.py +++ b/ui_components/widgets/shot_view.py @@ -8,64 +8,116 @@ from ui_components.widgets.add_key_frame_element import add_key_frame from ui_components.widgets.frame_movement_widgets import change_frame_position_input, delete_frame_button, jump_to_single_frame_view_button, move_frame_back_button, move_frame_forward_button, replace_image_widget from utils.data_repo.data_repo import DataRepo +from utils import st_memory def shot_keyframe_element(shot_uuid, items_per_row, **kwargs): data_repo = DataRepo() shot: InternalShotObject = data_repo.get_shot_from_uuid(shot_uuid) + + if "open_shot" not in st.session_state: + st.session_state["open_shot"] = None + + # st.markdown(f"### {shot.name}", expanded=True) - st.title(shot.name) timing_list: List[InternalFrameTimingObject] = shot.timing_list + + with st.expander(f"{shot.name}", expanded=True): - if st.button("Move shot down", key=f'shot_down_movement_{shot.uuid}'): - shot_list = data_repo.get_shot_list(shot.project.uuid) - if shot.shot_idx < len(shot_list): - data_repo.update_shot(shot_uuid, shot_idx=shot.shot_idx+1) + if st.session_state["open_shot"] != shot.shot_idx: + if st.toggle("Open shot", key=f"shot_{shot.shot_idx}"): + st.session_state["open_shot"] = shot.shot_idx + st.experimental_rerun() else: - st.error("this is the last shot") - time.sleep(0.3) - st.rerun() + if not st.toggle("Open shot", key=f"close_shot_{shot.shot_idx}", value=True): + st.session_state["open_shot"] = None + st.experimental_rerun() + + if st.session_state["open_shot"] == shot.shot_idx: + + header_col_1, header_col_2 = st.columns([1.5,4]) + + with header_col_1: + name = st.text_input("Update name:", value=shot.name) + duration = st.number_input("Duration:") + + with header_col_2: + col1, col2, 
col3, col4 = st.columns(4) - if st.button("Move shot up", key=f'shot_up_movement_{shot.uuid}'): - if shot.shot_idx > 0: - data_repo.update_shot(shot_uuid, shot_idx=shot.shot_idx-1) + with col2: + delete_frames_toggle = st_memory.toggle("Delete Frames", value=True, key="delete_frames_toggle") + copy_frame_toggle = st_memory.toggle("Copy Frame", value=True, key="copy_frame_toggle") + with col3: + move_frames_toggle = st_memory.toggle("Move Frames", value=True, key="move_frames_toggle") + replace_image_widget_toggle = st_memory.toggle("Replace Image", value=False, key="replace_image_widget_toggle") + + with col4: + change_shot_toggle = st_memory.toggle("Change Shot", value=False, key="change_shot_toggle") + + st.markdown("***") + + + grid = st.columns(items_per_row) + if timing_list and len(timing_list): + for idx, timing in enumerate(timing_list): + with grid[idx%items_per_row]: + if timing.primary_image and timing.primary_image.location: + st.image(timing.primary_image.location, use_column_width=True) + if st.session_state["open_shot"] == shot.shot_idx: + timeline_view_buttons(idx, shot_uuid, replace_image_widget_toggle, copy_frame_toggle, move_frames_toggle,delete_frames_toggle, change_shot_toggle) + else: + st.warning("No primary image present") else: - st.error("this is the first shot") - time.sleep(0.3) - st.rerun() - - grid = st.columns(items_per_row) - if timing_list and len(timing_list): - for idx, timing in enumerate(timing_list): - with grid[idx%items_per_row]: - if timing.primary_image and timing.primary_image.location: - st.image(timing.primary_image.location, use_column_width=True) - timeline_view_buttons(idx, shot_uuid, **kwargs) + st.warning("No keyframes present") + + st.markdown("***") + + + if st.session_state["open_shot"] == shot.shot_idx: + bottom1, bottom2, bottom3 = st.columns([1,2,1]) + with bottom1: + confirm_delete = st.checkbox("I know that this will delete all the frames and videos within") + if confirm_delete: + if st.button("Delete 
frame"): + st.success("Done!") else: - st.warning("No primary image present") - else: - st.warning("No keyframes present") + st.button("Delete frame", disabled=True, help="Check the box above to enable the delete bottom.") + with bottom3: + if st.button("Move shot down", key=f'shot_down_movement_{shot.uuid}'): + shot_list = data_repo.get_shot_list(shot.project.uuid) + if shot.shot_idx < len(shot_list): + data_repo.update_shot(shot_uuid, shot_idx=shot.shot_idx+1) + else: + st.error("This is the last shot") + time.sleep(0.3) + st.rerun() + + if st.button("Move shot up", key=f'shot_up_movement_{shot.uuid}'): + if shot.shot_idx > 0: + data_repo.update_shot(shot_uuid, shot_idx=shot.shot_idx-1) + else: + st.error("This is the first shot") + time.sleep(0.3) + st.rerun() + def shot_video_element(shot_uuid): data_repo = DataRepo() + shot: InternalShotObject = data_repo.get_shot_from_uuid(shot_uuid) - st.title(shot.name) + st.markdown(f"## {shot.name}") if shot.main_clip and shot.main_clip.location: st.video(shot.main_clip.location) else: st.warning("No video present") + + if st.button(f"Jump to {shot.name}", key=f"btn_{shot_uuid}"): + st.success("Coming soon") - if st.button("Generate video", key=shot.uuid): - if not (shot.timing_list and len(shot.timing_list) and len(shot.timing_list) > 1): - st.error("Atleast two frames are required") - time.sleep(0.3) - return - - # NOTE: @peter let me know if you want me to complete this -def timeline_view_buttons(idx, shot_uuid, replace_image_widget_toggle, copy_frame_toggle, move_frames_toggle, delete_frames_toggle, change_position_toggle): +def timeline_view_buttons(idx, shot_uuid, replace_image_widget_toggle, copy_frame_toggle, move_frames_toggle, delete_frames_toggle, change_shot_toggle): data_repo = DataRepo() shot = data_repo.get_shot_from_uuid(shot_uuid) timing_list = shot.timing_list @@ -92,7 +144,7 @@ def timeline_view_buttons(idx, shot_uuid, replace_image_widget_toggle, copy_fram with btn4: 
delete_frame_button(timing_list[idx].uuid) - if change_position_toggle: + if change_shot_toggle: change_frame_position_input(timing_list[idx].uuid, "side-to-side") jump_to_single_frame_view_button(idx + 1, timing_list, 'timeline_btn_'+str(timing_list[idx].uuid)) diff --git a/ui_components/widgets/timeline_view.py b/ui_components/widgets/timeline_view.py index 64675145..94d56f6f 100644 --- a/ui_components/widgets/timeline_view.py +++ b/ui_components/widgets/timeline_view.py @@ -9,16 +9,14 @@ def timeline_view(shot_uuid, stage): data_repo = DataRepo() shot = data_repo.get_shot_from_uuid(shot_uuid) shot_list = data_repo.get_shot_list(shot.project.uuid) - + st.markdown("***") - + header_col_1, header_col_2, header_col_3 = st.columns([1.5,4,1.5]) - with header_col_1: - if st.button('Add new shot'): - add_new_shot(shot.project.uuid) - st.rerun() - + # with header_col_1: + + ''' with header_col_2: col1, col2, col3 = st.columns(3) @@ -37,24 +35,32 @@ def timeline_view(shot_uuid, stage): replace_image_widget_toggle = st_memory.toggle("Replace Image", value=False, key="replace_image_widget_toggle") change_position_toggle = st_memory.toggle("Change Position", value=False, key="change_position_toggle") - with header_col_3: - items_per_row = st_memory.slider("How many frames per row?", min_value=1, max_value=10, value=5, step=1, key="items_per_row_slider") - btn_data = { - "replace_image_widget_toggle": replace_image_widget_toggle, + "replace_imagshot_keyframe_element_widget_toggle": replace_image_widget_toggle, "copy_frame_toggle": copy_frame_toggle, "move_frames_toggle": move_frames_toggle, "delete_frames_toggle": delete_frames_toggle, "change_position_toggle": change_position_toggle } + ''' + + with header_col_3: + items_per_row = st_memory.slider("How many frames per row?", min_value=1, max_value=10, value=5, step=1, key="items_per_row_slider") + if stage == 'Key Frames': for shot in shot_list: - shot_keyframe_element(shot.uuid, items_per_row, **btn_data) + 
shot_keyframe_element(shot.uuid, items_per_row) + st.markdown("***") + if st.button('Add new shot', type="primary"): + add_new_shot(shot.project.uuid) + st.rerun() else: grid = st.columns(items_per_row) for idx, shot in enumerate(shot_list): with grid[idx%items_per_row]: shot_video_element(shot.uuid) + + # for i in range(0, total_count, items_per_row): # Step of items_per_row for grid # grid = st.columns(items_per_row) # Create items_per_row columns for grid From be0bd88351256bbd6a3eb58086c54310d2f711e9 Mon Sep 17 00:00:00 2001 From: peter942 Date: Tue, 31 Oct 2023 11:00:20 +0000 Subject: [PATCH 132/164] Optimisations --- .../components/frame_styling_page.py | 8 ++--- .../widgets/frame_movement_widgets.py | 2 ++ ui_components/widgets/frame_selector.py | 20 +++++++++--- ui_components/widgets/shot_view.py | 30 ++++++++++------- ui_components/widgets/timeline_view.py | 32 ++----------------- 5 files changed, 41 insertions(+), 51 deletions(-) diff --git a/ui_components/components/frame_styling_page.py b/ui_components/components/frame_styling_page.py index a1b25238..e38b1053 100644 --- a/ui_components/components/frame_styling_page.py +++ b/ui_components/components/frame_styling_page.py @@ -59,7 +59,7 @@ def frame_styling_page(shot_uuid: str): st.markdown( f"#### :red[{st.session_state['main_view_type']}] > **:green[{st.session_state['frame_styling_view_type']}]** > :orange[{st.session_state['page']}] > :blue[Frame #{st.session_state['current_frame_index']}]") else: - st.markdown(f"#### :red[{st.session_state['main_view_type']}] > **:green[{st.session_state['frame_styling_view_type']}]** > :orange[{st.session_state['page']}] > :blue[Shot #{st.session_state['current_frame_index']}]") + st.markdown(f"#### :red[{st.session_state['main_view_type']}] > **:green[{st.session_state['frame_styling_view_type']}]** > :orange[{st.session_state['page']}] > :blue[{shot.name}]") project_settings = data_repo.get_project_setting(shot.project.uuid) @@ -161,9 +161,9 @@ def 
frame_styling_page(shot_uuid: str): elif st.session_state['frame_styling_view_type'] == "Timeline": if st.session_state['page'] == "Key Frames": - with st.sidebar: - with st.expander("🌀 Batch Styling", expanded=False): - styling_element(st.session_state['current_frame_uuid'], view_type=ViewType.LIST.value) + # with st.sidebar: + # with st.expander("🌀 Batch Styling", expanded=False): + # styling_element(st.session_state['current_frame_uuid'], view_type=ViewType.LIST.value) timeline_view(shot_uuid, "Key Frames") elif st.session_state['page'] == "Shots": timeline_view(shot_uuid, "Shots") diff --git a/ui_components/widgets/frame_movement_widgets.py b/ui_components/widgets/frame_movement_widgets.py index e5810caa..23fbb9a5 100644 --- a/ui_components/widgets/frame_movement_widgets.py +++ b/ui_components/widgets/frame_movement_widgets.py @@ -174,4 +174,6 @@ def jump_to_single_frame_view_button(display_number, timing_list, src): st.session_state['current_frame_uuid'] = timing_list[st.session_state['current_frame_index'] - 1].uuid st.session_state['frame_styling_view_type'] = "Individual" st.session_state['change_view_type'] = True + # st.session_state['page'] = "Key Frames" + st.session_state["manual_select"] = 0 st.rerun() \ No newline at end of file diff --git a/ui_components/widgets/frame_selector.py b/ui_components/widgets/frame_selector.py index c301d8f2..880e9f30 100644 --- a/ui_components/widgets/frame_selector.py +++ b/ui_components/widgets/frame_selector.py @@ -1,11 +1,12 @@ import streamlit as st -from ui_components.widgets.frame_movement_widgets import delete_frame, replace_image_widget +from ui_components.widgets.frame_movement_widgets import delete_frame, replace_image_widget,jump_to_single_frame_view_button from ui_components.widgets.image_carousal import display_image from ui_components.models import InternalFrameTimingObject, InternalShotObject from utils.data_repo.data_repo import DataRepo from ui_components.constants import WorkflowStageType + def 
frame_selector_widget(): data_repo = DataRepo() time1, time2 = st.columns([1,1]) @@ -20,10 +21,15 @@ def frame_selector_widget(): if 'prev_shot_index' not in st.session_state: st.session_state['prev_shot_index'] = shot.shot_idx - st.session_state['current_shot_index'] = st.number_input(f"Shot # (out of {len(shot_list)})", 1, - len(shot_list), value=st.session_state['prev_shot_index'], - step=1, key="current_shot_sidebar_selector") - + # Get the list of shot names + shot_names = [s.name for s in shot_list] + + # Add a selectbox for shot_name + shot_name = st.selectbox('Shot Name', shot_names, key="current_shot_sidebar_selector") + + # Set current_shot_index based on the selected shot_name + st.session_state['current_shot_index'] = shot_names.index(shot_name) + 1 + update_current_shot_index(st.session_state['current_shot_index']) if st.session_state['page'] == "Key Frames": with time2: @@ -78,6 +84,10 @@ def frame_selector_widget(): with grid[idx % 3]: # Change to 3 columns if timing.primary_image and timing.primary_image.location: st.image(timing.primary_image.location, use_column_width=True) + + # Call jump_to_single_frame_view_button function + jump_to_single_frame_view_button(idx + 1, timing_list, f"jump_to_{idx + 1}") + else: st.warning("No primary image present") else: diff --git a/ui_components/widgets/shot_view.py b/ui_components/widgets/shot_view.py index a52ab425..07dae778 100644 --- a/ui_components/widgets/shot_view.py +++ b/ui_components/widgets/shot_view.py @@ -23,25 +23,30 @@ def shot_keyframe_element(shot_uuid, items_per_row, **kwargs): with st.expander(f"{shot.name}", expanded=True): - if st.session_state["open_shot"] != shot.shot_idx: - if st.toggle("Open shot", key=f"shot_{shot.shot_idx}"): - st.session_state["open_shot"] = shot.shot_idx - st.experimental_rerun() - else: - if not st.toggle("Open shot", key=f"close_shot_{shot.shot_idx}", value=True): - st.session_state["open_shot"] = None - st.experimental_rerun() + header_col_0, header_col_1, 
header_col_2, header_col_3 = st.columns([1, 1.75,1,4]) + + with header_col_0: + if st.session_state["open_shot"] != shot.shot_idx: + if st.toggle("Open shot", key=f"shot_{shot.shot_idx}"): + st.session_state["open_shot"] = shot.shot_idx + st.experimental_rerun() + else: + if not st.toggle("Open shot", key=f"close_shot_{shot.shot_idx}", value=True): + st.session_state["open_shot"] = None + st.experimental_rerun() if st.session_state["open_shot"] == shot.shot_idx: - header_col_1, header_col_2 = st.columns([1.5,4]) + with header_col_1: name = st.text_input("Update name:", value=shot.name) - duration = st.number_input("Duration:") with header_col_2: - col1, col2, col3, col4 = st.columns(4) + duration = st.number_input("Duration:") + + with header_col_3: + col2, col3, col4 = st.columns(3) with col2: delete_frames_toggle = st_memory.toggle("Delete Frames", value=True, key="delete_frames_toggle") @@ -53,7 +58,7 @@ def shot_keyframe_element(shot_uuid, items_per_row, **kwargs): with col4: change_shot_toggle = st_memory.toggle("Change Shot", value=False, key="change_shot_toggle") - st.markdown("***") + # st.markdown("***") grid = st.columns(items_per_row) @@ -66,6 +71,7 @@ def shot_keyframe_element(shot_uuid, items_per_row, **kwargs): timeline_view_buttons(idx, shot_uuid, replace_image_widget_toggle, copy_frame_toggle, move_frames_toggle,delete_frames_toggle, change_shot_toggle) else: st.warning("No primary image present") + else: st.warning("No keyframes present") diff --git a/ui_components/widgets/timeline_view.py b/ui_components/widgets/timeline_view.py index 94d56f6f..2569e7f1 100644 --- a/ui_components/widgets/timeline_view.py +++ b/ui_components/widgets/timeline_view.py @@ -12,39 +12,11 @@ def timeline_view(shot_uuid, stage): st.markdown("***") - header_col_1, header_col_2, header_col_3 = st.columns([1.5,4,1.5]) - - # with header_col_1: - - ''' - with header_col_2: - col1, col2, col3 = st.columns(3) - - with col1: - expand_all = st_memory.toggle("Expand All", 
key="expand_all",value=False) - - if expand_all: - replace_image_widget_toggle = copy_frame_toggle = move_frames_toggle = delete_frames_toggle = change_position_toggle = True + _, header_col_2 = st.columns([5.5,1.5]) - else: - with col2: - delete_frames_toggle = st_memory.toggle("Delete Frames", value=True, key="delete_frames_toggle") - copy_frame_toggle = st_memory.toggle("Copy Frame", value=False, key="copy_frame_toggle") - with col3: - move_frames_toggle = st_memory.toggle("Move Frames", value=True, key="move_frames_toggle") - replace_image_widget_toggle = st_memory.toggle("Replace Image", value=False, key="replace_image_widget_toggle") - change_position_toggle = st_memory.toggle("Change Position", value=False, key="change_position_toggle") - btn_data = { - "replace_imagshot_keyframe_element_widget_toggle": replace_image_widget_toggle, - "copy_frame_toggle": copy_frame_toggle, - "move_frames_toggle": move_frames_toggle, - "delete_frames_toggle": delete_frames_toggle, - "change_position_toggle": change_position_toggle - } - ''' - with header_col_3: + with header_col_2: items_per_row = st_memory.slider("How many frames per row?", min_value=1, max_value=10, value=5, step=1, key="items_per_row_slider") if stage == 'Key Frames': From 8cb000071322963c2170d2e600a3b214d5d8ad79 Mon Sep 17 00:00:00 2001 From: peter942 Date: Tue, 31 Oct 2023 13:02:14 +0000 Subject: [PATCH 133/164] Optimisations --- ui_components/components/new_project_page.py | 1 + ui_components/setup.py | 5 +++ .../widgets/frame_movement_widgets.py | 5 +-- ui_components/widgets/frame_selector.py | 40 +++++++++++-------- ui_components/widgets/shot_view.py | 4 +- ui_components/widgets/timeline_view.py | 20 ---------- 6 files changed, 33 insertions(+), 42 deletions(-) diff --git a/ui_components/components/new_project_page.py b/ui_components/components/new_project_page.py index 486502d4..65fec2c4 100644 --- a/ui_components/components/new_project_page.py +++ b/ui_components/components/new_project_page.py @@ 
-44,6 +44,7 @@ def new_project_page(): # Prompt user for video dimension specifications video_width_column, video_height_column, video_info_column = st.columns(3) + frame_sizes = ["512", "704", "768", "896", "1024"] with video_width_column: diff --git a/ui_components/setup.py b/ui_components/setup.py index 56333eb0..ed063cb3 100644 --- a/ui_components/setup.py +++ b/ui_components/setup.py @@ -167,6 +167,11 @@ def setup_app_ui(): st.session_state['page'] = option_menu(None, pages, icons=['palette', 'camera-reels', "hourglass", 'stopwatch'], menu_icon="cast", orientation="horizontal", key="secti2on_selector", styles={ "nav-link": {"font-size": "15px", "margin": "0px", "--hover-color": "#eee"}, "nav-link-selected": {"background-color": "orange"}}, manual_select=st.session_state["manual_select"]) + + if st.session_state["manual_select"] != None: + st.session_state["manual_select"] = None + + frame_styling_page(st.session_state["shot_uuid"]) diff --git a/ui_components/widgets/frame_movement_widgets.py b/ui_components/widgets/frame_movement_widgets.py index 23fbb9a5..4ba99a4f 100644 --- a/ui_components/widgets/frame_movement_widgets.py +++ b/ui_components/widgets/frame_movement_widgets.py @@ -157,8 +157,7 @@ def replace_image_widget(timing_uuid, stage, options=["Uploaded Frame","Other Fr st.rerun() else: replacement_frame = st.file_uploader("Upload Styled Image", type=[ - "png", "jpeg"], accept_multiple_files=False, key=f"replacement_frame_upload_{stage}_{timing_uuid}") - st.write(replacement_frame) + "png", "jpeg"], accept_multiple_files=False, key=f"replacement_frame_upload_{stage}_{timing_uuid}") if replacement_frame != None: if st.button("Replace frame", disabled=False): timing = data_repo.get_timing_from_uuid(timing.uuid) @@ -176,4 +175,4 @@ def jump_to_single_frame_view_button(display_number, timing_list, src): st.session_state['change_view_type'] = True # st.session_state['page'] = "Key Frames" st.session_state["manual_select"] = 0 - st.rerun() \ No newline at end 
of file + st.rerun() diff --git a/ui_components/widgets/frame_selector.py b/ui_components/widgets/frame_selector.py index 880e9f30..f373cbb7 100644 --- a/ui_components/widgets/frame_selector.py +++ b/ui_components/widgets/frame_selector.py @@ -43,31 +43,37 @@ def frame_selector_widget(): update_current_frame_index(st.session_state['current_frame_index']) with st.expander(f"🖼️ Frame #{st.session_state['current_frame_index']} Details"): - a1, a2 = st.columns([1,1]) + a1, a2 = st.columns([3,2]) with a1: - st.warning(f"Guidance Image:") - display_image(st.session_state['current_frame_uuid'], stage=WorkflowStageType.SOURCE.value, clickable=False) - - with a2: - st.success(f"Main Styled Image:") + st.success(f"Main Key Frame:") display_image(st.session_state['current_frame_uuid'], stage=WorkflowStageType.STYLED.value, clickable=False) - st.markdown("---") - - b1, b2 = st.columns([1,1]) - with b1: - st.caption("Replace guidance image") - replace_image_widget(st.session_state['current_frame_uuid'], stage=WorkflowStageType.SOURCE.value) - with b2: + # st.warning(f"Guidance Image:") + # display_image(st.session_state['current_frame_uuid'], stage=WorkflowStageType.SOURCE.value, clickable=False) + with a2: st.caption("Replace styled image") replace_image_widget(st.session_state['current_frame_uuid'], stage=WorkflowStageType.STYLED.value) + st.info("In Context:") + shot_list = data_repo.get_shot_list(shot.project.uuid) + shot: InternalShotObject = data_repo.get_shot_from_uuid(st.session_state["shot_uuid"]) + + # shot = data_repo.get_shot_from_uuid(st.session_state["shot_uuid"]) + timing_list: List[InternalFrameTimingObject] = shot.timing_list + + if timing_list and len(timing_list): + grid = st.columns(4) # Change to 4 columns + for idx, timing in enumerate(timing_list): + with grid[idx % 4]: # Change to 4 columns + if timing.primary_image and timing.primary_image.location: + st.image(timing.primary_image.location, use_column_width=True) + else: + st.warning("No primary image 
present") + else: + st.warning("No keyframes present") + st.markdown("---") - - if st.button("Delete key frame"): - delete_frame(st.session_state['current_frame_uuid']) - st.rerun() else: shot_list = data_repo.get_shot_list(shot.project.uuid) diff --git a/ui_components/widgets/shot_view.py b/ui_components/widgets/shot_view.py index 07dae778..aea00433 100644 --- a/ui_components/widgets/shot_view.py +++ b/ui_components/widgets/shot_view.py @@ -111,11 +111,11 @@ def shot_video_element(shot_uuid): shot: InternalShotObject = data_repo.get_shot_from_uuid(shot_uuid) - st.markdown(f"## {shot.name}") + st.markdown(f"### {shot.name}") if shot.main_clip and shot.main_clip.location: st.video(shot.main_clip.location) else: - st.warning("No video present") + st.warning('''No video present''') if st.button(f"Jump to {shot.name}", key=f"btn_{shot_uuid}"): st.success("Coming soon") diff --git a/ui_components/widgets/timeline_view.py b/ui_components/widgets/timeline_view.py index 2569e7f1..65da4d07 100644 --- a/ui_components/widgets/timeline_view.py +++ b/ui_components/widgets/timeline_view.py @@ -31,23 +31,3 @@ def timeline_view(shot_uuid, stage): for idx, shot in enumerate(shot_list): with grid[idx%items_per_row]: shot_video_element(shot.uuid) - - - - # for i in range(0, total_count, items_per_row): # Step of items_per_row for grid - # grid = st.columns(items_per_row) # Create items_per_row columns for grid - # for j in range(items_per_row): - # if i + j < total_count: # Check if index is within range - # with grid[j]: - # display_number = i + j + 1 - # if stage == 'Key Frames': - # display_image(timing_uuid=shot_list[i + j].uuid, stage=WorkflowStageType.STYLED.value, clickable=False) - # elif stage == 'Videos': - # if shot_list[i + j].main_clip: - # st.video(shot_list[i + j].main_clip.location) - # else: - # st.error("No video found for this frame.") - # with st.expander(f'Frame #{display_number}', True): - # timeline_view_buttons(i, j, shot_list, time_setter_toggle, 
replace_image_widget_toggle, duration_setter_toggle, copy_frame_toggle, move_frames_toggle, delete_frames_toggle, change_position_toggle, project_uuid) - - From 79cc5a9be678774b3dc0b317ffda5b48a88be778 Mon Sep 17 00:00:00 2001 From: peter942 Date: Tue, 31 Oct 2023 14:11:47 +0000 Subject: [PATCH 134/164] Optimisations --- ui_components/components/frame_styling_page.py | 2 ++ ui_components/widgets/frame_movement_widgets.py | 14 +++++++------- ui_components/widgets/frame_selector.py | 5 +++-- ui_components/widgets/shot_view.py | 4 ++-- ui_components/widgets/timeline_view.py | 2 -- 5 files changed, 14 insertions(+), 13 deletions(-) diff --git a/ui_components/components/frame_styling_page.py b/ui_components/components/frame_styling_page.py index e38b1053..2873af91 100644 --- a/ui_components/components/frame_styling_page.py +++ b/ui_components/components/frame_styling_page.py @@ -24,6 +24,8 @@ def frame_styling_page(shot_uuid: str): + + data_repo = DataRepo() shot = data_repo.get_shot_from_uuid(shot_uuid) timing_list = data_repo.get_timing_list_from_shot(shot_uuid) diff --git a/ui_components/widgets/frame_movement_widgets.py b/ui_components/widgets/frame_movement_widgets.py index 4ba99a4f..7953a98c 100644 --- a/ui_components/widgets/frame_movement_widgets.py +++ b/ui_components/widgets/frame_movement_widgets.py @@ -13,18 +13,18 @@ def change_frame_position_input(timing_uuid, src): ''' data_repo = DataRepo() timing: InternalFrameTimingObject = data_repo.get_timing_from_uuid(timing_uuid) - timing_list = data_repo.get_timing_list_from_shot(shot_uuid=timing.shot.uuid) - - min_value = 1 - max_value = len(timing_list) + project_uuid = timing.shot.project.uuid + + shot_list = data_repo.get_shot_list(project_uuid) + shot_names = [shot.name for shot in shot_list] - new_position = st.number_input("Move to new position:", min_value=min_value, max_value=max_value, - value=timing.aux_frame_index + 1, step=1, key=f"new_position_{timing.uuid}_{src}") + new_position = st.selectbox("Move 
to new shot:", shot_names, key=f"new_position_{timing.uuid}_{src}") - if st.button('Update Position',key=f"change_frame_position_{timing.uuid}_{src}"): + if st.button('Move to shot',key=f"change_frame_position_{timing.uuid}_{src}",use_container_width=True): data_repo.update_specific_timing(timing.uuid, aux_frame_index=new_position - 1) st.rerun() + def move_frame(direction, timing_uuid): ''' diff --git a/ui_components/widgets/frame_selector.py b/ui_components/widgets/frame_selector.py index f373cbb7..10920ca6 100644 --- a/ui_components/widgets/frame_selector.py +++ b/ui_components/widgets/frame_selector.py @@ -17,6 +17,7 @@ def frame_selector_widget(): len_timing_list = len(timing_list) if len(timing_list) > 0 else 1.0 st.progress(st.session_state['current_frame_index'] / len_timing_list) + with time1: if 'prev_shot_index' not in st.session_state: st.session_state['prev_shot_index'] = shot.shot_idx @@ -63,9 +64,9 @@ def frame_selector_widget(): timing_list: List[InternalFrameTimingObject] = shot.timing_list if timing_list and len(timing_list): - grid = st.columns(4) # Change to 4 columns + grid = st.columns(3) # Change to 4 columns for idx, timing in enumerate(timing_list): - with grid[idx % 4]: # Change to 4 columns + with grid[idx % 3]: # Change to 4 columns if timing.primary_image and timing.primary_image.location: st.image(timing.primary_image.location, use_column_width=True) else: diff --git a/ui_components/widgets/shot_view.py b/ui_components/widgets/shot_view.py index aea00433..c262ef43 100644 --- a/ui_components/widgets/shot_view.py +++ b/ui_components/widgets/shot_view.py @@ -20,7 +20,7 @@ def shot_keyframe_element(shot_uuid, items_per_row, **kwargs): # st.markdown(f"### {shot.name}", expanded=True) timing_list: List[InternalFrameTimingObject] = shot.timing_list - + with st.expander(f"{shot.name}", expanded=True): header_col_0, header_col_1, header_col_2, header_col_3 = st.columns([1, 1.75,1,4]) @@ -40,7 +40,7 @@ def shot_keyframe_element(shot_uuid, 
items_per_row, **kwargs): with header_col_1: - name = st.text_input("Update name:", value=shot.name) + name = st.text_input("Update name:", value=shot.name,max_chars=40) with header_col_2: duration = st.number_input("Duration:") diff --git a/ui_components/widgets/timeline_view.py b/ui_components/widgets/timeline_view.py index 65da4d07..b884e96b 100644 --- a/ui_components/widgets/timeline_view.py +++ b/ui_components/widgets/timeline_view.py @@ -14,8 +14,6 @@ def timeline_view(shot_uuid, stage): _, header_col_2 = st.columns([5.5,1.5]) - - with header_col_2: items_per_row = st_memory.slider("How many frames per row?", min_value=1, max_value=10, value=5, step=1, key="items_per_row_slider") From 8e847c6d481b05aaa611166576ea7e450cae2e03 Mon Sep 17 00:00:00 2001 From: piyushK52 Date: Tue, 31 Oct 2023 20:04:33 +0530 Subject: [PATCH 135/164] file upload fixed --- ui_components/components/new_project_page.py | 4 +- ui_components/methods/common_methods.py | 19 ++++--- .../widgets/frame_movement_widgets.py | 50 +++++++++---------- ui_components/widgets/frame_selector.py | 1 + 4 files changed, 35 insertions(+), 39 deletions(-) diff --git a/ui_components/components/new_project_page.py b/ui_components/components/new_project_page.py index 65fec2c4..29159679 100644 --- a/ui_components/components/new_project_page.py +++ b/ui_components/components/new_project_page.py @@ -84,8 +84,8 @@ def new_project_page(): if starting_image: try: - save_and_promote_image(starting_image, new_project.uuid, new_timing.uuid, "source") - save_and_promote_image(starting_image, new_project.uuid, new_timing.uuid, "styled") + save_and_promote_image(starting_image, shot.uuid, new_timing.uuid, "source") + save_and_promote_image(starting_image, shot.uuid, new_timing.uuid, "styled") except Exception as e: st.error(f"Failed to save the uploaded image due to {str(e)}") diff --git a/ui_components/methods/common_methods.py b/ui_components/methods/common_methods.py index 00e663dc..39b9e08f 100644 --- 
a/ui_components/methods/common_methods.py +++ b/ui_components/methods/common_methods.py @@ -76,18 +76,17 @@ def save_new_image(img: Union[Image.Image, str, np.ndarray, io.BytesIO], shot_uu new_image = data_repo.create_file(**file_data) return new_image -def save_and_promote_image(image, project_uuid, frame_uuid, save_type): +def save_and_promote_image(image, shot_uuid, timing_uuid, stage): data_repo = DataRepo() - - + try: - saved_image = save_new_image(image, project_uuid) - # Update records based on save_type - if save_type == "source": - data_repo.update_specific_timing(frame_uuid, source_image_id=saved_image.uuid) - elif save_type == "styled": - number_of_image_variants = add_image_variant(saved_image.uuid, frame_uuid) - promote_image_variant(frame_uuid, number_of_image_variants - 1) + saved_image = save_new_image(image, shot_uuid) + # Update records based on stage + if stage == WorkflowStageType.SOURCE.value: + data_repo.update_specific_timing(timing_uuid, source_image_id=saved_image.uuid) + elif stage == WorkflowStageType.STYLED.value: + number_of_image_variants = add_image_variant(saved_image.uuid, timing_uuid) + promote_image_variant(timing_uuid, number_of_image_variants - 1) return saved_image except Exception as e: diff --git a/ui_components/widgets/frame_movement_widgets.py b/ui_components/widgets/frame_movement_widgets.py index 7953a98c..503729c1 100644 --- a/ui_components/widgets/frame_movement_widgets.py +++ b/ui_components/widgets/frame_movement_widgets.py @@ -1,5 +1,6 @@ import time import streamlit as st +from ui_components.constants import WorkflowStageType from ui_components.methods.common_methods import add_image_variant, promote_image_variant, save_and_promote_image from ui_components.models import InternalFrameTimingObject from utils.constants import ImageStage @@ -110,7 +111,7 @@ def delete_frame(timing_uuid): st.session_state['current_frame_uuid'] = timing_list[st.session_state['current_frame_index'] - 1].uuid -def 
replace_image_widget(timing_uuid, stage, options=["Uploaded Frame","Other Frame"]): +def replace_image_widget(timing_uuid, stage, options=["Uploaded Frame", "Other Frame"]): data_repo = DataRepo() timing = data_repo.get_timing_from_uuid(timing_uuid) timing_list = data_repo.get_timing_list_from_shot(timing.shot.uuid) @@ -118,20 +119,24 @@ def replace_image_widget(timing_uuid, stage, options=["Uploaded Frame","Other Fr replace_with = options[0] if len(options) == 1 else st.radio("Replace with:", options, horizontal=True, key=f"replacement_entity_{stage}_{timing_uuid}") if replace_with == "Other Frame": - image_replacement_stage = st.radio("Select stage to use:", [ - ImageStage.MAIN_VARIANT.value, ImageStage.SOURCE_IMAGE.value], key=f"image_replacement_stage_{stage}_{timing_uuid}", horizontal=True) - which_image_to_use_for_replacement = st.number_input("Select image to use:", min_value=0, max_value=len( - timing_list)-1, value=0, key=f"which_image_to_use_for_replacement_{stage}") + image_replacement_stage = st.radio( + "Select stage to use:", + [ImageStage.MAIN_VARIANT.value, ImageStage.SOURCE_IMAGE.value], + key=f"image_replacement_stage_{stage}_{timing_uuid}", + horizontal=True + ) + replacement_img_number = st.number_input("Select image to use:", min_value=1, max_value=len( + timing_list), value=0, key=f"replacement_img_number_{stage}") if image_replacement_stage == ImageStage.SOURCE_IMAGE.value: - selected_image = timing_list[which_image_to_use_for_replacement].source_image + selected_image = timing_list[replacement_img_number - 1].source_image elif image_replacement_stage == ImageStage.MAIN_VARIANT.value: - selected_image = timing_list[which_image_to_use_for_replacement].primary_image + selected_image = timing_list[replacement_img_number - 1].primary_image - st.image(selected_image.local_path, use_column_width=True) + st.image(selected_image.location, use_column_width=True) if st.button("Replace with selected frame", 
disabled=False,key=f"replace_with_selected_frame_{stage}_{timing_uuid}"): - if stage == "source": + if stage == WorkflowStageType.SOURCE.value: data_repo.update_specific_timing(timing.uuid, source_image_id=selected_image.uuid) st.success("Replaced") time.sleep(1) @@ -145,28 +150,19 @@ def replace_image_widget(timing_uuid, stage, options=["Uploaded Frame","Other Fr time.sleep(1) st.rerun() elif replace_with == "Uploaded Frame": - if stage == "source": - uploaded_file = st.file_uploader("Upload Source Image", type=[ - "png", "jpeg"], accept_multiple_files=False) - if uploaded_file != None: - if st.button("Upload Source Image"): - if uploaded_file: - timing = data_repo.get_timing_from_uuid(timing.uuid) - if save_and_promote_image(uploaded_file, timing.shot.project.uuid, timing.uuid, "source"): - time.sleep(1.5) - st.rerun() - else: - replacement_frame = st.file_uploader("Upload Styled Image", type=[ - "png", "jpeg"], accept_multiple_files=False, key=f"replacement_frame_upload_{stage}_{timing_uuid}") - if replacement_frame != None: - if st.button("Replace frame", disabled=False): + btn_text = 'Upload source image' if stage == WorkflowStageType.SOURCE.value else 'Replace frame' + uploaded_file = st.file_uploader(btn_text, type=[ + "png", "jpeg"], accept_multiple_files=False) + if uploaded_file != None: + if st.button(btn_text): + if uploaded_file: timing = data_repo.get_timing_from_uuid(timing.uuid) - if replacement_frame: - save_and_promote_image(replacement_frame, timing.shot.project.uuid, timing.uuid, "styled") + if save_and_promote_image(uploaded_file, timing.shot.uuid, timing.uuid, stage): st.success("Replaced") - time.sleep(1) + time.sleep(1.5) st.rerun() + def jump_to_single_frame_view_button(display_number, timing_list, src): if st.button(f"Jump to #{display_number}", key=src): st.session_state['prev_frame_index'] = display_number diff --git a/ui_components/widgets/frame_selector.py b/ui_components/widgets/frame_selector.py index 10920ca6..7a9001d9 100644 --- 
a/ui_components/widgets/frame_selector.py +++ b/ui_components/widgets/frame_selector.py @@ -1,3 +1,4 @@ +from typing import List import streamlit as st from ui_components.widgets.frame_movement_widgets import delete_frame, replace_image_widget,jump_to_single_frame_view_button from ui_components.widgets.image_carousal import display_image From 613c471a04fc16c661687ee6439490f672f05290 Mon Sep 17 00:00:00 2001 From: piyushK52 Date: Tue, 31 Oct 2023 22:08:05 +0530 Subject: [PATCH 136/164] shot deletion added --- ui_components/widgets/shot_view.py | 21 +++++++++++---------- 1 file changed, 11 insertions(+), 10 deletions(-) diff --git a/ui_components/widgets/shot_view.py b/ui_components/widgets/shot_view.py index c262ef43..ef9b2342 100644 --- a/ui_components/widgets/shot_view.py +++ b/ui_components/widgets/shot_view.py @@ -29,11 +29,11 @@ def shot_keyframe_element(shot_uuid, items_per_row, **kwargs): if st.session_state["open_shot"] != shot.shot_idx: if st.toggle("Open shot", key=f"shot_{shot.shot_idx}"): st.session_state["open_shot"] = shot.shot_idx - st.experimental_rerun() + st.rerun() else: if not st.toggle("Open shot", key=f"close_shot_{shot.shot_idx}", value=True): st.session_state["open_shot"] = None - st.experimental_rerun() + st.rerun() if st.session_state["open_shot"] == shot.shot_idx: @@ -76,17 +76,18 @@ def shot_keyframe_element(shot_uuid, items_per_row, **kwargs): st.warning("No keyframes present") st.markdown("***") - - if st.session_state["open_shot"] == shot.shot_idx: - bottom1, bottom2, bottom3 = st.columns([1,2,1]) + bottom1, _, bottom3 = st.columns([1,2,1]) + with bottom1: confirm_delete = st.checkbox("I know that this will delete all the frames and videos within") - if confirm_delete: - if st.button("Delete frame"): - st.success("Done!") - else: - st.button("Delete frame", disabled=True, help="Check the box above to enable the delete bottom.") + help = "Check the box above to enable the delete bottom." 
if confirm_delete else "" + if st.button("Delete shot", disabled=(not confirm_delete), help=help, key=shot_uuid): + data_repo.delete_shot(shot_uuid) + st.success("Done!") + time.sleep(0.3) + st.rerun() + with bottom3: if st.button("Move shot down", key=f'shot_down_movement_{shot.uuid}'): shot_list = data_repo.get_shot_list(shot.project.uuid) From a297bf72e363559c6ffc7e3cf04afe2f8db0a72d Mon Sep 17 00:00:00 2001 From: piyushK52 Date: Tue, 31 Oct 2023 23:05:56 +0530 Subject: [PATCH 137/164] frame movement across shot fixed --- backend/models.py | 12 +++++++++++ .../widgets/frame_movement_widgets.py | 21 ++++++++++--------- ui_components/widgets/shot_view.py | 6 +++--- 3 files changed, 26 insertions(+), 13 deletions(-) diff --git a/backend/models.py b/backend/models.py index 975f4314..27c37dbf 100644 --- a/backend/models.py +++ b/backend/models.py @@ -232,6 +232,7 @@ def __init__(self, *args, **kwargs): super(Timing, self).__init__(*args, **kwargs) self.old_is_disabled = self.is_disabled self.old_aux_frame_index = self.aux_frame_index + self.old_shot = self.shot def save(self, *args, **kwargs): # TODO: updating details of every frame this way can be slow - implement a better strategy @@ -269,6 +270,17 @@ def save(self, *args, **kwargs): aux_frame_index__lt=self.old_aux_frame_index, is_disabled=False).order_by('aux_frame_index') timings_to_move.update(aux_frame_index=F('aux_frame_index') + 1) + # --------------- handling shot change ------------------- + if not self.is_disabled and self.id and self.old_shot != self.shot: + # moving all frames ahead of this frame, one step backwards + timing_list = Timing.objects.filter(shot_id=self.old_shot.id, \ + aux_frame_index__gt=self.aux_frame_index, is_disabled=False).order_by('aux_frame_index') + # changing the aux_frame_index of this frame to be the last one in the new shot + new_index = Timing.objects.filter(shot_id=self.shot.id, is_disabled=False).count() + timing = Timing.objects.filter(uuid=self.uuid, 
is_disabled=False).order_by('aux_frame_index').first() + self.aux_frame_index = new_index + timing_list.update(aux_frame_index=F('aux_frame_index') - 1) + super().save(*args, **kwargs) diff --git a/ui_components/widgets/frame_movement_widgets.py b/ui_components/widgets/frame_movement_widgets.py index 503729c1..5e3cc3f4 100644 --- a/ui_components/widgets/frame_movement_widgets.py +++ b/ui_components/widgets/frame_movement_widgets.py @@ -7,10 +7,9 @@ from utils.data_repo.data_repo import DataRepo -def change_frame_position_input(timing_uuid, src): +def change_frame_shot(timing_uuid, src): ''' - changes frame position inside the respective shot - (used when large change is required, like moving a frame from 2nd pos to 10th pos) + used to move a frame from one shot to another ''' data_repo = DataRepo() timing: InternalFrameTimingObject = data_repo.get_timing_from_uuid(timing_uuid) @@ -19,13 +18,15 @@ def change_frame_position_input(timing_uuid, src): shot_list = data_repo.get_shot_list(project_uuid) shot_names = [shot.name for shot in shot_list] - new_position = st.selectbox("Move to new shot:", shot_names, key=f"new_position_{timing.uuid}_{src}") - - if st.button('Move to shot',key=f"change_frame_position_{timing.uuid}_{src}",use_container_width=True): - data_repo.update_specific_timing(timing.uuid, aux_frame_index=new_position - 1) - st.rerun() - - + new_shot = st.selectbox("Move to new shot:", shot_names, key=f"new_shot_{timing.uuid}_{src}") + if st.button('Move to shot', key=f"change_frame_position_{timing.uuid}_{src}", use_container_width=True): + shot = next((obj for obj in shot_list if obj.name == new_shot), None) # NOTE: this assumes unique name for different shots + if shot: + data_repo.update_specific_timing(timing.uuid, shot_id=shot.uuid) + st.success("Success") + time.sleep(0.3) + st.rerun() + def move_frame(direction, timing_uuid): ''' diff --git a/ui_components/widgets/shot_view.py b/ui_components/widgets/shot_view.py index ef9b2342..fc995601 100644 --- 
a/ui_components/widgets/shot_view.py +++ b/ui_components/widgets/shot_view.py @@ -6,7 +6,7 @@ from ui_components.models import InternalFrameTimingObject, InternalShotObject from ui_components.widgets.add_key_frame_element import add_key_frame -from ui_components.widgets.frame_movement_widgets import change_frame_position_input, delete_frame_button, jump_to_single_frame_view_button, move_frame_back_button, move_frame_forward_button, replace_image_widget +from ui_components.widgets.frame_movement_widgets import change_frame_shot, delete_frame_button, jump_to_single_frame_view_button, move_frame_back_button, move_frame_forward_button, replace_image_widget from utils.data_repo.data_repo import DataRepo from utils import st_memory @@ -43,7 +43,7 @@ def shot_keyframe_element(shot_uuid, items_per_row, **kwargs): name = st.text_input("Update name:", value=shot.name,max_chars=40) with header_col_2: - duration = st.number_input("Duration:") + duration = st.number_input("Duration:", disabled=True, value=shot.duration) with header_col_3: col2, col3, col4 = st.columns(3) @@ -152,7 +152,7 @@ def timeline_view_buttons(idx, shot_uuid, replace_image_widget_toggle, copy_fram delete_frame_button(timing_list[idx].uuid) if change_shot_toggle: - change_frame_position_input(timing_list[idx].uuid, "side-to-side") + change_frame_shot(timing_list[idx].uuid, "side-to-side") jump_to_single_frame_view_button(idx + 1, timing_list, 'timeline_btn_'+str(timing_list[idx].uuid)) From 1b5c01410d9d587f57d6b461f238c39dcba67b38 Mon Sep 17 00:00:00 2001 From: piyushK52 Date: Tue, 31 Oct 2023 23:22:36 +0530 Subject: [PATCH 138/164] unique shot name check added --- backend/db_repo.py | 11 ++++++++++- backend/models.py | 1 - ui_components/widgets/shot_view.py | 17 +++++++---------- 3 files changed, 17 insertions(+), 12 deletions(-) diff --git a/backend/db_repo.py b/backend/db_repo.py index e1d08a0f..a6ffc8b8 100644 --- a/backend/db_repo.py +++ b/backend/db_repo.py @@ -1456,6 +1456,10 @@ def 
create_shot(self, project_uuid, duration, name=None, meta_data="", desc=""): shot_number = Shot.objects.filter(project_id=project.id, is_disabled=False).count() + 1 if not name: name = "Shot " + str(shot_number) + else: + prev_shot = Shot.objects.filter(project_id=project.id, name=name, is_disabled=False).first() + if prev_shot: + return InternalResponse({}, 'shot name already exists', False) shot_data = { "name" : name, @@ -1484,7 +1488,12 @@ def update_shot(self, shot_uuid, shot_idx=None, name=None, duration=None, meta_d update_data = {} if name != None: + prev_shot = Shot.objects.filter(project_id=shot.project.id, name=name, is_disabled=False).first() + if prev_shot: + return InternalResponse({}, 'shot name already exists', False) + update_data['name'] = name + if duration != None: update_data['duration'] = duration if meta_data != None: @@ -1493,7 +1502,7 @@ def update_shot(self, shot_uuid, shot_idx=None, name=None, duration=None, meta_d update_data['desc'] = desc if shot_idx != None: update_data['shot_idx'] = shot_idx - + for k,v in update_data.items(): setattr(shot, k, v) diff --git a/backend/models.py b/backend/models.py index 27c37dbf..c37ff5e0 100644 --- a/backend/models.py +++ b/backend/models.py @@ -277,7 +277,6 @@ def save(self, *args, **kwargs): aux_frame_index__gt=self.aux_frame_index, is_disabled=False).order_by('aux_frame_index') # changing the aux_frame_index of this frame to be the last one in the new shot new_index = Timing.objects.filter(shot_id=self.shot.id, is_disabled=False).count() - timing = Timing.objects.filter(uuid=self.uuid, is_disabled=False).order_by('aux_frame_index').first() self.aux_frame_index = new_index timing_list.update(aux_frame_index=F('aux_frame_index') - 1) diff --git a/ui_components/widgets/shot_view.py b/ui_components/widgets/shot_view.py index fc995601..737326d7 100644 --- a/ui_components/widgets/shot_view.py +++ b/ui_components/widgets/shot_view.py @@ -26,19 +26,16 @@ def shot_keyframe_element(shot_uuid, items_per_row, 
**kwargs): header_col_0, header_col_1, header_col_2, header_col_3 = st.columns([1, 1.75,1,4]) with header_col_0: - if st.session_state["open_shot"] != shot.shot_idx: - if st.toggle("Open shot", key=f"shot_{shot.shot_idx}"): - st.session_state["open_shot"] = shot.shot_idx + if st.session_state["open_shot"] != shot.uuid: + if st.toggle("Open shot", key=f"shot_{shot.uuid}"): + st.session_state["open_shot"] = shot.uuid st.rerun() else: - if not st.toggle("Open shot", key=f"close_shot_{shot.shot_idx}", value=True): + if not st.toggle("Open shot", key=f"close_shot_{shot.uuid}", value=True): st.session_state["open_shot"] = None st.rerun() - if st.session_state["open_shot"] == shot.shot_idx: - - - + if st.session_state["open_shot"] == shot.uuid: with header_col_1: name = st.text_input("Update name:", value=shot.name,max_chars=40) @@ -67,7 +64,7 @@ def shot_keyframe_element(shot_uuid, items_per_row, **kwargs): with grid[idx%items_per_row]: if timing.primary_image and timing.primary_image.location: st.image(timing.primary_image.location, use_column_width=True) - if st.session_state["open_shot"] == shot.shot_idx: + if st.session_state["open_shot"] == shot.uuid: timeline_view_buttons(idx, shot_uuid, replace_image_widget_toggle, copy_frame_toggle, move_frames_toggle,delete_frames_toggle, change_shot_toggle) else: st.warning("No primary image present") @@ -76,7 +73,7 @@ def shot_keyframe_element(shot_uuid, items_per_row, **kwargs): st.warning("No keyframes present") st.markdown("***") - if st.session_state["open_shot"] == shot.shot_idx: + if st.session_state["open_shot"] == shot.uuid: bottom1, _, bottom3 = st.columns([1,2,1]) with bottom1: From 7ba7dc55e0a7ec9ea5eba912a31daabe8bfaa537 Mon Sep 17 00:00:00 2001 From: piyushK52 Date: Wed, 1 Nov 2023 00:28:05 +0530 Subject: [PATCH 139/164] wip: shot video generation --- banodoco_runner.py | 16 ++++++++++++---- shared/constants.py | 1 + ui_components/methods/common_methods.py | 19 +++++++++---------- 
ui_components/methods/video_methods.py | 19 ++++++------------- ui_components/models.py | 19 ++++++++++++++++--- .../widgets/variant_comparison_grid.py | 6 +++--- utils/data_repo/api_repo.py | 5 +++++ utils/data_repo/data_repo.py | 8 ++++---- utils/media_processor/interpolator.py | 1 - utils/media_processor/video.py | 5 +---- 10 files changed, 57 insertions(+), 42 deletions(-) diff --git a/banodoco_runner.py b/banodoco_runner.py index 0589011f..7a672437 100644 --- a/banodoco_runner.py +++ b/banodoco_runner.py @@ -67,8 +67,11 @@ def check_and_update_db(): log_list = InferenceLog.objects.filter(status__in=[InferenceStatus.QUEUED.value, InferenceStatus.IN_PROGRESS.value], is_disabled=False).all() + # these items will updated in the cache when the app refreshes the next time timing_update_list = {} # {project_id: [timing_uuids]} gallery_update_list = {} # {project_id: True/False} + shot_update_list = {} # {project_id: [shot_uuids]} + for log in log_list: input_params = json.loads(log.input_params) replicate_data = input_params.get(InferenceParamType.REPLICATE_INFERENCE.value, None) @@ -102,17 +105,19 @@ def check_and_update_db(): print("processing inference output") process_inference_output(**origin_data) - if origin_data['inference_type'] in [InferenceType.FRAME_INTERPOLATION.value, \ - InferenceType.FRAME_TIMING_IMAGE_INFERENCE.value, \ + if origin_data['inference_type'] in [InferenceType.FRAME_TIMING_IMAGE_INFERENCE.value, \ InferenceType.FRAME_INPAINTING.value]: if str(log.project.uuid) not in timing_update_list: timing_update_list[str(log.project.uuid)] = [] timing_update_list[str(log.project.uuid)].append(origin_data['timing_uuid']) elif origin_data['inference_type'] == InferenceType.GALLERY_IMAGE_GENERATION.value: - if str(log.project.uuid) not in gallery_update_list: - gallery_update_list[str(log.project.uuid)] = False gallery_update_list[str(log.project.uuid)] = True + + elif origin_data['inference_type'] == InferenceType.FRAME_INTERPOLATION.value: + if 
str(log.project.uuid) not in shot_update_list: + shot_update_list[str(log.project.uuid)] = [] + shot_update_list[str(log.project.uuid)].append(origin_data['shot_uuid']) except Exception as e: app_logger.log(LoggingType.ERROR, f"Error: {e}") @@ -137,6 +142,9 @@ def check_and_update_db(): final_res[project_uuid] = {} final_res[project_uuid].update({f"{ProjectMetaData.GALLERY_UPDATE.value}": val}) + + for project_uuid, val in shot_update_list.items(): + final_res[project_uuid] = {ProjectMetaData.SHOT_VIDEO_UPDATE.value: list(set(val))} for project_uuid, val in final_res.items(): diff --git a/shared/constants.py b/shared/constants.py index 0d46e3ec..fc6657d6 100644 --- a/shared/constants.py +++ b/shared/constants.py @@ -97,6 +97,7 @@ class ProjectMetaData(ExtendedEnum): DATA_UPDATE = "data_update" # info regarding cache/data update when runner updates the db GALLERY_UPDATE = "gallery_update" BACKGROUND_IMG_LIST = "background_img_list" + SHOT_VIDEO_UPDATE = "shot_video_update" class SortOrder(ExtendedEnum): ASCENDING = "asc" diff --git a/ui_components/methods/common_methods.py b/ui_components/methods/common_methods.py index 39b9e08f..06dc7715 100644 --- a/ui_components/methods/common_methods.py +++ b/ui_components/methods/common_methods.py @@ -321,7 +321,6 @@ def promote_video_variant(shot_uuid, variant_uuid): data_repo.update_shot(shot.uuid, main_clip_id=variant_to_promote.uuid) - def extract_canny_lines(image_path_or_url, project_uuid, low_threshold=50, high_threshold=150) -> InternalFileObject: data_repo = DataRepo() @@ -757,9 +756,9 @@ def process_inference_output(**kwargs): if output: settings = kwargs.get('settings') - timing_uuid = kwargs.get('timing_uuid') - timing = data_repo.get_timing_from_uuid(timing_uuid) - if not timing: + shot_uuid = kwargs.get('shot_uuid') + shot = data_repo.get_shot_from_uuid(shot_uuid) + if not shot: return False # output can also be an url @@ -772,21 +771,21 @@ def process_inference_output(**kwargs): os.remove(temp_output_file.name) 
if 'normalise_speed' in settings and settings['normalise_speed']: - output = VideoProcessor.update_video_bytes_speed(output, AnimationStyleType.INTERPOLATION.value, timing.clip_duration) + output = VideoProcessor.update_video_bytes_speed(output, AnimationStyleType.INTERPOLATION.value, shot.duration) - video_location = "videos/" + str(timing.shot.project.uuid) + "/assets/videos/0_raw/" + str(uuid.uuid4()) + ".mp4" + video_location = "videos/" + str(shot.project.uuid) + "/assets/videos/0_raw/" + str(uuid.uuid4()) + ".mp4" video = convert_bytes_to_file( file_location_to_save=video_location, mime_type="video/mp4", file_bytes=output, - project_uuid=timing.shot.project.uuid, + project_uuid=shot.project.uuid, inference_log_id=log_uuid ) - data_repo.add_interpolated_clip(timing_uuid, interpolated_clip_id=video.uuid) - if not timing.timed_clip: + data_repo.add_interpolated_clip(shot_uuid, interpolated_clip_id=video.uuid) + if not shot.main_clip: output_video = update_speed_of_video_clip(video, timing_uuid) - data_repo.update_specific_timing(timing_uuid, timed_clip_id=output_video.uuid) + data_repo.update_specific_timing(timing_uuid, main_clip_id=output_video.uuid) else: del kwargs['log_uuid'] diff --git a/ui_components/methods/video_methods.py b/ui_components/methods/video_methods.py index cc92a2fc..aaa399e7 100644 --- a/ui_components/methods/video_methods.py +++ b/ui_components/methods/video_methods.py @@ -36,7 +36,7 @@ def create_single_interpolated_clip(shot_uuid, quality, settings={}, variant_cou elif quality == 'preview': interpolation_steps = 3 - img_list = [t.primary_image for t in timing_list] + img_list = [t.primary_image.location for t in timing_list] settings.update(interpolation_steps=interpolation_steps) # res is an array of tuples (video_bytes, log) @@ -54,21 +54,14 @@ def create_single_interpolated_clip(shot_uuid, quality, settings={}, variant_cou "output": output, "log_uuid": log.uuid, "settings": settings, - "shot_uuid": shot_uuid + "shot_uuid": 
str(shot_uuid) } process_inference_output(**inference_data) -def update_speed_of_video_clip(video_file: InternalFileObject, timing_uuid) -> InternalFileObject: +def update_speed_of_video_clip(video_file: InternalFileObject, duration) -> InternalFileObject: from ui_components.methods.file_methods import generate_temp_file, convert_bytes_to_file - data_repo = DataRepo() - - timing: InternalFrameTimingObject = data_repo.get_timing_from_uuid( - timing_uuid) - - desired_duration = timing.clip_duration - temp_video_file = None if video_file.hosted_url and is_s3_image_url(video_file.hosted_url): temp_video_file = generate_temp_file(video_file.hosted_url, '.mp4') @@ -76,18 +69,18 @@ def update_speed_of_video_clip(video_file: InternalFileObject, timing_uuid) -> I location_of_video = temp_video_file.name if temp_video_file else video_file.local_path new_file_name = ''.join(random.choices(string.ascii_lowercase + string.digits, k=16)) + ".mp4" - new_file_location = "videos/" + str(timing.shot.project.uuid) + "/assets/videos/1_final/" + str(new_file_name) + new_file_location = "videos/" + str(video_file.project.uuid) + "/assets/videos/1_final/" + str(new_file_name) video_bytes = VideoProcessor.update_video_speed( location_of_video, - desired_duration + duration ) video_file = convert_bytes_to_file( new_file_location, "video/mp4", video_bytes, - timing.shot.project.uuid + video_file.project.uuid ) if temp_video_file: diff --git a/ui_components/models.py b/ui_components/models.py index d3cf0346..a35548b6 100644 --- a/ui_components/models.py +++ b/ui_components/models.py @@ -180,14 +180,27 @@ def meta_data_dict(self): @property def primary_interpolated_video_index(self): - if not (self.interpolated_clip_list and len(self.interpolated_clip_list)) or not self.main_clip: + video_list = self.interpolated_clip_file_list + if not len(video_list): return -1 - for idx, img in enumerate(self.interpolated_clip_list): - if img.uuid == self.main_clip.uuid: + for idx, vid in 
enumerate(video_list): + if vid.uuid == self.main_clip.uuid: return idx return -1 + + @property + def interpolated_clip_file_list(self): + if not (self.interpolated_clip_list and len(self.interpolated_clip_list)): + return [] + + from utils.data_repo.data_repo import DataRepo + + data_repo = DataRepo() + video_id_list = json.loads(self.interpolated_clip_list) if self.interpolated_clip_list else [] + video_list = data_repo.get_image_list_from_uuid_list(video_id_list) + return video_list class InternalAppSettingObject: diff --git a/ui_components/widgets/variant_comparison_grid.py b/ui_components/widgets/variant_comparison_grid.py index b087b0d1..f3a41231 100644 --- a/ui_components/widgets/variant_comparison_grid.py +++ b/ui_components/widgets/variant_comparison_grid.py @@ -15,7 +15,7 @@ def variant_comparison_grid(ele_uuid, stage=CreativeProcessType.MOTION.value): if stage == CreativeProcessType.MOTION.value: shot_uuid = ele_uuid shot = data_repo.get_shot_from_uuid(shot_uuid) - variants = shot.interpolated_clip_list + variants = shot.interpolated_clip_file_list else: timing_uuid = ele_uuid timing = data_repo.get_timing_from_uuid(timing_uuid) @@ -40,7 +40,7 @@ def variant_comparison_grid(ele_uuid, stage=CreativeProcessType.MOTION.value): st.info("No variants present") return - current_variant = timing.primary_interpolated_video_index if stage == CreativeProcessType.MOTION.value else int( + current_variant = shot.primary_interpolated_video_index if stage == CreativeProcessType.MOTION.value else int( timing.primary_variant_index) st.markdown("***") @@ -68,7 +68,7 @@ def variant_comparison_grid(ele_uuid, stage=CreativeProcessType.MOTION.value): if st.button(f"Promote Variant #{variant_index + 1}", key=f"Promote Variant #{variant_index + 1} for {st.session_state['current_frame_index']}", help="Promote this variant to the primary image", use_container_width=True): if stage == CreativeProcessType.MOTION.value: - promote_video_variant(timing.uuid, 
variants[variant_index].uuid) + promote_video_variant(shot.uuid, variants[variant_index].uuid) else: promote_image_variant(timing.uuid, variant_index) diff --git a/utils/data_repo/api_repo.py b/utils/data_repo/api_repo.py index 39e3fd0f..90207f9e 100644 --- a/utils/data_repo/api_repo.py +++ b/utils/data_repo/api_repo.py @@ -83,6 +83,7 @@ def _setup_urls(self): # shot self.SHOT_URL = 'v1/data/shot' self.SHOT_LIST_URL = 'v1/data/shot/list' + self.SHOT_INTERPOLATED_CLIP = 'v1/data/shot/interpolated-clip' def logout(self): delete_url_param(AUTH_TOKEN) @@ -490,4 +491,8 @@ def update_shot(self, shot_uuid, **kwargs): def delete_shot(self, shot_uuid): res = self.http_delete(self.SHOT_URL, params={'uuid': shot_uuid}) + return InternalResponse(res['payload'], 'success', res['status']) + + def add_interpolated_clip(self, shot_uuid, **kwargs): + res = self.http_post(self.SHOT_INTERPOLATED_CLIP, data=kwargs) return InternalResponse(res['payload'], 'success', res['status']) \ No newline at end of file diff --git a/utils/data_repo/data_repo.py b/utils/data_repo/data_repo.py index 90823d17..674a4822 100644 --- a/utils/data_repo/data_repo.py +++ b/utils/data_repo/data_repo.py @@ -281,10 +281,6 @@ def update_specific_timing(self, uuid, **kwargs): res = self.db_repo.update_specific_timing(uuid, **kwargs) return res.status - def add_interpolated_clip(self, timing_uuid, **kwargs): - res = self.db_repo.add_interpolated_clip(timing_uuid, **kwargs) - return res.status - def delete_timing_from_uuid(self, uuid): res = self.db_repo.delete_timing_from_uuid(uuid) return res.status @@ -426,3 +422,7 @@ def update_shot(self, shot_uuid, shot_idx=None, name=None, duration=None, meta_d def delete_shot(self, shot_uuid): res = self.db_repo.delete_shot(shot_uuid) return res.status + + def add_interpolated_clip(self, shot_uuid, **kwargs): + res = self.db_repo.add_interpolated_clip(shot_uuid, **kwargs) + return res.status \ No newline at end of file diff --git a/utils/media_processor/interpolator.py 
b/utils/media_processor/interpolator.py index 5adf9d54..299af3e3 100644 --- a/utils/media_processor/interpolator.py +++ b/utils/media_processor/interpolator.py @@ -35,7 +35,6 @@ def calculate_dynamic_interpolations_steps(clip_duration): @staticmethod def create_interpolated_clip(img_location_list, animation_style, settings, variant_count=1, queue_inference=False): - data_repo = DataRepo() if not animation_style: animation_style = DefaultTimingStyleParams.animation_style diff --git a/utils/media_processor/video.py b/utils/media_processor/video.py index 6b20554c..eb56b638 100644 --- a/utils/media_processor/video.py +++ b/utils/media_processor/video.py @@ -1,9 +1,6 @@ -from io import BytesIO import os import tempfile -from moviepy.editor import concatenate_videoclips, TextClip, VideoFileClip, vfx - -from shared.constants import AnimationStyleType +from moviepy.editor import VideoFileClip, vfx class VideoProcessor: @staticmethod From 6a53e22c122a4f2c225412de9b0bfee9b16c5b75 Mon Sep 17 00:00:00 2001 From: peter942 Date: Wed, 1 Nov 2023 02:17:48 +0000 Subject: [PATCH 140/164] Lots of changes --- .../components/frame_styling_page.py | 40 ++++--- ...xplorer_element.py => explorer_element.py} | 102 ++++++++++++----- ui_components/widgets/frame_selector.py | 103 +++++++++--------- ui_components/widgets/shot_view.py | 16 ++- ui_components/widgets/timeline_view.py | 2 +- utils/st_memory.py | 2 +- 6 files changed, 168 insertions(+), 97 deletions(-) rename ui_components/widgets/{style_explorer_element.py => explorer_element.py} (63%) diff --git a/ui_components/components/frame_styling_page.py b/ui_components/components/frame_styling_page.py index 2873af91..9e8750bd 100644 --- a/ui_components/components/frame_styling_page.py +++ b/ui_components/components/frame_styling_page.py @@ -14,7 +14,7 @@ from ui_components.widgets.inpainting_element import inpainting_element from ui_components.widgets.drawing_element import drawing_element from ui_components.widgets.sidebar_logger import 
sidebar_logger -from ui_components.widgets.style_explorer_element import style_explorer_element +from ui_components.widgets.explorer_element import explorer_element,gallery_image_view from ui_components.widgets.variant_comparison_grid import variant_comparison_grid from utils import st_memory @@ -66,7 +66,8 @@ def frame_styling_page(shot_uuid: str): project_settings = data_repo.get_project_setting(shot.project.uuid) if st.session_state['frame_styling_view_type'] == "Explorer": - style_explorer_element(shot.project.uuid) + + explorer_element(shot.project.uuid) # -------------------- INDIVIDUAL VIEW ---------------------- elif st.session_state['frame_styling_view_type'] == "Individual": @@ -81,6 +82,7 @@ def frame_styling_page(shot_uuid: str): animation_style_element(shot_uuid) elif st.session_state['page'] == CreativeProcessType.STYLING.value: + variant_comparison_grid(st.session_state['current_frame_uuid'], stage=CreativeProcessType.STYLING.value) st.markdown("***") @@ -104,9 +106,11 @@ def frame_styling_page(shot_uuid: str): f"How many variants?", min_value=1, max_value=100, \ key=f"number_of_variants_{st.session_state['current_frame_index']}" ) - with detail2: - if st.button(f"Generate variants", key=f"new_variations_{st.session_state['current_frame_index']}", help="This will generate new variants based on the settings to the left."): + variant_term = "variant" if st.session_state['individual_number_of_variants'] == 1 else "variants" + st.write("") + + if st.button(f"Generate {variant_term}", key=f"new_variations_{st.session_state['current_frame_index']}", help="This will generate new variants based on the settings to the left."): for i in range(0, max(st.session_state['individual_number_of_variants'], 1)): trigger_restyling_process( timing_uuid=st.session_state['current_frame_uuid'], @@ -132,10 +136,7 @@ def frame_styling_page(shot_uuid: str): ) st.rerun() - st.markdown("***") - st.info( - "You can restyle multiple frames at once in the Timeline view.") - 
st.markdown("***") + style_cloning_element(timing_list) with st.expander("🔍 Prompt Finder"): @@ -162,15 +163,28 @@ def frame_styling_page(shot_uuid: str): # -------------------- TIMELINE VIEW -------------------------- elif st.session_state['frame_styling_view_type'] == "Timeline": + + if st.session_state['page'] == "Key Frames": - # with st.sidebar: - # with st.expander("🌀 Batch Styling", expanded=False): - # styling_element(st.session_state['current_frame_uuid'], view_type=ViewType.LIST.value) + + with st.sidebar: + with st.expander("📋 Explorer Shortlist",expanded=True): + if st_memory.toggle("Open", value=True, key="explorer_shortlist_toggle"): + project_setting = data_repo.get_project_setting(shot.project.uuid) + page_number = st.radio("Select page", options=range(1, project_setting.total_gallery_pages + 1), horizontal=True) + gallery_image_view(shot.project.uuid,page_number=page_number,num_items_per_page=10, open_detailed_view_for_all=False, shortlist=False,num_columns=2) + timeline_view(shot_uuid, "Key Frames") elif st.session_state['page'] == "Shots": timeline_view(shot_uuid, "Shots") # -------------------- SIDEBAR NAVIGATION -------------------------- with st.sidebar: - with st.expander("🔍 Inference Logging", expanded=True): - sidebar_logger(shot_uuid) + # with st.expander("🔍 Generation Log", expanded=True): + # sidebar_logger(shot_uuid) + + with st.expander("🔍 Generation Log", expanded=True): + if st_memory.toggle("Open", value=True, key="generaton_log_toggle"): + sidebar_logger(shot_uuid) + st.markdown("***") + diff --git a/ui_components/widgets/style_explorer_element.py b/ui_components/widgets/explorer_element.py similarity index 63% rename from ui_components/widgets/style_explorer_element.py rename to ui_components/widgets/explorer_element.py index fb6f5849..5b38c94a 100644 --- a/ui_components/widgets/style_explorer_element.py +++ b/ui_components/widgets/explorer_element.py @@ -1,6 +1,6 @@ import json import streamlit as st -from 
ui_components.methods.common_methods import process_inference_output +from ui_components.methods.common_methods import process_inference_output,add_new_shot from ui_components.methods.file_methods import generate_pil_image from ui_components.methods.ml_methods import query_llama2 from ui_components.widgets.add_key_frame_element import add_key_frame @@ -8,12 +8,15 @@ from utils.data_repo.data_repo import DataRepo from shared.constants import AIModelType, InferenceType, InternalFileTag, InternalFileType, SortOrder from utils import st_memory +import time from utils.ml_processor.ml_interface import get_ml_client from utils.ml_processor.replicate.constants import REPLICATE_MODEL -def style_explorer_element(project_uuid): + + +def explorer_element(project_uuid): st.markdown("***") data_repo = DataRepo() shot_list = data_repo.get_shot_list(project_uuid) @@ -98,28 +101,48 @@ def style_explorer_element(project_uuid): "project_uuid": project_uuid } process_inference_output(**inference_data) + e2.info("Check the Generation Log to the left for the status.") project_setting = data_repo.get_project_setting(project_uuid) st.markdown("***") - page_number = st.radio("Select page", options=range(1, project_setting.total_gallery_pages + 1), horizontal=True) + k1,k2 = st.columns([5,1]) + + page_number = k1.radio("Select page", options=range(1, project_setting.total_gallery_pages + 1), horizontal=True) + open_detailed_view_for_all = k2.toggle("Open detailed view for all:") f1,f2 = st.columns([1, 1]) - num_columns = f1.slider('Number of columns:', min_value=1, max_value=10, value=4) - num_items_per_page = f2.slider('Items per page:', min_value=1, max_value=100, value=20) + num_columns = f1.slider('Number of columns:', min_value=3, max_value=7, value=5) + num_items_per_page = f2.slider('Items per page:', min_value=10, max_value=50, value=20) st.markdown("***") + + tab1, tab2 = st.tabs(["Explorations", "Shortlist"]) + + + with tab1: + 
gallery_image_view(project_uuid,page_number,num_items_per_page,open_detailed_view_for_all, False,num_columns) + with tab2: + # @pom4piyush, this should trigger the gallery image view based - passing shortlist = True to show only the shortlisted items. This throws an error right now due to duplicate items on the list but won't when it's set up correctly. + st.success("Commented out...") + # gallery_image_view(project_uuid,page_number,num_items_per_page,open_detailed_view_for_all, True,num_columns) + +def gallery_image_view(project_uuid,page_number=1,num_items_per_page=20, open_detailed_view_for_all=False, shortlist=False,num_columns=2): + data_repo = DataRepo() + + project_settings = data_repo.get_project_setting(project_uuid) + shot_list = data_repo.get_shot_list(project_uuid) + # @pom4piyush, when you've added the shortlist, we should add a value tot the below that triggers based on the shortlist value that's fed into this function. gallery_image_list, res_payload = data_repo.get_all_file_list( file_type=InternalFileType.IMAGE.value, tag=InternalFileTag.GALLERY_IMAGE.value, project_id=project_uuid, page=page_number, data_per_page=num_items_per_page, - sort_order=SortOrder.DESCENDING.value # newly created images appear first + sort_order=SortOrder.DESCENDING.value ) - if project_setting.total_gallery_pages != res_payload['total_pages']: - project_setting.total_gallery_pages = res_payload['total_pages'] + if project_settings.total_gallery_pages != res_payload['total_pages']: + project_settings.total_gallery_pages = res_payload['total_pages'] st.rerun() - total_image_count = res_payload['count'] if gallery_image_list and len(gallery_image_list): start_index = 0 @@ -131,7 +154,20 @@ def style_explorer_element(project_uuid): if i + j < len(gallery_image_list): with cols[j]: st.image(gallery_image_list[i + j].location, use_column_width=True) - with st.expander(f'Variant #{(page_number - 1) * num_items_per_page + i + j + 1}', False): + if st.toggle(f'Open Details For 
#{(page_number - 1) * num_items_per_page + i + j + 1}', open_detailed_view_for_all): + # @pom4piyush, we should replace the shorlisted value with the shortlisted value from the database + shortlisted = True + if shortlisted == False: + if st.button("Add To Shortlist", key=f"shortlist_{gallery_image_list[i + j].uuid}",use_container_width=True): + time.sleep(0.3) + st.success("Added To Shortlist") + st.rerun() + else: + if st.button("Remove From Shortlist", key=f"shortlist_{gallery_image_list[i + j].uuid}",use_container_width=True): + time.sleep(0.3) + st.success("Removed From Shortlist") + st.rerun() + if gallery_image_list[i + j].inference_log: log = data_repo.get_inference_log_from_uuid(gallery_image_list[i + j].inference_log.uuid) if log: @@ -140,30 +176,42 @@ def style_explorer_element(project_uuid): model = json.loads(log.output_details)['model_name'].split('/')[-1] st.info(f"Prompt: {prompt}") st.info(f"Model: {model}") + shot_names = [s.name for s in shot_list] + shot_names.append('**Create New Shot**') + shot_name = st.selectbox('Shot Name', shot_names, key="current_shot_sidebar_selector") + + if shot_name == "**Create New Shot**": + shot_name = st.text_input("New shot name:", max_chars=40) + if st.button("Create new shot", key=f"create_new_{gallery_image_list[i + j].uuid}", use_container_width=True): + add_new_shot(project_uuid) + pil_image = generate_pil_image(gallery_image_list[i + j].location) + data_repo = DataRepo() + shot_list = data_repo.get_shot_list(project_uuid) + shot_uuid = shot_list[-1].uuid + add_key_frame(pil_image, False, shot_uuid, len(data_repo.get_timing_list_from_shot(shot_uuid)), refresh_state=False) + # @pom4piyush This should create a new shot, update its name to the specified name, and add the selected image to it. This might be done in an inelegent way + needs you to add an ability to update the shot name. 
+ st.rerun() + + else: + if st.button(f"Add to shot", key=f"add_{gallery_image_list[i + j].uuid}", help="Promote this variant to the primary image", use_container_width=True): + shot_number = shot_names.index(shot_name) + 1 + pil_image = generate_pil_image(gallery_image_list[i + j].location) + shot_uuid = shot_list[shot_number - 1].uuid + # @pom4piyush, in cases like these, why not keep the same file as is used in the gallery view? Assuming generate_pil_image is turning a PIL object that's then downloaded + add_key_frame(pil_image, False, shot_uuid, len(data_repo.get_timing_list_from_shot(shot_uuid)), refresh_state=False) + + # removing this from the gallery view + data_repo.update_file(gallery_image_list[i + j].uuid, tag="") + + st.rerun() else: st.warning("No data found") else: st.warning("No data found") - - with st.expander('Add to shot', False): - shot_number = st.number_input(f"Shot # (out of {len(shot_list)})", 1, - len(shot_list), value=1, - step=1, key=f"shot_frame_{gallery_image_list[i + j].uuid}") - if st.button(f"Add to shot", key=f"{gallery_image_list[i + j].uuid}", help="Promote this variant to the primary image", use_container_width=True): - pil_image = generate_pil_image(gallery_image_list[i + j].location) - shot_uuid = shot_list[shot_number - 1].uuid - add_key_frame(pil_image, False, shot_uuid, len(data_repo.get_timing_list_from_shot(shot_uuid)), refresh_state=False) - - # removing this from the gallery view - data_repo.update_file(gallery_image_list[i + j].uuid, tag="") - - st.rerun() - + st.markdown("***") else: st.warning("No images present") - - def create_variate_option(column, key): label = key.replace('_', ' ').capitalize() variate_option = column.checkbox(f"Vary {label.lower()}", key=f"{key}_checkbox") diff --git a/ui_components/widgets/frame_selector.py b/ui_components/widgets/frame_selector.py index 10920ca6..aafa7952 100644 --- a/ui_components/widgets/frame_selector.py +++ b/ui_components/widgets/frame_selector.py @@ -4,6 +4,7 @@ from 
ui_components.models import InternalFrameTimingObject, InternalShotObject from utils.data_repo.data_repo import DataRepo from ui_components.constants import WorkflowStageType +from utils import st_memory @@ -43,62 +44,62 @@ def frame_selector_widget(): update_current_frame_index(st.session_state['current_frame_index']) - with st.expander(f"🖼️ Frame #{st.session_state['current_frame_index']} Details"): - a1, a2 = st.columns([3,2]) - with a1: - st.success(f"Main Key Frame:") - display_image(st.session_state['current_frame_uuid'], stage=WorkflowStageType.STYLED.value, clickable=False) - - - # st.warning(f"Guidance Image:") - # display_image(st.session_state['current_frame_uuid'], stage=WorkflowStageType.SOURCE.value, clickable=False) - with a2: - st.caption("Replace styled image") - replace_image_widget(st.session_state['current_frame_uuid'], stage=WorkflowStageType.STYLED.value) - - st.info("In Context:") - shot_list = data_repo.get_shot_list(shot.project.uuid) - shot: InternalShotObject = data_repo.get_shot_from_uuid(st.session_state["shot_uuid"]) - - # shot = data_repo.get_shot_from_uuid(st.session_state["shot_uuid"]) - timing_list: List[InternalFrameTimingObject] = shot.timing_list - - if timing_list and len(timing_list): - grid = st.columns(3) # Change to 4 columns - for idx, timing in enumerate(timing_list): - with grid[idx % 3]: # Change to 4 columns - if timing.primary_image and timing.primary_image.location: - st.image(timing.primary_image.location, use_column_width=True) - else: - st.warning("No primary image present") - else: - st.warning("No keyframes present") - - st.markdown("---") + with st.expander(f"🖼️ Frame #{st.session_state['current_frame_index']} Details", expanded=True): + if st_memory.toggle("Open", value=False, key="frame_toggle"): + a1, a2 = st.columns([3,2]) + with a1: + st.success(f"Main Key Frame:") + display_image(st.session_state['current_frame_uuid'], stage=WorkflowStageType.STYLED.value, clickable=False) + + + # st.warning(f"Guidance 
Image:") + # display_image(st.session_state['current_frame_uuid'], stage=WorkflowStageType.SOURCE.value, clickable=False) + with a2: + st.caption("Replace styled image") + replace_image_widget(st.session_state['current_frame_uuid'], stage=WorkflowStageType.STYLED.value) + + st.info("In Context:") + shot_list = data_repo.get_shot_list(shot.project.uuid) + shot: InternalShotObject = data_repo.get_shot_from_uuid(st.session_state["shot_uuid"]) + + # shot = data_repo.get_shot_from_uuid(st.session_state["shot_uuid"]) + timing_list: List[InternalFrameTimingObject] = shot.timing_list + + if timing_list and len(timing_list): + grid = st.columns(3) # Change to 4 columns + for idx, timing in enumerate(timing_list): + with grid[idx % 3]: # Change to 4 columns + if timing.primary_image and timing.primary_image.location: + st.image(timing.primary_image.location, use_column_width=True) + else: + st.warning("No primary image present") + else: + st.warning("No keyframes present") + + st.markdown("---") else: shot_list = data_repo.get_shot_list(shot.project.uuid) shot: InternalShotObject = data_repo.get_shot_from_uuid(st.session_state["shot_uuid"]) - with st.expander(f"🎬 {shot.name} Details"): - - # shot = data_repo.get_shot_from_uuid(st.session_state["shot_uuid"]) - - timing_list: List[InternalFrameTimingObject] = shot.timing_list - - if timing_list and len(timing_list): - grid = st.columns(3) # Change to 3 columns - for idx, timing in enumerate(timing_list): - with grid[idx % 3]: # Change to 3 columns - if timing.primary_image and timing.primary_image.location: - st.image(timing.primary_image.location, use_column_width=True) - - # Call jump_to_single_frame_view_button function - jump_to_single_frame_view_button(idx + 1, timing_list, f"jump_to_{idx + 1}") + with st.expander(f"🎬 {shot.name} Details",expanded=True): + if st_memory.toggle("Open", value=True, key="shot_details_toggle"): + + timing_list: List[InternalFrameTimingObject] = shot.timing_list + + if timing_list and 
len(timing_list): + grid = st.columns(3) # Change to 3 columns + for idx, timing in enumerate(timing_list): + with grid[idx % 3]: # Change to 3 columns + if timing.primary_image and timing.primary_image.location: + st.image(timing.primary_image.location, use_column_width=True) - else: - st.warning("No primary image present") - else: - st.warning("No keyframes present") + # Call jump_to_single_frame_view_button function + jump_to_single_frame_view_button(idx + 1, timing_list, f"jump_to_{idx + 1}") + + else: + st.warning("No primary image present") + else: + st.warning("No keyframes present") def update_current_frame_index(index): diff --git a/ui_components/widgets/shot_view.py b/ui_components/widgets/shot_view.py index c262ef43..e4135520 100644 --- a/ui_components/widgets/shot_view.py +++ b/ui_components/widgets/shot_view.py @@ -40,10 +40,14 @@ def shot_keyframe_element(shot_uuid, items_per_row, **kwargs): with header_col_1: - name = st.text_input("Update name:", value=shot.name,max_chars=40) - + name = st.text_input("Name:", value=shot.name,max_chars=40) + if name != shot.name: + st.success("This would've updated.") + # @pom4piyush, could you make this update the shot name in the db? + with header_col_2: duration = st.number_input("Duration:") + # @pom4piyush, this should update the shot duration. with header_col_3: col2, col3, col4 = st.columns(3) @@ -75,7 +79,7 @@ def shot_keyframe_element(shot_uuid, items_per_row, **kwargs): else: st.warning("No keyframes present") - st.markdown("***") + # st.markdown("***") if st.session_state["open_shot"] == shot.shot_idx: @@ -84,7 +88,11 @@ def shot_keyframe_element(shot_uuid, items_per_row, **kwargs): confirm_delete = st.checkbox("I know that this will delete all the frames and videos within") if confirm_delete: if st.button("Delete frame"): - st.success("Done!") + + # @pom4piyush, would you be able to add a shot deletion function that can be called it? 
+ st.success("Shot deleted!") + time.sleep(0.3) + st.rerun() else: st.button("Delete frame", disabled=True, help="Check the box above to enable the delete bottom.") with bottom3: diff --git a/ui_components/widgets/timeline_view.py b/ui_components/widgets/timeline_view.py index b884e96b..3bd0c792 100644 --- a/ui_components/widgets/timeline_view.py +++ b/ui_components/widgets/timeline_view.py @@ -15,7 +15,7 @@ def timeline_view(shot_uuid, stage): _, header_col_2 = st.columns([5.5,1.5]) with header_col_2: - items_per_row = st_memory.slider("How many frames per row?", min_value=1, max_value=10, value=5, step=1, key="items_per_row_slider") + items_per_row = st_memory.slider("How many frames per row?", min_value=3, max_value=7, value=5, step=1, key="items_per_row_slider") if stage == 'Key Frames': for shot in shot_list: diff --git a/utils/st_memory.py b/utils/st_memory.py index c9171b24..96aa9bd3 100644 --- a/utils/st_memory.py +++ b/utils/st_memory.py @@ -84,7 +84,7 @@ def toggle(label, value=True,key=None, help=None, on_change=None, disabled=False if key not in st.session_state: st.session_state[key] = value - selection = st.toggle(label=label, value=st.session_state[key], help=help, on_change=on_change, disabled=disabled, label_visibility=label_visibility) + selection = st.toggle(label=label, value=st.session_state[key], help=help, on_change=on_change, disabled=disabled, label_visibility=label_visibility, key=f"{key}_value") if selection != st.session_state[key]: st.session_state[key] = selection From 0f76d15dcc0e828d446adfe455e64d2fe4c3e657 Mon Sep 17 00:00:00 2001 From: piyushK52 Date: Wed, 1 Nov 2023 12:41:15 +0530 Subject: [PATCH 141/164] video inference fixed --- backend/db_repo.py | 6 ++++- ui_components/methods/common_methods.py | 6 ++--- ui_components/models.py | 25 ++++++------------- ui_components/setup.py | 3 +-- .../widgets/variant_comparison_grid.py | 4 +-- utils/data_repo/data_repo.py | 4 +-- 6 files changed, 20 insertions(+), 28 deletions(-) diff --git 
a/backend/db_repo.py b/backend/db_repo.py index a6ffc8b8..fb2eec69 100644 --- a/backend/db_repo.py +++ b/backend/db_repo.py @@ -1481,7 +1481,7 @@ def create_shot(self, project_uuid, duration, name=None, meta_data="", desc=""): return InternalResponse(payload, 'shot created successfully', True) - def update_shot(self, shot_uuid, shot_idx=None, name=None, duration=None, meta_data=None, desc=None): + def update_shot(self, shot_uuid, shot_idx=None, name=None, duration=None, meta_data=None, desc=None, main_clip_id=None): shot: Shot = Shot.objects.filter(uuid=shot_uuid, is_disabled=False).first() if not shot: return InternalResponse({}, 'invalid shot uuid', False) @@ -1502,6 +1502,10 @@ def update_shot(self, shot_uuid, shot_idx=None, name=None, duration=None, meta_d update_data['desc'] = desc if shot_idx != None: update_data['shot_idx'] = shot_idx + if main_clip_id != None: + file = InternalFileObject.objects.filter(uuid=main_clip_id, is_disabled=False).first() + if file: + update_data['main_clip_id'] = file.id for k,v in update_data.items(): setattr(shot, k, v) diff --git a/ui_components/methods/common_methods.py b/ui_components/methods/common_methods.py index 06dc7715..f14e7934 100644 --- a/ui_components/methods/common_methods.py +++ b/ui_components/methods/common_methods.py @@ -771,7 +771,7 @@ def process_inference_output(**kwargs): os.remove(temp_output_file.name) if 'normalise_speed' in settings and settings['normalise_speed']: - output = VideoProcessor.update_video_bytes_speed(output, AnimationStyleType.INTERPOLATION.value, shot.duration) + output = VideoProcessor.update_video_bytes_speed(output, shot.duration) video_location = "videos/" + str(shot.project.uuid) + "/assets/videos/0_raw/" + str(uuid.uuid4()) + ".mp4" video = convert_bytes_to_file( @@ -784,8 +784,8 @@ def process_inference_output(**kwargs): data_repo.add_interpolated_clip(shot_uuid, interpolated_clip_id=video.uuid) if not shot.main_clip: - output_video = update_speed_of_video_clip(video, timing_uuid) 
- data_repo.update_specific_timing(timing_uuid, main_clip_id=output_video.uuid) + output_video = update_speed_of_video_clip(video, shot.duration) + data_repo.update_shot(shot_uuid, main_clip_id=output_video.uuid) else: del kwargs['log_uuid'] diff --git a/ui_components/models.py b/ui_components/models.py index a35548b6..da9afbdf 100644 --- a/ui_components/models.py +++ b/ui_components/models.py @@ -169,8 +169,8 @@ def __init__(self, **kwargs): self.meta_data = kwargs['meta_data'] if key_present('meta_data', kwargs) else {} self.timing_list = [InternalFrameTimingObject(**timing) for timing in kwargs["timing_list"]] \ if key_present('timing_list', kwargs) and kwargs["timing_list"] else [] - self.interpolated_clip_list = kwargs['interpolated_clip_list'] if key_present('interpolated_clip_list', kwargs) \ - else "" + self.interpolated_clip_list = [InternalFileObject(**vid) for vid in kwargs['interpolated_clip_list']] if key_present('interpolated_clip_list', kwargs) \ + else [] self.main_clip = InternalFileObject(**kwargs['main_clip']) if key_present('main_clip', kwargs) else \ None @@ -180,27 +180,16 @@ def meta_data_dict(self): @property def primary_interpolated_video_index(self): - video_list = self.interpolated_clip_file_list + video_list = self.interpolated_clip_list if not len(video_list): return -1 - for idx, vid in enumerate(video_list): - if vid.uuid == self.main_clip.uuid: - return idx + if self.main_clip: + for idx, vid in enumerate(video_list): + if vid.uuid == self.main_clip.uuid: + return idx return -1 - - @property - def interpolated_clip_file_list(self): - if not (self.interpolated_clip_list and len(self.interpolated_clip_list)): - return [] - - from utils.data_repo.data_repo import DataRepo - - data_repo = DataRepo() - video_id_list = json.loads(self.interpolated_clip_list) if self.interpolated_clip_list else [] - video_list = data_repo.get_image_list_from_uuid_list(video_id_list) - return video_list class InternalAppSettingObject: diff --git 
a/ui_components/setup.py b/ui_components/setup.py index ed063cb3..8b6138db 100644 --- a/ui_components/setup.py +++ b/ui_components/setup.py @@ -194,8 +194,7 @@ def setup_app_ui(): project_settings_page(st.session_state["project_uuid"]) elif st.session_state["main_view_type"] == "Video Rendering": - video_rendering_page( - mainheader2, st.session_state["project_uuid"]) + video_rendering_page(st.session_state["project_uuid"]) elif st.session_state["section"] == "App Settings": app_settings_page() diff --git a/ui_components/widgets/variant_comparison_grid.py b/ui_components/widgets/variant_comparison_grid.py index f3a41231..d6d8159d 100644 --- a/ui_components/widgets/variant_comparison_grid.py +++ b/ui_components/widgets/variant_comparison_grid.py @@ -15,7 +15,7 @@ def variant_comparison_grid(ele_uuid, stage=CreativeProcessType.MOTION.value): if stage == CreativeProcessType.MOTION.value: shot_uuid = ele_uuid shot = data_repo.get_shot_from_uuid(shot_uuid) - variants = shot.interpolated_clip_file_list + variants = shot.interpolated_clip_list else: timing_uuid = ele_uuid timing = data_repo.get_timing_from_uuid(timing_uuid) @@ -48,7 +48,7 @@ def variant_comparison_grid(ele_uuid, stage=CreativeProcessType.MOTION.value): cols = st.columns(num_columns) with cols[0]: if stage == CreativeProcessType.MOTION.value: - st.video(variants[current_variant].location, format='mp4', start_time=0) if variants[current_variant] else st.error("No video present") + st.video(variants[current_variant].location, format='mp4', start_time=0) if (current_variant != -1 and variants[current_variant]) else st.error("No video present") else: st.image(variants[current_variant].location, use_column_width=True) st.success("**Main variant**") diff --git a/utils/data_repo/data_repo.py b/utils/data_repo/data_repo.py index 674a4822..54b71e8b 100644 --- a/utils/data_repo/data_repo.py +++ b/utils/data_repo/data_repo.py @@ -415,8 +415,8 @@ def create_shot(self, project_uuid, duration, name="", meta_data="", 
desc=""): shot = res.data['data'] if res.status else None return InternalShotObject(**shot) if shot else None - def update_shot(self, shot_uuid, shot_idx=None, name=None, duration=None, meta_data=None, desc=None): - res = self.db_repo.update_shot(shot_uuid, shot_idx=shot_idx, name=name, duration=duration, meta_data=meta_data, desc=desc) + def update_shot(self, shot_uuid, shot_idx=None, name=None, duration=None, meta_data=None, desc=None, main_clip_id=None): + res = self.db_repo.update_shot(shot_uuid, shot_idx=shot_idx, name=name, duration=duration, meta_data=meta_data, desc=desc, main_clip_id=main_clip_id) return res.status def delete_shot(self, shot_uuid): From b00fda71eef55df89acc141d531135de47a072e8 Mon Sep 17 00:00:00 2001 From: piyushK52 Date: Wed, 1 Nov 2023 14:35:07 +0530 Subject: [PATCH 142/164] frame shortlisting fix --- shared/constants.py | 1 + .../components/frame_styling_page.py | 2 +- ui_components/constants.py | 1 + ui_components/methods/common_methods.py | 6 +- .../widgets/add_key_frame_element.py | 14 +++- ui_components/widgets/explorer_element.py | 71 +++++++++---------- 6 files changed, 54 insertions(+), 41 deletions(-) diff --git a/shared/constants.py b/shared/constants.py index fc6657d6..9aaff064 100644 --- a/shared/constants.py +++ b/shared/constants.py @@ -60,6 +60,7 @@ class InternalFileTag(ExtendedEnum): INPUT_VIDEO = 'input_video' TEMP_IMAGE = 'temp' GALLERY_IMAGE = 'gallery_image' + SHORTLISTED_GALLERY_IMAGE = 'shortlisted_gallery_image' class AnimationStyleType(ExtendedEnum): INTERPOLATION = "Interpolate to next" diff --git a/ui_components/components/frame_styling_page.py b/ui_components/components/frame_styling_page.py index 9e8750bd..7f6b0fca 100644 --- a/ui_components/components/frame_styling_page.py +++ b/ui_components/components/frame_styling_page.py @@ -172,7 +172,7 @@ def frame_styling_page(shot_uuid: str): if st_memory.toggle("Open", value=True, key="explorer_shortlist_toggle"): project_setting = 
data_repo.get_project_setting(shot.project.uuid) page_number = st.radio("Select page", options=range(1, project_setting.total_gallery_pages + 1), horizontal=True) - gallery_image_view(shot.project.uuid,page_number=page_number,num_items_per_page=10, open_detailed_view_for_all=False, shortlist=False,num_columns=2) + gallery_image_view(shot.project.uuid, page_number=page_number, num_items_per_page=10, open_detailed_view_for_all=False, shortlist=True, num_columns=2) timeline_view(shot_uuid, "Key Frames") elif st.session_state['page'] == "Shots": diff --git a/ui_components/constants.py b/ui_components/constants.py index 9bcf836d..210c55f9 100644 --- a/ui_components/constants.py +++ b/ui_components/constants.py @@ -51,6 +51,7 @@ class DefaultProjectSettingParams: batch_model = None total_log_pages = 1 total_gallery_pages = 1 + total_shortlist_gallery_pages = 1 # TODO: make proper paths for every file CROPPED_IMG_LOCAL_PATH = "videos/temp/cropped.png" diff --git a/ui_components/methods/common_methods.py b/ui_components/methods/common_methods.py index f14e7934..09013549 100644 --- a/ui_components/methods/common_methods.py +++ b/ui_components/methods/common_methods.py @@ -397,16 +397,18 @@ def create_or_update_mask(timing_uuid, image) -> InternalFileObject: timing = data_repo.get_timing_from_uuid(timing_uuid) return timing.mask.location -def add_new_shot(project_uuid): +def add_new_shot(project_uuid, name=""): data_repo = DataRepo() shot_data = { "project_uuid": project_uuid, "desc": "", + "name": name, "duration": 2 } - _ = data_repo.create_shot(**shot_data) + shot = data_repo.create_shot(**shot_data) + return shot # adds the image file in variant (alternative images) list def add_image_variant(image_file_uuid: str, timing_uuid: str): diff --git a/ui_components/widgets/add_key_frame_element.py b/ui_components/widgets/add_key_frame_element.py index a83380bb..0362b292 100644 --- a/ui_components/widgets/add_key_frame_element.py +++ 
b/ui_components/widgets/add_key_frame_element.py @@ -1,5 +1,7 @@ +from typing import Union import streamlit as st from ui_components.constants import CreativeProcessType, WorkflowStageType +from ui_components.models import InternalFileObject from ui_components.widgets.image_zoom_widgets import zoom_inputs from utils import st_memory @@ -90,16 +92,24 @@ def add_key_frame_element(shot_uuid): return selected_image, inherit_styling_settings, transformation_stage -def add_key_frame(selected_image, inherit_styling_settings, shot_uuid, target_frame_position=None, refresh_state=True): +def add_key_frame(selected_image: Union[Image.Image, InternalFileObject], inherit_styling_settings, shot_uuid, target_frame_position=None, refresh_state=True): + ''' + either a pil image or a internalfileobject can be passed to this method, for adding it inside a shot + ''' data_repo = DataRepo() timing_list = data_repo.get_timing_list_from_shot(shot_uuid) + # creating frame inside the shot at target_frame_position target_frame_position = st.session_state['current_frame_index'] if target_frame_position is None else target_frame_position target_aux_frame_index = min(len(timing_list), target_frame_position) _ = create_frame_inside_shot(shot_uuid, target_aux_frame_index) timing_list = data_repo.get_timing_list_from_shot(shot_uuid) - if selected_image: + # updating the newly created frame timing + if isinstance(selected_image, InternalFileObject): + data_repo.update_specific_timing(timing_list[target_aux_frame_index].uuid, source_image_id=selected_image.uuid) + data_repo.update_specific_timing(timing_list[target_aux_frame_index].uuid, primary_image_id=selected_image.uuid) + else: save_uploaded_image(selected_image, shot_uuid, timing_list[target_aux_frame_index].uuid, WorkflowStageType.SOURCE.value) save_uploaded_image(selected_image, shot_uuid, timing_list[target_aux_frame_index].uuid, WorkflowStageType.STYLED.value) diff --git a/ui_components/widgets/explorer_element.py 
b/ui_components/widgets/explorer_element.py index 5b38c94a..1c32c82f 100644 --- a/ui_components/widgets/explorer_element.py +++ b/ui_components/widgets/explorer_element.py @@ -105,10 +105,6 @@ def explorer_element(project_uuid): project_setting = data_repo.get_project_setting(project_uuid) st.markdown("***") - k1,k2 = st.columns([5,1]) - - page_number = k1.radio("Select page", options=range(1, project_setting.total_gallery_pages + 1), horizontal=True) - open_detailed_view_for_all = k2.toggle("Open detailed view for all:") f1,f2 = st.columns([1, 1]) num_columns = f1.slider('Number of columns:', min_value=3, max_value=7, value=5) @@ -116,33 +112,41 @@ def explorer_element(project_uuid): st.markdown("***") tab1, tab2 = st.tabs(["Explorations", "Shortlist"]) - - with tab1: - gallery_image_view(project_uuid,page_number,num_items_per_page,open_detailed_view_for_all, False,num_columns) + k1,k2 = st.columns([5,1]) + page_number = k1.radio("Select page", options=range(1, project_setting.total_gallery_pages + 1), horizontal=True, key="main_gallery") + open_detailed_view_for_all = k2.toggle("Open detailed view for all:", key='main_gallery_toggle') + gallery_image_view(project_uuid, page_number, num_items_per_page, open_detailed_view_for_all, False, num_columns) with tab2: - # @pom4piyush, this should trigger the gallery image view based - passing shortlist = True to show only the shortlisted items. This throws an error right now due to duplicate items on the list but won't when it's set up correctly. 
- st.success("Commented out...") - # gallery_image_view(project_uuid,page_number,num_items_per_page,open_detailed_view_for_all, True,num_columns) + k1,k2 = st.columns([5,1]) + shortlist_page_number = k1.radio("Select page", options=range(1, project_setting.total_shortlist_gallery_pages + 1), horizontal=True, key="shortlist_gallery") + open_detailed_view_for_all = k2.toggle("Open detailed view for all:", key='shortlist_gallery_toggle') + gallery_image_view(project_uuid, shortlist_page_number, num_items_per_page, open_detailed_view_for_all, True, num_columns) -def gallery_image_view(project_uuid,page_number=1,num_items_per_page=20, open_detailed_view_for_all=False, shortlist=False,num_columns=2): +def gallery_image_view(project_uuid,page_number=1,num_items_per_page=20, open_detailed_view_for_all=False, shortlist=False, num_columns=2): data_repo = DataRepo() project_settings = data_repo.get_project_setting(project_uuid) shot_list = data_repo.get_shot_list(project_uuid) - # @pom4piyush, when you've added the shortlist, we should add a value tot the below that triggers based on the shortlist value that's fed into this function. 
+ gallery_image_list, res_payload = data_repo.get_all_file_list( file_type=InternalFileType.IMAGE.value, - tag=InternalFileTag.GALLERY_IMAGE.value, + tag=InternalFileTag.GALLERY_IMAGE.value if not shortlist else InternalFileTag.SHORTLISTED_GALLERY_IMAGE.value, project_id=project_uuid, page=page_number, data_per_page=num_items_per_page, sort_order=SortOrder.DESCENDING.value ) - if project_settings.total_gallery_pages != res_payload['total_pages']: - project_settings.total_gallery_pages = res_payload['total_pages'] - st.rerun() + if not shortlist: + if project_settings.total_gallery_pages != res_payload['total_pages']: + project_settings.total_gallery_pages = res_payload['total_pages'] + st.rerun() + else: + if project_settings.total_shortlist_gallery_pages != res_payload['total_pages']: + project_settings.total_shortlist_gallery_pages = res_payload['total_pages'] + st.rerun() + total_image_count = res_payload['count'] if gallery_image_list and len(gallery_image_list): start_index = 0 @@ -154,18 +158,18 @@ def gallery_image_view(project_uuid,page_number=1,num_items_per_page=20, open_de if i + j < len(gallery_image_list): with cols[j]: st.image(gallery_image_list[i + j].location, use_column_width=True) - if st.toggle(f'Open Details For #{(page_number - 1) * num_items_per_page + i + j + 1}', open_detailed_view_for_all): - # @pom4piyush, we should replace the shorlisted value with the shortlisted value from the database - shortlisted = True - if shortlisted == False: + if st.toggle(f'Open Details For #{(page_number - 1) * num_items_per_page + i + j + 1}', open_detailed_view_for_all, key=f"open_gallery_details_{gallery_image_list[i + j].uuid}"): + if not shortlist: if st.button("Add To Shortlist", key=f"shortlist_{gallery_image_list[i + j].uuid}",use_container_width=True): + data_repo.update_file(gallery_image_list[i + j].uuid, tag=InternalFileTag.SHORTLISTED_GALLERY_IMAGE.value) + st.success("Added To Shortlist") time.sleep(0.3) - st.success("Added To Shortlist") 
st.rerun() else: if st.button("Remove From Shortlist", key=f"shortlist_{gallery_image_list[i + j].uuid}",use_container_width=True): + data_repo.update_file(gallery_image_list[i + j].uuid, tag=InternalFileTag.GALLERY_IMAGE.value) + st.success("Removed From Shortlist") time.sleep(0.3) - st.success("Removed From Shortlist") st.rerun() if gallery_image_list[i + j].inference_log: @@ -178,31 +182,24 @@ def gallery_image_view(project_uuid,page_number=1,num_items_per_page=20, open_de st.info(f"Model: {model}") shot_names = [s.name for s in shot_list] shot_names.append('**Create New Shot**') - shot_name = st.selectbox('Shot Name', shot_names, key="current_shot_sidebar_selector") + shot_name = st.selectbox('Shot Name', shot_names, key=f"current_shot_sidebar_selector_{gallery_image_list[i + j].uuid}") if shot_name == "**Create New Shot**": - shot_name = st.text_input("New shot name:", max_chars=40) + shot_name = st.text_input("New shot name:", max_chars=40, key=f"shot_name_{gallery_image_list[i+j].uuid}") if st.button("Create new shot", key=f"create_new_{gallery_image_list[i + j].uuid}", use_container_width=True): - add_new_shot(project_uuid) - pil_image = generate_pil_image(gallery_image_list[i + j].location) - data_repo = DataRepo() - shot_list = data_repo.get_shot_list(project_uuid) - shot_uuid = shot_list[-1].uuid - add_key_frame(pil_image, False, shot_uuid, len(data_repo.get_timing_list_from_shot(shot_uuid)), refresh_state=False) - # @pom4piyush This should create a new shot, update its name to the specified name, and add the selected image to it. This might be done in an inelegent way + needs you to add an ability to update the shot name. 
+ new_shot = add_new_shot(project_uuid, name=shot_name) + add_key_frame(gallery_image_list[i + j], False, new_shot.uuid, len(data_repo.get_timing_list_from_shot(new_shot.uuid)), refresh_state=False) + # removing this from the gallery view + data_repo.update_file(gallery_image_list[i + j].uuid, tag="") st.rerun() else: if st.button(f"Add to shot", key=f"add_{gallery_image_list[i + j].uuid}", help="Promote this variant to the primary image", use_container_width=True): shot_number = shot_names.index(shot_name) + 1 - pil_image = generate_pil_image(gallery_image_list[i + j].location) shot_uuid = shot_list[shot_number - 1].uuid - # @pom4piyush, in cases like these, why not keep the same file as is used in the gallery view? Assuming generate_pil_image is turning a PIL object that's then downloaded - add_key_frame(pil_image, False, shot_uuid, len(data_repo.get_timing_list_from_shot(shot_uuid)), refresh_state=False) - + add_key_frame(gallery_image_list[i + j], False, shot_uuid, len(data_repo.get_timing_list_from_shot(shot_uuid)), refresh_state=False) # removing this from the gallery view data_repo.update_file(gallery_image_list[i + j].uuid, tag="") - st.rerun() else: st.warning("No data found") @@ -212,6 +209,8 @@ def gallery_image_view(project_uuid,page_number=1,num_items_per_page=20, open_de st.markdown("***") else: st.warning("No images present") + + def create_variate_option(column, key): label = key.replace('_', ' ').capitalize() variate_option = column.checkbox(f"Vary {label.lower()}", key=f"{key}_checkbox") From 875aeb56ff67ea012585609a7bff2a215dc5cd09 Mon Sep 17 00:00:00 2001 From: piyushK52 Date: Wed, 1 Nov 2023 17:50:45 +0530 Subject: [PATCH 143/164] jump to shot fixed --- ui_components/widgets/frame_movement_widgets.py | 6 ++++-- ui_components/widgets/frame_selector.py | 13 +++++-------- 2 files changed, 9 insertions(+), 10 deletions(-) diff --git a/ui_components/widgets/frame_movement_widgets.py b/ui_components/widgets/frame_movement_widgets.py index 
5e3cc3f4..57226bfe 100644 --- a/ui_components/widgets/frame_movement_widgets.py +++ b/ui_components/widgets/frame_movement_widgets.py @@ -166,10 +166,12 @@ def replace_image_widget(timing_uuid, stage, options=["Uploaded Frame", "Other F def jump_to_single_frame_view_button(display_number, timing_list, src): if st.button(f"Jump to #{display_number}", key=src): - st.session_state['prev_frame_index'] = display_number + st.session_state['prev_frame_index'] = st.session_state['current_frame_index'] = display_number st.session_state['current_frame_uuid'] = timing_list[st.session_state['current_frame_index'] - 1].uuid st.session_state['frame_styling_view_type'] = "Individual" st.session_state['change_view_type'] = True - # st.session_state['page'] = "Key Frames" + st.session_state['shot_uuid'] = timing_list[st.session_state['current_frame_index'] - 1].shot.uuid + st.session_state['prev_shot_index'] = st.session_state['current_shot_index'] = timing_list[st.session_state['current_frame_index'] - 1].shot.shot_idx st.session_state["manual_select"] = 0 + st.session_state['page'] = "Key Frames" st.rerun() diff --git a/ui_components/widgets/frame_selector.py b/ui_components/widgets/frame_selector.py index 8fe8cef2..20490170 100644 --- a/ui_components/widgets/frame_selector.py +++ b/ui_components/widgets/frame_selector.py @@ -24,16 +24,13 @@ def frame_selector_widget(): if 'prev_shot_index' not in st.session_state: st.session_state['prev_shot_index'] = shot.shot_idx - # Get the list of shot names shot_names = [s.name for s in shot_list] - - # Add a selectbox for shot_name shot_name = st.selectbox('Shot Name', shot_names, key="current_shot_sidebar_selector") - - # Set current_shot_index based on the selected shot_name - st.session_state['current_shot_index'] = shot_names.index(shot_name) + 1 - - update_current_shot_index(st.session_state['current_shot_index']) + + if not ('current_shot_index' in st.session_state and st.session_state['current_shot_index']): + 
st.session_state['current_shot_index'] = shot_names.index(shot_name) + 1 + update_current_shot_index(st.session_state['current_shot_index']) + if st.session_state['page'] == "Key Frames": with time2: if 'prev_frame_index' not in st.session_state: From 9d593fc32e8fa97ddb6b6bde128e46f2ef6159e5 Mon Sep 17 00:00:00 2001 From: piyushK52 Date: Wed, 1 Nov 2023 17:59:45 +0530 Subject: [PATCH 144/164] shot update fix --- ui_components/widgets/shot_view.py | 16 +++++++++++----- 1 file changed, 11 insertions(+), 5 deletions(-) diff --git a/ui_components/widgets/shot_view.py b/ui_components/widgets/shot_view.py index 639fe8d6..efc207db 100644 --- a/ui_components/widgets/shot_view.py +++ b/ui_components/widgets/shot_view.py @@ -37,14 +37,20 @@ def shot_keyframe_element(shot_uuid, items_per_row, **kwargs): if st.session_state["open_shot"] == shot.uuid: with header_col_1: - name = st.text_input("Name:", value=shot.name,max_chars=40) + name = st.text_input("Name:", value=shot.name, max_chars=40) if name != shot.name: - st.success("This would've updated.") - # @pom4piyush, could you make this update the shot name in the db? + data_repo.update_shot(shot_uuid, name=name) + st.success("Success") + time.sleep(0.3) + st.rerun() with header_col_2: - duration = st.number_input("Duration:", disabled=True, value=shot.duration) - # @pom4piyush, this should update the shot duration. 
+ duration = st.number_input("Duration:", value=shot.duration) + if duration != shot.duration: + data_repo.update_shot(shot_uuid, duration=duration) + st.success("Success") + time.sleep(0.3) + st.rerun() with header_col_3: col2, col3, col4 = st.columns(3) From ce56dad114888c0f8c44b4931d99521b71cd39e6 Mon Sep 17 00:00:00 2001 From: peter942 Date: Wed, 1 Nov 2023 12:55:49 +0000 Subject: [PATCH 145/164] Refactoring add images --- .../components/frame_styling_page.py | 2 +- .../widgets/add_key_frame_element.py | 111 ++++++++---------- ui_components/widgets/frame_selector.py | 2 +- ui_components/widgets/shot_view.py | 27 +++-- ui_components/widgets/timeline_view.py | 1 + 5 files changed, 70 insertions(+), 73 deletions(-) diff --git a/ui_components/components/frame_styling_page.py b/ui_components/components/frame_styling_page.py index 9e8750bd..ecd1fb76 100644 --- a/ui_components/components/frame_styling_page.py +++ b/ui_components/components/frame_styling_page.py @@ -155,7 +155,7 @@ def frame_styling_page(shot_uuid: str): drawing_element(timing_list,project_settings, shot_uuid) with st.expander("➕ Add Key Frame", expanded=True): - selected_image, inherit_styling_settings, _ = add_key_frame_element(shot_uuid) + selected_image, inherit_styling_settings = add_key_frame_element(shot_uuid) if st.button(f"Add key frame",type="primary",use_container_width=True): add_key_frame(selected_image, inherit_styling_settings, shot_uuid) st.rerun() diff --git a/ui_components/widgets/add_key_frame_element.py b/ui_components/widgets/add_key_frame_element.py index a83380bb..647eaead 100644 --- a/ui_components/widgets/add_key_frame_element.py +++ b/ui_components/widgets/add_key_frame_element.py @@ -13,82 +13,69 @@ -def add_key_frame_element(shot_uuid): +def add_key_frame_section(shot_uuid, individual_view=True): data_repo = DataRepo() shot = data_repo.get_shot_from_uuid(shot_uuid) timing_list = data_repo.get_timing_list_from_shot(shot_uuid) - add1, add2 = st.columns(2) - with add1: - 
selected_image_location = "" - image1,image2 = st.columns(2) - with image1: - source_of_starting_image = st.radio("Where would you like to get the starting image from?", [ - "Existing Frame", "Uploaded image"], key="source_of_starting_image") - - transformation_stage = None - if source_of_starting_image == "Existing Frame": - with image2: - transformation_stage = st.radio( - label="Which stage would you like to use?", - options=ImageStage.value_list(), - key="transformation_stage-bottom", - horizontal=True - ) - image_idx = st.number_input( - "Which frame would you like to use?", - min_value=1, - max_value=max(1, len(timing_list)), - value=st.session_state['current_frame_index'], - step=1, - key="image_idx" - ) - if transformation_stage == ImageStage.SOURCE_IMAGE.value: - if timing_list[image_idx - 1].source_image is not None and timing_list[image_idx - 1].source_image != "": - selected_image_location = timing_list[image_idx - 1].source_image.location - else: - selected_image_location = "" - elif transformation_stage == ImageStage.MAIN_VARIANT.value: - selected_image_location = timing_list[image_idx - 1].primary_image_location - elif source_of_starting_image == "Uploaded image": - with image2: - uploaded_image = st.file_uploader( - "Upload an image", type=["png", "jpg", "jpeg"]) - # FILE UPLOAD HANDLE-- - if uploaded_image is not None: - image = Image.open(uploaded_image) - file_location = f"videos/{shot.uuid}/assets/frames/1_selected/{uploaded_image.name}" - selected_image_location = save_or_host_file(image, file_location) - selected_image_location = selected_image_location or file_location - else: - selected_image_location = "" - image_idx = st.session_state['current_frame_index'] + selected_image_location = "" + source_of_starting_image = st.radio("Starting image source:", ["Uploaded image", "Existing Frame"], key="source_of_starting_image") + + if source_of_starting_image == "Existing Frame": + image_idx = st.number_input("Which frame would you like to use?", 
min_value=1, max_value=max(1, len(timing_list)), value=st.session_state['current_frame_index'], step=1, key="image_idx") + selected_image_location = timing_list[image_idx - 1].primary_image_location + elif source_of_starting_image == "Uploaded image": + uploaded_image = st.file_uploader("Upload an image", type=["png", "jpg", "jpeg"]) + if uploaded_image is not None: + image = Image.open(uploaded_image) + file_location = f"videos/{shot.uuid}/assets/frames/1_selected/{uploaded_image.name}" + selected_image_location = save_or_host_file(image, file_location) + selected_image_location = selected_image_location or file_location + else: + selected_image_location = "" + image_idx = st.session_state['current_frame_index'] - + if individual_view: radio_text = "Inherit styling settings from the " + ("current frame?" if source_of_starting_image == "Uploaded image" else "selected frame") - inherit_styling_settings = st_memory.radio(radio_text, ["Yes", "No"], \ - key="inherit_styling_settings", horizontal=True) + inherit_styling_settings = st_memory.radio(radio_text, ["Yes", "No"], key="inherit_styling_settings", horizontal=True) - apply_zoom_effects = st_memory.radio("Apply zoom effects to inputted image?", [ - "No","Yes"], key="apply_zoom_effects", horizontal=True) + apply_zoom_effects = st_memory.radio("Apply zoom effects to inputted image?", ["No","Yes"], key="apply_zoom_effects", horizontal=True) if apply_zoom_effects == "Yes": zoom_inputs(position='new', horizontal=True) + else: + inherit_styling_settings = "Yes" + apply_zoom_effects = "No" + + return selected_image_location, inherit_styling_settings, apply_zoom_effects +def display_selected_key_frame(selected_image_location, apply_zoom_effects): selected_image = None - with add2: - if selected_image_location: - if apply_zoom_effects == "Yes": - image_preview = generate_pil_image(selected_image_location) - selected_image = apply_image_transformations(image_preview, st.session_state['zoom_level_input'], 
st.session_state['rotation_angle_input'], st.session_state['x_shift'], st.session_state['y_shift']) - else: - selected_image = generate_pil_image(selected_image_location) - st.info("Starting Image:") - st.image(selected_image) + if selected_image_location: + if apply_zoom_effects == "Yes": + image_preview = generate_pil_image(selected_image_location) + selected_image = apply_image_transformations(image_preview, st.session_state['zoom_level_input'], st.session_state['rotation_angle_input'], st.session_state['x_shift'], st.session_state['y_shift']) else: - st.error("No Starting Image Found") + selected_image = generate_pil_image(selected_image_location) + st.info("Starting Image:") + st.image(selected_image) + else: + st.error("No Starting Image Found") + + return selected_image + +def add_key_frame_element(shot_uuid): + add1, add2 = st.columns(2) + with add1: + selected_image_location, inherit_styling_settings, apply_zoom_effects = add_key_frame_section(shot_uuid) + with add2: + selected_image = display_selected_key_frame(selected_image_location, apply_zoom_effects) + + return selected_image, inherit_styling_settings + + + - return selected_image, inherit_styling_settings, transformation_stage def add_key_frame(selected_image, inherit_styling_settings, shot_uuid, target_frame_position=None, refresh_state=True): data_repo = DataRepo() diff --git a/ui_components/widgets/frame_selector.py b/ui_components/widgets/frame_selector.py index aafa7952..ea0302ce 100644 --- a/ui_components/widgets/frame_selector.py +++ b/ui_components/widgets/frame_selector.py @@ -45,7 +45,7 @@ def frame_selector_widget(): update_current_frame_index(st.session_state['current_frame_index']) with st.expander(f"🖼️ Frame #{st.session_state['current_frame_index']} Details", expanded=True): - if st_memory.toggle("Open", value=False, key="frame_toggle"): + if st_memory.toggle("Open", value=True, key="frame_toggle"): a1, a2 = st.columns([3,2]) with a1: st.success(f"Main Key Frame:") diff --git 
a/ui_components/widgets/shot_view.py b/ui_components/widgets/shot_view.py index e4135520..e96c91dc 100644 --- a/ui_components/widgets/shot_view.py +++ b/ui_components/widgets/shot_view.py @@ -5,7 +5,7 @@ from ui_components.methods.file_methods import generate_pil_image from ui_components.models import InternalFrameTimingObject, InternalShotObject -from ui_components.widgets.add_key_frame_element import add_key_frame +from ui_components.widgets.add_key_frame_element import add_key_frame,add_key_frame_section from ui_components.widgets.frame_movement_widgets import change_frame_position_input, delete_frame_button, jump_to_single_frame_view_button, move_frame_back_button, move_frame_forward_button, replace_image_widget from utils.data_repo.data_repo import DataRepo from utils import st_memory @@ -62,24 +62,33 @@ def shot_keyframe_element(shot_uuid, items_per_row, **kwargs): with col4: change_shot_toggle = st_memory.toggle("Change Shot", value=False, key="change_shot_toggle") - # st.markdown("***") + st.markdown("***") grid = st.columns(items_per_row) - if timing_list and len(timing_list): - for idx, timing in enumerate(timing_list): - with grid[idx%items_per_row]: + # if timing_list and len(timing_list): + for idx in range(len(timing_list) + 1): + with grid[idx%items_per_row]: + if idx == len(timing_list): + if st.session_state["open_shot"] == shot.shot_idx: + st.info("**Add new frame to shot**") + selected_image, inherit_styling_settings, _ = add_key_frame_section(shot_uuid, False) + if st.button(f"Add key frame",type="primary",use_container_width=True): + add_key_frame(selected_image, inherit_styling_settings, shot_uuid) + st.rerun() + else: + timing = timing_list[idx] if timing.primary_image and timing.primary_image.location: st.image(timing.primary_image.location, use_column_width=True) if st.session_state["open_shot"] == shot.shot_idx: timeline_view_buttons(idx, shot_uuid, replace_image_widget_toggle, copy_frame_toggle, move_frames_toggle,delete_frames_toggle, 
change_shot_toggle) else: - st.warning("No primary image present") - else: - st.warning("No keyframes present") + st.warning("No primary image present") + # else: + # st.warning("No keyframes present") - # st.markdown("***") + st.markdown("***") if st.session_state["open_shot"] == shot.shot_idx: diff --git a/ui_components/widgets/timeline_view.py b/ui_components/widgets/timeline_view.py index 3bd0c792..b38f694e 100644 --- a/ui_components/widgets/timeline_view.py +++ b/ui_components/widgets/timeline_view.py @@ -24,6 +24,7 @@ def timeline_view(shot_uuid, stage): if st.button('Add new shot', type="primary"): add_new_shot(shot.project.uuid) st.rerun() + else: grid = st.columns(items_per_row) for idx, shot in enumerate(shot_list): From c10a1a36993bfc8e14b3eeecff9b39a4009d2cd3 Mon Sep 17 00:00:00 2001 From: peter942 Date: Wed, 1 Nov 2023 13:42:16 +0000 Subject: [PATCH 146/164] Fixing timeline view --- ui_components/widgets/shot_view.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ui_components/widgets/shot_view.py b/ui_components/widgets/shot_view.py index b180d18e..d4100bcf 100644 --- a/ui_components/widgets/shot_view.py +++ b/ui_components/widgets/shot_view.py @@ -73,7 +73,7 @@ def shot_keyframe_element(shot_uuid, items_per_row, **kwargs): for idx in range(len(timing_list) + 1): with grid[idx%items_per_row]: if idx == len(timing_list): - if st.session_state["open_shot"] == shot.shot_idx: + if st.session_state["open_shot"] == shot.uuid: st.info("**Add new frame to shot**") selected_image, inherit_styling_settings, _ = add_key_frame_section(shot_uuid, False) if st.button(f"Add key frame",type="primary",use_container_width=True): From 735e54a869e4c250152fe121c1646342089e4931 Mon Sep 17 00:00:00 2001 From: peter942 Date: Wed, 1 Nov 2023 14:27:44 +0000 Subject: [PATCH 147/164] Fixing small issues --- ui_components/widgets/shot_view.py | 17 +++++++++-------- ui_components/widgets/timeline_view.py | 2 +- 2 files changed, 10 insertions(+), 9 deletions(-) 
diff --git a/ui_components/widgets/shot_view.py b/ui_components/widgets/shot_view.py index d4100bcf..a4948c70 100644 --- a/ui_components/widgets/shot_view.py +++ b/ui_components/widgets/shot_view.py @@ -94,7 +94,7 @@ def shot_keyframe_element(shot_uuid, items_per_row, **kwargs): st.markdown("***") - if st.session_state["open_shot"] == shot.shot_idx: + if st.session_state["open_shot"] == shot.uuid: bottom1, bottom2, bottom3 = st.columns([1,2,1]) with bottom1: confirm_delete = st.checkbox("I know that this will delete all the frames and videos within") @@ -106,6 +106,13 @@ def shot_keyframe_element(shot_uuid, items_per_row, **kwargs): st.rerun() with bottom3: + if st.button("Move shot up", key=f'shot_up_movement_{shot.uuid}'): + if shot.shot_idx > 0: + data_repo.update_shot(shot_uuid, shot_idx=shot.shot_idx-1) + else: + st.error("This is the first shot") + time.sleep(0.3) + st.rerun() if st.button("Move shot down", key=f'shot_down_movement_{shot.uuid}'): shot_list = data_repo.get_shot_list(shot.project.uuid) if shot.shot_idx < len(shot_list): @@ -115,13 +122,7 @@ def shot_keyframe_element(shot_uuid, items_per_row, **kwargs): time.sleep(0.3) st.rerun() - if st.button("Move shot up", key=f'shot_up_movement_{shot.uuid}'): - if shot.shot_idx > 0: - data_repo.update_shot(shot_uuid, shot_idx=shot.shot_idx-1) - else: - st.error("This is the first shot") - time.sleep(0.3) - st.rerun() + def shot_video_element(shot_uuid): diff --git a/ui_components/widgets/timeline_view.py b/ui_components/widgets/timeline_view.py index b38f694e..0efb2808 100644 --- a/ui_components/widgets/timeline_view.py +++ b/ui_components/widgets/timeline_view.py @@ -15,7 +15,7 @@ def timeline_view(shot_uuid, stage): _, header_col_2 = st.columns([5.5,1.5]) with header_col_2: - items_per_row = st_memory.slider("How many frames per row?", min_value=3, max_value=7, value=5, step=1, key="items_per_row_slider") + items_per_row = st_memory.slider("How many frames per row?", min_value=3, max_value=7, value=4, 
step=1, key="items_per_row_slider") if stage == 'Key Frames': for shot in shot_list: From 737786733b630431f5e89cd74c5ca4d0b5b5d39f Mon Sep 17 00:00:00 2001 From: peter942 Date: Wed, 1 Nov 2023 19:31:45 +0000 Subject: [PATCH 148/164] Fixing small issues --- ui_components/methods/common_methods.py | 3 ++ .../widgets/frame_movement_widgets.py | 8 +-- ui_components/widgets/frame_selector.py | 8 ++- ui_components/widgets/shot_view.py | 49 +++++++++++-------- 4 files changed, 42 insertions(+), 26 deletions(-) diff --git a/ui_components/methods/common_methods.py b/ui_components/methods/common_methods.py index 09013549..aa78f8fe 100644 --- a/ui_components/methods/common_methods.py +++ b/ui_components/methods/common_methods.py @@ -28,6 +28,9 @@ from utils.media_processor.video import VideoProcessor + + + def clone_styling_settings(source_frame_number, target_frame_uuid): data_repo = DataRepo() target_timing = data_repo.get_timing_from_uuid(target_frame_uuid) diff --git a/ui_components/widgets/frame_movement_widgets.py b/ui_components/widgets/frame_movement_widgets.py index 57226bfe..998632da 100644 --- a/ui_components/widgets/frame_movement_widgets.py +++ b/ui_components/widgets/frame_movement_widgets.py @@ -58,7 +58,7 @@ def move_frame_back_button(timing_uuid, orientation): arrow = "⬅️" else: # up-down arrow = "⬆️" - if st.button(arrow, key=f"move_frame_back_{timing_uuid}", help="Move frame back"): + if st.button(arrow, key=f"move_frame_back_{timing_uuid}", help="Move frame back", use_container_width=True): move_frame(direction, timing_uuid) st.rerun() @@ -70,7 +70,7 @@ def move_frame_forward_button(timing_uuid, orientation): else: # up-down arrow = "⬇️" - if st.button(arrow, key=f"move_frame_forward_{timing_uuid}", help="Move frame forward"): + if st.button(arrow, key=f"move_frame_forward_{timing_uuid}", help="Move frame forward", use_container_width=True): move_frame(direction, timing_uuid) st.rerun() @@ -81,7 +81,7 @@ def delete_frame_button(timing_uuid, 
show_label=False): else: label = "🗑️" - if st.button(label, key=f"delete_frame_{timing_uuid}", help="Delete frame"): + if st.button(label, key=f"delete_frame_{timing_uuid}", help="Delete frame", use_container_width=True): delete_frame(timing_uuid) st.rerun() @@ -165,7 +165,7 @@ def replace_image_widget(timing_uuid, stage, options=["Uploaded Frame", "Other F def jump_to_single_frame_view_button(display_number, timing_list, src): - if st.button(f"Jump to #{display_number}", key=src): + if st.button(f"Jump to #{display_number}", key=src, use_container_width=True): st.session_state['prev_frame_index'] = st.session_state['current_frame_index'] = display_number st.session_state['current_frame_uuid'] = timing_list[st.session_state['current_frame_index'] - 1].uuid st.session_state['frame_styling_view_type'] = "Individual" diff --git a/ui_components/widgets/frame_selector.py b/ui_components/widgets/frame_selector.py index c0d6406e..08f59e80 100644 --- a/ui_components/widgets/frame_selector.py +++ b/ui_components/widgets/frame_selector.py @@ -2,6 +2,7 @@ import streamlit as st from ui_components.widgets.frame_movement_widgets import delete_frame, replace_image_widget,jump_to_single_frame_view_button from ui_components.widgets.image_carousal import display_image +from ui_components.widgets.shot_view import update_shot_name,update_shot_duration from ui_components.models import InternalFrameTimingObject, InternalShotObject from utils.data_repo.data_repo import DataRepo from ui_components.constants import WorkflowStageType @@ -25,7 +26,7 @@ def frame_selector_widget(): st.session_state['prev_shot_index'] = shot.shot_idx shot_names = [s.name for s in shot_list] - shot_name = st.selectbox('Shot Name', shot_names, key="current_shot_sidebar_selector") + shot_name = st.selectbox('Shot name:', shot_names, key="current_shot_sidebar_selector") if not ('current_shot_index' in st.session_state and st.session_state['current_shot_index']): st.session_state['current_shot_index'] = 
shot_names.index(shot_name) + 1 @@ -81,6 +82,11 @@ def frame_selector_widget(): shot: InternalShotObject = data_repo.get_shot_from_uuid(st.session_state["shot_uuid"]) with st.expander(f"🎬 {shot.name} Details",expanded=True): if st_memory.toggle("Open", value=True, key="shot_details_toggle"): + a1,a2 = st.columns([2,2]) + with a1: + update_shot_name(shot, data_repo) + with a2: + update_shot_duration(shot, data_repo) timing_list: List[InternalFrameTimingObject] = shot.timing_list diff --git a/ui_components/widgets/shot_view.py b/ui_components/widgets/shot_view.py index a4948c70..553d94d3 100644 --- a/ui_components/widgets/shot_view.py +++ b/ui_components/widgets/shot_view.py @@ -20,10 +20,10 @@ def shot_keyframe_element(shot_uuid, items_per_row, **kwargs): # st.markdown(f"### {shot.name}", expanded=True) timing_list: List[InternalFrameTimingObject] = shot.timing_list - - with st.expander(f"{shot.name}", expanded=True): - - header_col_0, header_col_1, header_col_2, header_col_3 = st.columns([1, 1.75,1,4]) + + with st.expander(f"_-_-_-_", expanded=True): + # st.info(f"##### {shot.name}") + header_col_1, header_col_0, header_col_2, header_col_3 = st.columns([1.5, 1,1,4]) with header_col_0: if st.session_state["open_shot"] != shot.uuid: @@ -36,21 +36,12 @@ def shot_keyframe_element(shot_uuid, items_per_row, **kwargs): st.rerun() if st.session_state["open_shot"] == shot.uuid: - with header_col_1: - name = st.text_input("Name:", value=shot.name, max_chars=40) - if name != shot.name: - data_repo.update_shot(shot_uuid, name=name) - st.success("Success") - time.sleep(0.3) - st.rerun() + + with header_col_1: + update_shot_name(shot, data_repo) with header_col_2: - duration = st.number_input("Duration:", value=shot.duration) - if duration != shot.duration: - data_repo.update_shot(shot_uuid, duration=duration) - st.success("Success") - time.sleep(0.3) - st.rerun() + update_shot_duration(shot, data_repo) with header_col_3: col2, col3, col4 = st.columns(3) @@ -66,6 +57,9 @@ def 
shot_keyframe_element(shot_uuid, items_per_row, **kwargs): change_shot_toggle = st_memory.toggle("Change Shot", value=False, key="change_shot_toggle") st.markdown("***") + else: + with header_col_1: + st.info(f"**{shot.name}**") grid = st.columns(items_per_row) @@ -91,7 +85,7 @@ def shot_keyframe_element(shot_uuid, items_per_row, **kwargs): # else: # st.warning("No keyframes present") - st.markdown("***") + st.markdown("***") if st.session_state["open_shot"] == shot.uuid: @@ -122,8 +116,21 @@ def shot_keyframe_element(shot_uuid, items_per_row, **kwargs): time.sleep(0.3) st.rerun() - - +def update_shot_name(shot, data_repo): + name = st.text_area("Name:", value=shot.name, max_chars=40, height=15) + if name != shot.name: + data_repo.update_shot(shot.uuid, name=name) + st.success("Success") + time.sleep(0.3) + st.rerun() + +def update_shot_duration(shot, data_repo): + duration = st.number_input("Duration:", value=shot.duration) + if duration != shot.duration: + data_repo.update_shot(shot.uuid, duration=duration) + st.success("Success") + time.sleep(0.3) + st.rerun() def shot_video_element(shot_uuid): data_repo = DataRepo() @@ -160,7 +167,7 @@ def timeline_view_buttons(idx, shot_uuid, replace_image_widget_toggle, copy_fram if copy_frame_toggle: with btn3: - if st.button("🔁", key=f"copy_frame_{timing_list[idx].uuid}"): + if st.button("🔁", key=f"copy_frame_{timing_list[idx].uuid}", use_container_width=True): pil_image = generate_pil_image(timing_list[idx].primary_image.location) add_key_frame(pil_image, False, st.session_state['shot_uuid'], timing_list[idx].aux_frame_index+1, refresh_state=False) st.rerun() From ac6600c65fb4c6317a421b8ce815285a75eac0c5 Mon Sep 17 00:00:00 2001 From: piyushK52 Date: Fri, 3 Nov 2023 22:38:13 +0530 Subject: [PATCH 149/164] shot cached --- .../widgets/frame_movement_widgets.py | 6 +- ui_components/widgets/frame_selector.py | 6 +- ui_components/widgets/sidebar_logger.py | 2 +- utils/cache/cache.py | 1 + utils/cache/cache_methods.py | 131 
+++++++++++++++++- 5 files changed, 136 insertions(+), 10 deletions(-) diff --git a/ui_components/widgets/frame_movement_widgets.py b/ui_components/widgets/frame_movement_widgets.py index 57226bfe..900d3ef9 100644 --- a/ui_components/widgets/frame_movement_widgets.py +++ b/ui_components/widgets/frame_movement_widgets.py @@ -44,7 +44,7 @@ def move_frame(direction, timing_uuid): data_repo.update_specific_timing(timing.uuid, aux_frame_index=timing.aux_frame_index - 1) elif direction == "Down": - timing_list = data_repo.get_timing_list_from_shot(shot_uuid=timing.shot.uuid) + timing_list = data_repo.get_timing_list_from_shot(timing.shot.uuid) if timing.aux_frame_index == len(timing_list) - 1: st.error("This is the last frame") time.sleep(0.5) @@ -90,7 +90,7 @@ def delete_frame(timing_uuid): timing: InternalFrameTimingObject = data_repo.get_timing_from_uuid(timing_uuid) shot_uuid = timing.shot.uuid next_timing = data_repo.get_next_timing(timing_uuid) - timing_list = data_repo.get_timing_list_from_shot(shot_uuid=timing.shot.uuid) + timing_list = data_repo.get_timing_list_from_shot(timing.shot.uuid) if len(timing_list) == 1: st.error("can't delete the only image present in the shot") @@ -98,7 +98,7 @@ def delete_frame(timing_uuid): return data_repo.delete_timing_from_uuid(timing.uuid) - timing_list = data_repo.get_timing_list_from_shot(shot_uuid=shot_uuid) + timing_list = data_repo.get_timing_list_from_shot(shot_uuid) # this is the last frame if not next_timing: diff --git a/ui_components/widgets/frame_selector.py b/ui_components/widgets/frame_selector.py index c0d6406e..2ef80014 100644 --- a/ui_components/widgets/frame_selector.py +++ b/ui_components/widgets/frame_selector.py @@ -13,7 +13,7 @@ def frame_selector_widget(): data_repo = DataRepo() time1, time2 = st.columns([1,1]) - timing_list = data_repo.get_timing_list_from_shot(shot_uuid=st.session_state["shot_uuid"]) + timing_list = data_repo.get_timing_list_from_shot(st.session_state["shot_uuid"]) shot = 
data_repo.get_shot_from_uuid(st.session_state["shot_uuid"]) shot_list = data_repo.get_shot_list(shot.project.uuid) len_timing_list = len(timing_list) if len(timing_list) > 0 else 1.0 @@ -102,7 +102,7 @@ def frame_selector_widget(): def update_current_frame_index(index): data_repo = DataRepo() - timing_list = data_repo.get_timing_list_from_shot(shot_uuid=st.session_state["shot_uuid"]) + timing_list = data_repo.get_timing_list_from_shot(st.session_state["shot_uuid"]) st.session_state['current_frame_uuid'] = timing_list[index - 1].uuid @@ -118,7 +118,7 @@ def update_current_frame_index(index): def update_current_shot_index(index): data_repo = DataRepo() - shot_list = data_repo.get_shot_list(project_uuid=st.session_state["project_uuid"]) + shot_list = data_repo.get_shot_list(st.session_state["project_uuid"]) st.session_state['shot_uuid'] = shot_list[index - 1].uuid diff --git a/ui_components/widgets/sidebar_logger.py b/ui_components/widgets/sidebar_logger.py index 8ffb107e..bb39625c 100644 --- a/ui_components/widgets/sidebar_logger.py +++ b/ui_components/widgets/sidebar_logger.py @@ -12,7 +12,7 @@ def sidebar_logger(shot_uuid): data_repo = DataRepo() shot = data_repo.get_shot_from_uuid(shot_uuid) - timing_list = data_repo.get_timing_list_from_shot(shot_uuid=shot_uuid) + timing_list = data_repo.get_timing_list_from_shot(shot_uuid) a1, _, a3 = st.columns([1, 0.2, 1]) refresh_disabled = False # not any(log.status in [InferenceStatus.QUEUED.value, InferenceStatus.IN_PROGRESS.value] for log in log_list) diff --git a/utils/cache/cache.py b/utils/cache/cache.py index ec47b69c..851973d8 100644 --- a/utils/cache/cache.py +++ b/utils/cache/cache.py @@ -10,6 +10,7 @@ class CacheKey(ExtendedEnum): AI_MODEL = "ai_model" LOGGED_USER = "logged_user" FILE = "file" + SHOT = "shot" class StCache: diff --git a/utils/cache/cache_methods.py b/utils/cache/cache_methods.py index 60663969..a9b9dd0a 100644 --- a/utils/cache/cache_methods.py +++ b/utils/cache/cache_methods.py @@ -285,6 +285,7 
@@ def _cache_create_timing(self, *args, **kwargs): timing = original_func(self, *args, **kwargs) if timing: StCache.delete_all(CacheKey.TIMING_DETAILS.value) + StCache.delete_all(CacheKey.SHOT.value) return timing @@ -297,6 +298,9 @@ def _cache_update_specific_timing(self, *args, **kwargs): if status: StCache.delete_all(CacheKey.TIMING_DETAILS.value) + # deleting shots as well. for e.g. timing update can be moving it from + # one shot to another + StCache.delete_all(CacheKey.SHOT.value) # updating the timing list timing_func = getattr(cls, '_original_get_timing_from_uuid') @@ -335,6 +339,7 @@ def _cache_delete_timing_from_uuid(self, *args, **kwargs): if status: StCache.delete(args[0],CacheKey.TIMING_DETAILS.value) + StCache.delete_all(CacheKey.SHOT.value) setattr(cls, '_original_delete_timing_from_uuid', cls.delete_timing_from_uuid) setattr(cls, "delete_timing_from_uuid", _cache_delete_timing_from_uuid) @@ -345,6 +350,7 @@ def _cache_remove_existing_timing(self, *args, **kwargs): if status: StCache.delete_all(CacheKey.TIMING_DETAILS.value) + StCache.delete_all(CacheKey.SHOT.value) setattr(cls, '_original_remove_existing_timing', cls.remove_existing_timing) setattr(cls, "remove_existing_timing", _cache_remove_existing_timing) @@ -355,6 +361,7 @@ def _cache_remove_primary_frame(self, *args, **kwargs): if status: StCache.delete_all(CacheKey.TIMING_DETAILS.value) + StCache.delete_all(CacheKey.SHOT.value) setattr(cls, '_original_remove_primary_frame', cls.remove_primary_frame) setattr(cls, "remove_primary_frame", _cache_remove_primary_frame) @@ -365,6 +372,7 @@ def _cache_remove_source_image(self, *args, **kwargs): if status: StCache.delete_all(CacheKey.TIMING_DETAILS.value) + StCache.delete_all(CacheKey.SHOT.value) setattr(cls, '_original_remove_source_image', cls.remove_source_image) setattr(cls, "remove_source_image", _cache_remove_source_image) @@ -450,9 +458,10 @@ def _cache_delete_app_setting(self, *args, **kwargs): # ------------------ PROJECT SETTING METHODS 
--------------------- def _cache_get_project_setting(self, *args, **kwargs): - project_setting = StCache.get(args[0], CacheKey.PROJECT_SETTING.value) - if project_setting: - return project_setting + project_setting_list = StCache.get_all(CacheKey.PROJECT_SETTING.value) + for ele in project_setting_list: + if str(ele.project.uuid) == str(args[0]): + return ele original_func = getattr(cls, '_original_get_project_setting') project_setting = original_func(self, *args, **kwargs) @@ -547,4 +556,120 @@ def _cache_google_user_login(self, **kwargs): setattr(cls, '_original_google_user_login', cls.google_user_login) setattr(cls, "google_user_login", _cache_google_user_login) + # ---------------------- SHOT METHODS --------------------- + def _cache_get_shot_from_uuid(self, *args, **kwargs): + shot_list = StCache.get_all(CacheKey.SHOT.value) + if shot_list and len(shot_list): + for shot in shot_list: + if shot.uuid == args[0]: + return shot + + original_func = getattr(cls, '_original_get_shot_from_uuid') + shot = original_func(self, *args, **kwargs) + + return shot + + setattr(cls, '_original_get_shot_from_uuid', cls.get_shot_from_uuid) + setattr(cls, "get_shot_from_uuid", _cache_get_shot_from_uuid) + + def _cache_get_shot_from_number(self, *args, **kwargs): + shot_list = StCache.get_all(CacheKey.SHOT.value) + if shot_list and len(shot_list): + for shot in shot_list: + if shot.project.uuid == args[0] and shot.shot_idx == kwargs['shot_number']: + return shot + + original_func = getattr(cls, '_original_get_shot_from_number') + shot = original_func(self, *args, **kwargs) + + return shot + + setattr(cls, '_original_get_shot_from_number', cls.get_shot_from_number) + setattr(cls, "get_shot_from_number", _cache_get_shot_from_number) + + def _cache_get_shot_list(self, *args, **kwargs): + shot_list = StCache.get_all(CacheKey.SHOT.value) + if shot_list and len(shot_list): + res = [] + for shot in shot_list: + if shot.project.uuid == args[0]: + res.append(shot) + if len(res): + return 
res + + original_func = getattr(cls, '_original_get_shot_list') + shot_list = original_func(self, *args, **kwargs) + if shot_list: + StCache.add_all(shot_list, CacheKey.SHOT.value) + + return shot_list + + setattr(cls, '_original_get_shot_list', cls.get_shot_list) + setattr(cls, "get_shot_list", _cache_get_shot_list) + + def _cache_create_shot(self, *args, **kwargs): + original_func = getattr(cls, '_original_create_shot') + shot = original_func(self, *args, **kwargs) + + if shot: + # deleting all the shots as this could have affected other shots as well + # for e.g. shot_idx shift + StCache.delete_all(CacheKey.SHOT.value) + + return shot + + setattr(cls, '_original_create_shot', cls.create_shot) + setattr(cls, "create_shot", _cache_create_shot) + + def _cache_update_shot(self, *args, **kwargs): + original_func = getattr(cls, '_original_update_shot') + status = original_func(self, *args, **kwargs) + + if status: + StCache.delete_all(CacheKey.SHOT.value) + + return status + + setattr(cls, '_original_update_shot', cls.update_shot) + setattr(cls, "update_shot", _cache_update_shot) + + def _cache_delete_shot(self, *args, **kwargs): + original_func = getattr(cls, '_original_delete_shot') + status = original_func(self, *args, **kwargs) + + if status: + StCache.delete(args[0], CacheKey.SHOT.value) + + return status + + setattr(cls, '_original_delete_shot', cls.delete_shot) + setattr(cls, "delete_shot", _cache_delete_shot) + + def _cache_add_interpolated_clip(self, *args, **kwargs): + original_func = getattr(cls, '_original_add_interpolated_clip') + status = original_func(self, *args, **kwargs) + + if status: + StCache.delete(args[0], CacheKey.SHOT.value) + + return status + + setattr(cls, '_original_add_interpolated_clip', cls.add_interpolated_clip) + setattr(cls, "add_interpolated_clip", _cache_add_interpolated_clip) + + def _cache_get_timing_list_from_shot(self, *args, **kwargs): + shot_list = StCache.get_all(CacheKey.SHOT.value) + if shot_list and len(shot_list): + for 
shot in shot_list: + if str(shot.uuid) == str(args[0]): + return shot.timing_list + + original_func = getattr(cls, '_original_get_timing_list_from_shot') + timing_list = original_func(self, *args, **kwargs) + + return timing_list + + setattr(cls, '_original_get_timing_list_from_shot', cls.get_timing_list_from_shot) + setattr(cls, "get_timing_list_from_shot", _cache_get_timing_list_from_shot) + return cls \ No newline at end of file From cf90d722d3180c9c1b1cc2f6516ddb85a2937650 Mon Sep 17 00:00:00 2001 From: peter942 Date: Sat, 4 Nov 2023 04:26:42 +0000 Subject: [PATCH 150/164] Lots of improvements --- ui_components/banodoco-website | 1 + .../components/frame_styling_page.py | 2 +- ui_components/setup.py | 9 +- .../widgets/animation_style_element.py | 79 ++++++++--- ui_components/widgets/explorer_element.py | 70 +++++----- .../widgets/frame_movement_widgets.py | 1 + ui_components/widgets/frame_selector.py | 29 +++- ui_components/widgets/shot_view.py | 132 ++++++++++-------- 8 files changed, 206 insertions(+), 117 deletions(-) create mode 160000 ui_components/banodoco-website diff --git a/ui_components/banodoco-website b/ui_components/banodoco-website new file mode 160000 index 00000000..50bc5567 --- /dev/null +++ b/ui_components/banodoco-website @@ -0,0 +1 @@ +Subproject commit 50bc5567ba64916da7ae5fe69c3e7aa140907e3f diff --git a/ui_components/components/frame_styling_page.py b/ui_components/components/frame_styling_page.py index 2a811b34..d8c81877 100644 --- a/ui_components/components/frame_styling_page.py +++ b/ui_components/components/frame_styling_page.py @@ -172,7 +172,7 @@ def frame_styling_page(shot_uuid: str): if st_memory.toggle("Open", value=True, key="explorer_shortlist_toggle"): project_setting = data_repo.get_project_setting(shot.project.uuid) page_number = st.radio("Select page", options=range(1, project_setting.total_gallery_pages + 1), horizontal=True) - gallery_image_view(shot.project.uuid, page_number=page_number, num_items_per_page=10, 
open_detailed_view_for_all=False, shortlist=True, num_columns=2) + gallery_image_view(shot.project.uuid, page_number=page_number, num_items_per_page=10, open_detailed_view_for_all=False, shortlist=True, num_columns=2,view="sidebar") timeline_view(shot_uuid, "Key Frames") elif st.session_state['page'] == "Shots": diff --git a/ui_components/setup.py b/ui_components/setup.py index 8b6138db..9f1eb921 100644 --- a/ui_components/setup.py +++ b/ui_components/setup.py @@ -125,8 +125,8 @@ def setup_app_ui(): with st.sidebar: view_types = ["Explorer","Timeline","Individual"] - if 'frame_styling_view_type_index' not in st.session_state: - st.session_state['frame_styling_view_type_index'] = 0 + if 'frame_styling_view_type_manual_select' not in st.session_state: + st.session_state['frame_styling_view_type_manual_select'] = 0 st.session_state['frame_styling_view_type'] = "Explorer" st.session_state['change_view_type'] = False @@ -149,9 +149,12 @@ def setup_app_ui(): key="section-selecto1r", styles={"nav-link": {"font-size": "15px", "margin":"0px", "--hover-color": "#eee"}, "nav-link-selected": {"background-color": "green"}}, - manual_select=st.session_state['frame_styling_view_type_index'] + manual_select=st.session_state['frame_styling_view_type_manual_select'] ) + if st.session_state['frame_styling_view_type_manual_select'] != None: + st.session_state['frame_styling_view_type_manual_select'] = None + if st.session_state['frame_styling_view_type'] != "Explorer": pages = CreativeProcessType.value_list() else: diff --git a/ui_components/widgets/animation_style_element.py b/ui_components/widgets/animation_style_element.py index 4a1a214e..d764ed5f 100644 --- a/ui_components/widgets/animation_style_element.py +++ b/ui_components/widgets/animation_style_element.py @@ -6,21 +6,50 @@ from ui_components.methods.video_methods import create_single_interpolated_clip from utils.data_repo.data_repo import DataRepo from utils.ml_processor.motion_module import AnimateDiffCheckpoint +from 
ui_components.models import InternalFrameTimingObject, InternalShotObject def animation_style_element(shot_uuid): motion_modules = AnimateDiffCheckpoint.get_name_list() variant_count = 1 current_animation_style = AnimationStyleType.INTERPOLATION.value # setting a default value + data_repo = DataRepo() + if current_animation_style == AnimationStyleType.INTERPOLATION.value: - animation_tool = st.radio("Animation Tool:", options=AnimationToolType.value_list(), key="animation_tool", horizontal=True) - video_resolution = None - settings = { - "animation_tool": animation_tool - } + animation_type = st.radio("Animation Interpolation:", options=['Creative Interpolation', "Video To Video"], key="animation_tool", horizontal=True, disabled=True) + + - if animation_tool == AnimationToolType.ANIMATEDIFF.value: + if animation_type == "Creative Interpolation": + + st.markdown("***") + + shot: InternalShotObject = data_repo.get_shot_from_uuid(st.session_state["shot_uuid"]) + timing_list: List[InternalFrameTimingObject] = shot.timing_list + st.markdown("#### Keyframe Settings") + if timing_list and len(timing_list): + columns = st.columns(len(timing_list)) # Create columns equal to the number of images + for idx, timing in enumerate(timing_list): + if timing.primary_image and timing.primary_image.location: + columns[idx].image(timing.primary_image.location, use_column_width=True) + else: + columns[idx].warning("No primary image present") + prompt = columns[idx].text_area(f"Prompt {idx+1}", value=timing.prompt, key=f"prompt_{idx+1}") + + + else: + st.warning("No keyframes present") + + st.markdown("***") + video_resolution = None + + settings = { + "animation_tool": animation_type + } + + st.markdown("#### Overall Settings") + c1, c2 = st.columns([1,1]) with c1: motion_module = st.selectbox("Which motion module would you like to use?", options=motion_modules, key="motion_module") @@ -34,27 +63,39 @@ def animation_style_element(shot_uuid): ] sd_model = st.selectbox("Which Stable 
Diffusion model would you like to use?", options=sd_model_list, key="sd_model") - prompt_column_1, prompt_column_2 = st.columns([1, 1]) + d1, d2 = st.columns([1, 1]) - with prompt_column_1: - positive_prompt = st.text_area("Positive Prompt:", value=DefaultProjectSettingParams.batch_prompt, key="positive_prompt") + with d1: + ip_adapter_strength = st.slider("IP Adapter Strength", min_value=0.0, max_value=1.0, value=0.5, step=0.1, key="ip_adapter_strength") - with prompt_column_2: - negative_prompt = st.text_area("Negative Prompt:", value=DefaultProjectSettingParams.batch_negative_prompt, key="negative_prompt") + with d2: + ip_adapter_noise = st.slider("IP Adapter Noise", min_value=0.0, max_value=1.0, value=0.5, step=0.1, key="ip_adapter_noise") + + interpolation_style = st.selectbox("Interpolation Style", options=["Big Dipper", "Linear", "Slerp", "Custom"], key="interpolation_style") + if interpolation_style == "Big Dipper": + interpolation_settings = "0=1.0,1=0.99,2=0.97,3=0.95,4=0.92,5=0.9,6=0.86,7=0.83,8=0.79,9=0.75,10=0.71,11=0.67,12=0.62,13=0.58,14=0.53,15=0.49,16=0.44,17=0.39,18=0.35,19=0.31,20=0.26,21=0.22,22=0.19,23=0.15,24=0.12,25=0.09,26=0.06,27=0.04,28=0.02,29=0.01,30=0.0,31=0.0,32=0.0,33=0.0,34=0.0,35=0.0,36=0.0,37=0.0,38=0.0,39=0.0,40=0.0,41=0.0,42=0.0,43=0.0,44=0.0,45=0.0,46=0.0,47=0.0,48=0.0,49=0.0,50=0.0,51=0.0,52=0.0,53=0.0,54=0.0,55=0.0,56=0.0,57=0.0,58=0.0,59=0.0,60=0.0,61=0.0,62=0.0,63=0.0" + elif interpolation_style == "Linear": + interpolation_settings = "0=1.0,1=0.99,2=0.97,3=0.95,4=0.92,5=0.9,6=0.86,7=0.83,8=0.79,9=0.75,10=0.71,11=0.67,12=0.62,13=0.58,14=0.53,15=0.49,16=0.44,17=0.39,18=0.35,19=0.31,20=0.26,21=0.22,22=0.19,23=0.15,24=0.12,25=0.09,26=0.06,27=0.04,28=0.02,29=0.01,30=0.0,31=0.0,32=0.0,33=0.0,34=0.0,35=0.0,36=0.0,37=0.0,38=0.0,39=0.0,40=0.0,41=0.0,42=0.0,43=0.0,44=0.0,45=0.0,46=0.0,47=0.0,48=0.0,49=0.0,50=0.0,51=0.0,52=0.0,53=0.0,54=0.0,55=0.0,56=0.0,57=0.0,58=0.0,59=0.0,60=0.0,61=0.0,62=0.0,63=0.0" + elif interpolation_style == 
"Slerp": + interpolation_settings = "0=1.0,1=0.99,2=0.97,3=0.95,4=0.92,5=0.9,6=0.86,7=0.83,8=0.79,9=0.75,10=0.71,11=0.67,12=0.62,13=0.58,14=0.53,15=0.49,16=0.44,17=0.39,18=0.35,19=0.31,20=0.26,21=0.22,22=0.19,23=0.15,24=0.12,25=0.09,26=0.06,27=0.04,28=0.02,29=0.01,30=0.0,31=0.0,32=0.0,33=0.0,34=0.0,35=0.0,36=0.0,37=0.0,38=0.0,39=0.0,40=0.0,41=0.0,42=0.0,43=0.0,44=0.0,45=0.0,46=0.0,47=0.0,48=0.0,49=0.0,50=0.0,51=0.0,52=0.0,53=0.0,54=0.0,55=0.0,56=0.0,57=0.0,58=0.0,59=0.0,60=0.0,61=0.0,62=0.0,63=0.0" + if interpolation_style == "Custom": + interpolation_settings = st.text_area("Custom Interpolation Style", value="0=1.0,1=0.99,2=0.97,3=0.95,4=0.92,5=0.9,6=0.86,7=0.83,8=0.79,9=0.75,10=0.71,11=0.67,12=0.62,13=0.58,14=0.53,15=0.49,16=0.44,17=0.39,18=0.35,19=0.31,20=0.26,21=0.22,22=0.19,23=0.15,24=0.12,25=0.09,26=0.06,27=0.04,28=0.02,29=0.01,30=0.0,31=0.0,32=0.0,33=0.0,34=0.0,35=0.0,36=0.0,37=0.0,38=0.0,39=0.0,40=0.0,41=0.0,42=0.0,43=0.0,44=0.0,45=0.0,46=0.0,47=0.0,48=0.0,49=0.0,50=0.0,51=0.0,52=0.0,53=0.0,54=0.0,55=0.0,56=0.0,57=0.0,58=0.0,59=0.0,60=0.0,61=0.0,62=0.0,63=0.0", key="custom_interpolation_style") - animate_col_1, animate_col_2, _ = st.columns([1, 1, 2]) + st.markdown("***") + st.markdown("#### Generation Settings") + animate_col_1, _, _ = st.columns([1, 1, 2]) with animate_col_1: - img_dimension_list = ["512x512", "512x768", "768x512"] - img_dimension = st.selectbox("Image Dimension:", options=img_dimension_list, key="img_dimension") - with animate_col_2: + # img_dimension_list = ["512x512", "512x768", "768x512"] + # img_dimension = st.selectbox("Image Dimension:", options=img_dimension_list, key="img_dimension") + img_dimension = "512x512" variant_count = st.number_input("How many variants?", min_value=1, max_value=100, value=1, step=1, key="variant_count") normalise_speed = st.checkbox("Normalise Speed", value=True, key="normalise_speed") settings.update( - positive_prompt=positive_prompt, - negative_prompt=negative_prompt, + # 
positive_prompt=positive_prompt, + # negative_prompt=negative_prompt, image_dimension=img_dimension, sampling_steps=30, motion_module=motion_module, @@ -102,11 +143,13 @@ def animation_style_element(shot_uuid): st.session_state['travel_list'][i] = {'prompt': new_prompt, 'frame_count': new_frame_count} st.markdown("***") + st.markdown("***") + animate_col_1, animate_col_2 = st.columns([1, 3]) with animate_col_1: variant_count = st.number_input("How many variants?", min_value=1, max_value=100, value=1, step=1, key="variant_count") - + if st.button("Generate Animation Clip", key="generate_animation_clip"): vid_quality = "full" if video_resolution == "Full Resolution" else "preview" st.write("Generating animation clip...") diff --git a/ui_components/widgets/explorer_element.py b/ui_components/widgets/explorer_element.py index 1c32c82f..34deedfc 100644 --- a/ui_components/widgets/explorer_element.py +++ b/ui_components/widgets/explorer_element.py @@ -120,10 +120,11 @@ def explorer_element(project_uuid): with tab2: k1,k2 = st.columns([5,1]) shortlist_page_number = k1.radio("Select page", options=range(1, project_setting.total_shortlist_gallery_pages + 1), horizontal=True, key="shortlist_gallery") - open_detailed_view_for_all = k2.toggle("Open detailed view for all:", key='shortlist_gallery_toggle') + with k2: + open_detailed_view_for_all = st_memory.toggle("Open detailed view for all:", key='shortlist_gallery_toggle') gallery_image_view(project_uuid, shortlist_page_number, num_items_per_page, open_detailed_view_for_all, True, num_columns) -def gallery_image_view(project_uuid,page_number=1,num_items_per_page=20, open_detailed_view_for_all=False, shortlist=False, num_columns=2): +def gallery_image_view(project_uuid,page_number=1,num_items_per_page=20, open_detailed_view_for_all=False, shortlist=False, num_columns=2, view="main"): data_repo = DataRepo() project_settings = data_repo.get_project_setting(project_uuid) @@ -151,39 +152,44 @@ def 
gallery_image_view(project_uuid,page_number=1,num_items_per_page=20, open_de if gallery_image_list and len(gallery_image_list): start_index = 0 end_index = min(start_index + num_items_per_page, total_image_count) - + shot_names = [s.name for s in shot_list] + shot_names.append('**Create New Shot**') + shot_names.insert(0, '') for i in range(start_index, end_index, num_columns): cols = st.columns(num_columns) for j in range(num_columns): if i + j < len(gallery_image_list): with cols[j]: st.image(gallery_image_list[i + j].location, use_column_width=True) - if st.toggle(f'Open Details For #{(page_number - 1) * num_items_per_page + i + j + 1}', open_detailed_view_for_all, key=f"open_gallery_details_{gallery_image_list[i + j].uuid}"): - if not shortlist: - if st.button("Add To Shortlist", key=f"shortlist_{gallery_image_list[i + j].uuid}",use_container_width=True): - data_repo.update_file(gallery_image_list[i + j].uuid, tag=InternalFileTag.SHORTLISTED_GALLERY_IMAGE.value) - st.success("Added To Shortlist") - time.sleep(0.3) - st.rerun() - else: - if st.button("Remove From Shortlist", key=f"shortlist_{gallery_image_list[i + j].uuid}",use_container_width=True): - data_repo.update_file(gallery_image_list[i + j].uuid, tag=InternalFileTag.GALLERY_IMAGE.value) - st.success("Removed From Shortlist") - time.sleep(0.3) - st.rerun() - - if gallery_image_list[i + j].inference_log: - log = data_repo.get_inference_log_from_uuid(gallery_image_list[i + j].inference_log.uuid) - if log: - input_params = json.loads(log.input_params) - prompt = input_params.get('prompt', 'No prompt found') - model = json.loads(log.output_details)['model_name'].split('/')[-1] - st.info(f"Prompt: {prompt}") - st.info(f"Model: {model}") - shot_names = [s.name for s in shot_list] - shot_names.append('**Create New Shot**') - shot_name = st.selectbox('Shot Name', shot_names, key=f"current_shot_sidebar_selector_{gallery_image_list[i + j].uuid}") - + + if shortlist: + if st.button("Remove from shortlist ➖", 
key=f"shortlist_{gallery_image_list[i + j].uuid}",use_container_width=True, help="Remove from shortlist"): + data_repo.update_file(gallery_image_list[i + j].uuid, tag=InternalFileTag.GALLERY_IMAGE.value) + st.success("Removed From Shortlist") + time.sleep(0.3) + st.rerun() + + else: + + if st.button("Add to shortlist ➕", key=f"shortlist_{gallery_image_list[i + j].uuid}",use_container_width=True, help="Add to shortlist"): + data_repo.update_file(gallery_image_list[i + j].uuid, tag=InternalFileTag.SHORTLISTED_GALLERY_IMAGE.value) + st.success("Added To Shortlist") + time.sleep(0.3) + st.rerun() + + if gallery_image_list[i + j].inference_log: + log = data_repo.get_inference_log_from_uuid(gallery_image_list[i + j].inference_log.uuid) + if log: + input_params = json.loads(log.input_params) + prompt = input_params.get('prompt', 'No prompt found') + model = json.loads(log.output_details)['model_name'].split('/')[-1] + if view == "main": + with st.expander("Prompt Details", expanded=open_detailed_view_for_all): + st.info(f"**Prompt:** {prompt}\n\n**Model:** {model}") + + shot_name = st.selectbox('Add to shot:', shot_names, key=f"current_shot_sidebar_selector_{gallery_image_list[i + j].uuid}") + + if shot_name != "": if shot_name == "**Create New Shot**": shot_name = st.text_input("New shot name:", max_chars=40, key=f"shot_name_{gallery_image_list[i+j].uuid}") if st.button("Create new shot", key=f"create_new_{gallery_image_list[i + j].uuid}", use_container_width=True): @@ -196,15 +202,15 @@ def gallery_image_view(project_uuid,page_number=1,num_items_per_page=20, open_de else: if st.button(f"Add to shot", key=f"add_{gallery_image_list[i + j].uuid}", help="Promote this variant to the primary image", use_container_width=True): shot_number = shot_names.index(shot_name) + 1 - shot_uuid = shot_list[shot_number - 1].uuid + shot_uuid = shot_list[shot_number - 2].uuid add_key_frame(gallery_image_list[i + j], False, shot_uuid, len(data_repo.get_timing_list_from_shot(shot_uuid)), 
refresh_state=False) # removing this from the gallery view data_repo.update_file(gallery_image_list[i + j].uuid, tag="") st.rerun() - else: - st.warning("No data found") else: st.warning("No data found") + else: + st.warning("No data found") st.markdown("***") else: diff --git a/ui_components/widgets/frame_movement_widgets.py b/ui_components/widgets/frame_movement_widgets.py index 998632da..6337181c 100644 --- a/ui_components/widgets/frame_movement_widgets.py +++ b/ui_components/widgets/frame_movement_widgets.py @@ -170,6 +170,7 @@ def jump_to_single_frame_view_button(display_number, timing_list, src): st.session_state['current_frame_uuid'] = timing_list[st.session_state['current_frame_index'] - 1].uuid st.session_state['frame_styling_view_type'] = "Individual" st.session_state['change_view_type'] = True + st.session_state['frame_styling_view_type_manual_select'] = 2 st.session_state['shot_uuid'] = timing_list[st.session_state['current_frame_index'] - 1].shot.uuid st.session_state['prev_shot_index'] = st.session_state['current_shot_index'] = timing_list[st.session_state['current_frame_index'] - 1].shot.shot_idx st.session_state["manual_select"] = 0 diff --git a/ui_components/widgets/frame_selector.py b/ui_components/widgets/frame_selector.py index 08f59e80..ce164c9a 100644 --- a/ui_components/widgets/frame_selector.py +++ b/ui_components/widgets/frame_selector.py @@ -1,8 +1,8 @@ from typing import List import streamlit as st -from ui_components.widgets.frame_movement_widgets import delete_frame, replace_image_widget,jump_to_single_frame_view_button +from ui_components.widgets.frame_movement_widgets import delete_frame_button, replace_image_widget,jump_to_single_frame_view_button from ui_components.widgets.image_carousal import display_image -from ui_components.widgets.shot_view import update_shot_name,update_shot_duration +from ui_components.widgets.shot_view import update_shot_name,update_shot_duration, delete_shot_button from ui_components.models import 
InternalFrameTimingObject, InternalShotObject from utils.data_repo.data_repo import DataRepo from ui_components.constants import WorkflowStageType @@ -18,15 +18,21 @@ def frame_selector_widget(): shot = data_repo.get_shot_from_uuid(st.session_state["shot_uuid"]) shot_list = data_repo.get_shot_list(shot.project.uuid) len_timing_list = len(timing_list) if len(timing_list) > 0 else 1.0 - st.progress(st.session_state['current_frame_index'] / len_timing_list) - + if st.session_state['page'] == "Key Frames": + st.progress(st.session_state['current_frame_index'] / len_timing_list) + elif st.session_state['page'] == "Shots": + st.progress(st.session_state['current_shot_index'] / len(shot_list)) with time1: if 'prev_shot_index' not in st.session_state: st.session_state['prev_shot_index'] = shot.shot_idx shot_names = [s.name for s in shot_list] - shot_name = st.selectbox('Shot name:', shot_names, key="current_shot_sidebar_selector") + shot_name = st.selectbox('Shot name:', shot_names, key="current_shot_sidebar_selector",index=shot_names.index(shot.name)) + + if shot_name != shot.name: + st.session_state["shot_uuid"] = shot_list[shot_names.index(shot_name)].uuid + st.rerun() if not ('current_shot_index' in st.session_state and st.session_state['current_shot_index']): st.session_state['current_shot_index'] = shot_names.index(shot_name) + 1 @@ -56,7 +62,9 @@ def frame_selector_widget(): with a2: st.caption("Replace styled image") replace_image_widget(st.session_state['current_frame_uuid'], stage=WorkflowStageType.STYLED.value) - + + st.markdown("---") + st.info("In Context:") shot_list = data_repo.get_shot_list(shot.project.uuid) shot: InternalShotObject = data_repo.get_shot_from_uuid(st.session_state["shot_uuid"]) @@ -77,6 +85,10 @@ def frame_selector_widget(): st.markdown("---") + delete_frame_button(st.session_state['current_frame_uuid']) + + + else: shot_list = data_repo.get_shot_list(shot.project.uuid) shot: InternalShotObject = 
data_repo.get_shot_from_uuid(st.session_state["shot_uuid"]) @@ -88,6 +100,8 @@ def frame_selector_widget(): with a2: update_shot_duration(shot, data_repo) + st.markdown("---") + timing_list: List[InternalFrameTimingObject] = shot.timing_list if timing_list and len(timing_list): @@ -105,6 +119,9 @@ def frame_selector_widget(): else: st.warning("No keyframes present") + st.markdown("---") + + delete_shot_button(shot, data_repo) def update_current_frame_index(index): data_repo = DataRepo() diff --git a/ui_components/widgets/shot_view.py b/ui_components/widgets/shot_view.py index 553d94d3..9dad99dc 100644 --- a/ui_components/widgets/shot_view.py +++ b/ui_components/widgets/shot_view.py @@ -22,26 +22,26 @@ def shot_keyframe_element(shot_uuid, items_per_row, **kwargs): timing_list: List[InternalFrameTimingObject] = shot.timing_list with st.expander(f"_-_-_-_", expanded=True): - # st.info(f"##### {shot.name}") - header_col_1, header_col_0, header_col_2, header_col_3 = st.columns([1.5, 1,1,4]) + # st.info(f"##### {shot.name}") + + header_col_0, header_col_1, header_col_2, header_col_3 = st.columns([1.75, 1,1,3]) - with header_col_0: - if st.session_state["open_shot"] != shot.uuid: - if st.toggle("Open shot", key=f"shot_{shot.uuid}"): - st.session_state["open_shot"] = shot.uuid - st.rerun() - else: + + + if st.session_state["open_shot"] == shot.uuid: + + + + with header_col_0: + update_shot_name(shot, data_repo) if not st.toggle("Open shot", key=f"close_shot_{shot.uuid}", value=True): st.session_state["open_shot"] = None st.rerun() - - if st.session_state["open_shot"] == shot.uuid: - - with header_col_1: - update_shot_name(shot, data_repo) - with header_col_2: + with header_col_1: update_shot_duration(shot, data_repo) + + with header_col_3: col2, col3, col4 = st.columns(3) @@ -56,34 +56,47 @@ def shot_keyframe_element(shot_uuid, items_per_row, **kwargs): with col4: change_shot_toggle = st_memory.toggle("Change Shot", value=False, key="change_shot_toggle") - 
st.markdown("***") - else: - with header_col_1: - st.info(f"**{shot.name}**") - - - grid = st.columns(items_per_row) - # if timing_list and len(timing_list): - for idx in range(len(timing_list) + 1): - with grid[idx%items_per_row]: - if idx == len(timing_list): - if st.session_state["open_shot"] == shot.uuid: - st.info("**Add new frame to shot**") - selected_image, inherit_styling_settings, _ = add_key_frame_section(shot_uuid, False) - if st.button(f"Add key frame",type="primary",use_container_width=True): - add_key_frame(selected_image, inherit_styling_settings, shot_uuid) - st.rerun() - else: - timing = timing_list[idx] - if timing.primary_image and timing.primary_image.location: - st.image(timing.primary_image.location, use_column_width=True) - if st.session_state["open_shot"] == shot.uuid: - timeline_view_buttons(idx, shot_uuid, replace_image_widget_toggle, copy_frame_toggle, move_frames_toggle,delete_frames_toggle, change_shot_toggle) - else: - - st.warning("No primary image present") - # else: - # st.warning("No keyframes present") + + else: + + + with header_col_0: + st.info(f"##### {shot.name}") + if st.toggle("Open shot", key=f"shot_{shot.uuid}"): + st.session_state["open_shot"] = shot.uuid + st.rerun() + + with header_col_1: + st.info(f"**{shot.duration} secs**") + + + + st.markdown("***") + + for i in range(0, len(timing_list) + 1, items_per_row): + with st.container(): + grid = st.columns(items_per_row) + for j in range(items_per_row): + idx = i + j + if idx <= len(timing_list): + with grid[j]: + if idx == len(timing_list): + if st.session_state["open_shot"] == shot.uuid: + st.info("**Add new frame to shot**") + selected_image, inherit_styling_settings, _ = add_key_frame_section(shot_uuid, False) + if st.button(f"Add key frame",type="primary",use_container_width=True): + add_key_frame(selected_image, inherit_styling_settings, shot_uuid) + st.rerun() + else: + timing = timing_list[idx] + if timing.primary_image and timing.primary_image.location: + 
st.image(timing.primary_image.location, use_column_width=True) + else: + st.warning("No primary image present") + if st.session_state["open_shot"] == shot.uuid: + timeline_view_buttons(idx, shot_uuid, replace_image_widget_toggle, copy_frame_toggle, move_frames_toggle,delete_frames_toggle, change_shot_toggle) + st.markdown("***") + st.markdown("***") @@ -91,13 +104,7 @@ def shot_keyframe_element(shot_uuid, items_per_row, **kwargs): if st.session_state["open_shot"] == shot.uuid: bottom1, bottom2, bottom3 = st.columns([1,2,1]) with bottom1: - confirm_delete = st.checkbox("I know that this will delete all the frames and videos within") - help = "Check the box above to enable the delete bottom." if confirm_delete else "" - if st.button("Delete shot", disabled=(not confirm_delete), help=help, key=shot_uuid): - data_repo.delete_shot(shot_uuid) - st.success("Done!") - time.sleep(0.3) - st.rerun() + delete_shot_button(shot, data_repo) with bottom3: if st.button("Move shot up", key=f'shot_up_movement_{shot.uuid}'): @@ -115,12 +122,21 @@ def shot_keyframe_element(shot_uuid, items_per_row, **kwargs): st.error("This is the last shot") time.sleep(0.3) st.rerun() - + +def delete_shot_button(shot, data_repo): + confirm_delete = st.checkbox("I know that this will delete all the frames and videos within") + help_text = "Check the box above to enable the delete button." 
if confirm_delete else "" + if st.button("Delete shot", disabled=(not confirm_delete), help=help_text, key=shot.uuid): + data_repo.delete_shot(shot.uuid) + st.success("Shot deleted successfully") + time.sleep(0.3) + st.rerun() + def update_shot_name(shot, data_repo): - name = st.text_area("Name:", value=shot.name, max_chars=40, height=15) + name = st.text_input("Name:", value=shot.name, max_chars=25) if name != shot.name: data_repo.update_shot(shot.uuid, name=name) - st.success("Success") + st.success("Name updated!") time.sleep(0.3) st.rerun() @@ -128,7 +144,7 @@ def update_shot_duration(shot, data_repo): duration = st.number_input("Duration:", value=shot.duration) if duration != shot.duration: data_repo.update_shot(shot.uuid, duration=duration) - st.success("Success") + st.success("Duration updated!") time.sleep(0.3) st.rerun() @@ -136,15 +152,17 @@ def shot_video_element(shot_uuid): data_repo = DataRepo() shot: InternalShotObject = data_repo.get_shot_from_uuid(shot_uuid) - - st.markdown(f"### {shot.name}") + + st.markdown(f"#### {shot.name}") if shot.main_clip and shot.main_clip.location: st.video(shot.main_clip.location) else: st.warning('''No video present''') - if st.button(f"Jump to {shot.name}", key=f"btn_{shot_uuid}"): - st.success("Coming soon") + if st.button(f"Jump to shot", key=f"btn_{shot_uuid}", use_container_width=True): + st.session_state["shot_uuid"] = shot.uuid + st.session_state["frame_styling_view_type_manual_select"] = 2 + st.rerun() From 9e56590f50d82fd46ef0639168bc59b11410c55c Mon Sep 17 00:00:00 2001 From: peter942 Date: Sat, 4 Nov 2023 04:31:52 +0000 Subject: [PATCH 151/164] Lots of improvements --- ui_components/banodoco-website | 1 - 1 file changed, 1 deletion(-) delete mode 160000 ui_components/banodoco-website diff --git a/ui_components/banodoco-website b/ui_components/banodoco-website deleted file mode 160000 index 50bc5567..00000000 --- a/ui_components/banodoco-website +++ /dev/null @@ -1 +0,0 @@ -Subproject commit 
50bc5567ba64916da7ae5fe69c3e7aa140907e3f From 9acf66d97f5cb8e7a1bd9e3802530e64ee12bff6 Mon Sep 17 00:00:00 2001 From: peter942 Date: Sat, 4 Nov 2023 04:37:44 +0000 Subject: [PATCH 152/164] Lots of improvements --- ui_components/components/new_project_page.py | 27 +++++++++++++------- 1 file changed, 18 insertions(+), 9 deletions(-) diff --git a/ui_components/components/new_project_page.py b/ui_components/components/new_project_page.py index 29159679..47b7aacc 100644 --- a/ui_components/components/new_project_page.py +++ b/ui_components/components/new_project_page.py @@ -43,17 +43,26 @@ def new_project_page(): st.success(f"The dimensions of the image are {img_width} x {img_height}") # Prompt user for video dimension specifications - video_width_column, video_height_column, video_info_column = st.columns(3) + v1, v2 = st.columns(2) - - frame_sizes = ["512", "704", "768", "896", "1024"] - with video_width_column: - width = int(st.selectbox("Select video width:", options=frame_sizes, key="video_width")) - with video_height_column: - height = int(st.selectbox("Select video height:", options=frame_sizes, key="video_height")) - with video_info_column: + frame_sizes = ["512x512", "768x512", "512x768"] + with v1: + frame_size = st.selectbox("Select frame size:", options=frame_sizes, key="frame_size") + if frame_size == "512x512": + width = 512 + height = 512 + elif frame_size == "768x512": + width = 768 + height = 512 + elif frame_size == "512x768": + width = 512 + height = 768 + with v2: + st.write("") + st.write("") st.info("Uploaded images will be resized to the selected dimensions.") - + + # Prompt user for audio preferences audio = st.radio("Audio:", ["No audio", "Attach new audio"], key="audio", horizontal=True) From 50d16224abef137c2611771ef8303418ae766fc9 Mon Sep 17 00:00:00 2001 From: peter942 Date: Sat, 4 Nov 2023 04:54:00 +0000 Subject: [PATCH 153/164] Lots of improvements --- ui_components/widgets/frame_selector.py | 50 +++++++++++++------------ 
ui_components/widgets/shot_view.py | 4 +- 2 files changed, 28 insertions(+), 26 deletions(-) diff --git a/ui_components/widgets/frame_selector.py b/ui_components/widgets/frame_selector.py index ce164c9a..b9715485 100644 --- a/ui_components/widgets/frame_selector.py +++ b/ui_components/widgets/frame_selector.py @@ -72,16 +72,7 @@ def frame_selector_widget(): # shot = data_repo.get_shot_from_uuid(st.session_state["shot_uuid"]) timing_list: List[InternalFrameTimingObject] = shot.timing_list - if timing_list and len(timing_list): - grid = st.columns(3) # Change to 4 columns - for idx, timing in enumerate(timing_list): - with grid[idx % 3]: # Change to 4 columns - if timing.primary_image and timing.primary_image.location: - st.image(timing.primary_image.location, use_column_width=True) - else: - st.warning("No primary image present") - else: - st.warning("No keyframes present") + display_shot_frames(timing_list, False) st.markdown("---") @@ -104,20 +95,7 @@ def frame_selector_widget(): timing_list: List[InternalFrameTimingObject] = shot.timing_list - if timing_list and len(timing_list): - grid = st.columns(3) # Change to 3 columns - for idx, timing in enumerate(timing_list): - with grid[idx % 3]: # Change to 3 columns - if timing.primary_image and timing.primary_image.location: - st.image(timing.primary_image.location, use_column_width=True) - - # Call jump_to_single_frame_view_button function - jump_to_single_frame_view_button(idx + 1, timing_list, f"jump_to_{idx + 1}") - - else: - st.warning("No primary image present") - else: - st.warning("No keyframes present") + display_shot_frames(timing_list, True) st.markdown("---") @@ -153,3 +131,27 @@ def update_current_shot_index(index): st.session_state['frame_styling_view_type'] = "Individual View" st.rerun() + + +def display_shot_frames(timing_list: List[InternalFrameTimingObject], show_button: bool): + if timing_list and len(timing_list): + items_per_row = 3 + for i in range(0, len(timing_list), items_per_row): + with 
st.container(): + grid = st.columns(items_per_row) + for j in range(items_per_row): + idx = i + j + if idx < len(timing_list): + timing = timing_list[idx] + with grid[j]: + if timing.primary_image and timing.primary_image.location: + st.image(timing.primary_image.location, use_column_width=True) + # Show button if show_button is True + if show_button: + # Call jump_to_single_frame_view_button function + jump_to_single_frame_view_button(idx + 1, timing_list, f"jump_to_{idx + 1}") + else: + st.warning("No primary image present") + st.markdown("***") + else: + st.warning("No keyframes present") \ No newline at end of file diff --git a/ui_components/widgets/shot_view.py b/ui_components/widgets/shot_view.py index 9dad99dc..8e050b8c 100644 --- a/ui_components/widgets/shot_view.py +++ b/ui_components/widgets/shot_view.py @@ -34,7 +34,7 @@ def shot_keyframe_element(shot_uuid, items_per_row, **kwargs): with header_col_0: update_shot_name(shot, data_repo) - if not st.toggle("Open shot", key=f"close_shot_{shot.uuid}", value=True): + if not st.toggle("Expand", key=f"close_shot_{shot.uuid}", value=True): st.session_state["open_shot"] = None st.rerun() @@ -62,7 +62,7 @@ def shot_keyframe_element(shot_uuid, items_per_row, **kwargs): with header_col_0: st.info(f"##### {shot.name}") - if st.toggle("Open shot", key=f"shot_{shot.uuid}"): + if st.toggle("Expand", key=f"shot_{shot.uuid}"): st.session_state["open_shot"] = shot.uuid st.rerun() From d6b9e0d38ca51c984e9adbf73d3eb438c2e0312d Mon Sep 17 00:00:00 2001 From: peter942 Date: Sat, 4 Nov 2023 05:01:27 +0000 Subject: [PATCH 154/164] Lots of improvements --- ui_components/widgets/frame_selector.py | 15 ++++++++++----- 1 file changed, 10 insertions(+), 5 deletions(-) diff --git a/ui_components/widgets/frame_selector.py b/ui_components/widgets/frame_selector.py index b9715485..359113c5 100644 --- a/ui_components/widgets/frame_selector.py +++ b/ui_components/widgets/frame_selector.py @@ -18,10 +18,7 @@ def frame_selector_widget(): shot 
= data_repo.get_shot_from_uuid(st.session_state["shot_uuid"]) shot_list = data_repo.get_shot_list(shot.project.uuid) len_timing_list = len(timing_list) if len(timing_list) > 0 else 1.0 - if st.session_state['page'] == "Key Frames": - st.progress(st.session_state['current_frame_index'] / len_timing_list) - elif st.session_state['page'] == "Shots": - st.progress(st.session_state['current_shot_index'] / len(shot_list)) + with time1: if 'prev_shot_index' not in st.session_state: @@ -29,6 +26,8 @@ def frame_selector_widget(): shot_names = [s.name for s in shot_list] shot_name = st.selectbox('Shot name:', shot_names, key="current_shot_sidebar_selector",index=shot_names.index(shot.name)) + # find shot index based on shot name + st.session_state['current_shot_index'] = shot_names.index(shot_name) + 1 if shot_name != shot.name: st.session_state["shot_uuid"] = shot_list[shot_names.index(shot_name)].uuid @@ -37,7 +36,13 @@ def frame_selector_widget(): if not ('current_shot_index' in st.session_state and st.session_state['current_shot_index']): st.session_state['current_shot_index'] = shot_names.index(shot_name) + 1 update_current_shot_index(st.session_state['current_shot_index']) - + + + + if st.session_state['page'] == "Key Frames": + st.progress(st.session_state['current_frame_index'] / len_timing_list) + elif st.session_state['page'] == "Shots": + st.progress(st.session_state['current_shot_index'] / len(shot_list)) if st.session_state['page'] == "Key Frames": with time2: if 'prev_frame_index' not in st.session_state: From 80ec82bc33d95427330a49f851fdbbfb667f66ab Mon Sep 17 00:00:00 2001 From: peter942 Date: Sat, 4 Nov 2023 05:14:28 +0000 Subject: [PATCH 155/164] Lots of improvements --- ui_components/widgets/explorer_element.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ui_components/widgets/explorer_element.py b/ui_components/widgets/explorer_element.py index 34deedfc..47da9be2 100644 --- a/ui_components/widgets/explorer_element.py +++ 
b/ui_components/widgets/explorer_element.py @@ -121,7 +121,7 @@ def explorer_element(project_uuid): k1,k2 = st.columns([5,1]) shortlist_page_number = k1.radio("Select page", options=range(1, project_setting.total_shortlist_gallery_pages + 1), horizontal=True, key="shortlist_gallery") with k2: - open_detailed_view_for_all = st_memory.toggle("Open detailed view for all:", key='shortlist_gallery_toggle') + open_detailed_view_for_all = st_memory.toggle("Open prompt details for all:", key='shortlist_gallery_toggle') gallery_image_view(project_uuid, shortlist_page_number, num_items_per_page, open_detailed_view_for_all, True, num_columns) def gallery_image_view(project_uuid,page_number=1,num_items_per_page=20, open_detailed_view_for_all=False, shortlist=False, num_columns=2, view="main"): From d69e0f3bdaa6945970b47cfd45b785925be26e13 Mon Sep 17 00:00:00 2001 From: piyushK52 Date: Sat, 4 Nov 2023 20:11:43 +0530 Subject: [PATCH 156/164] individual view fix --- ui_components/methods/common_methods.py | 10 ++++-- .../widgets/add_key_frame_element.py | 8 ++--- ui_components/widgets/frame_selector.py | 33 ++++++++++++------- 3 files changed, 31 insertions(+), 20 deletions(-) diff --git a/ui_components/methods/common_methods.py b/ui_components/methods/common_methods.py index aa78f8fe..1090a292 100644 --- a/ui_components/methods/common_methods.py +++ b/ui_components/methods/common_methods.py @@ -251,16 +251,20 @@ def rotate_image(location, degree): return rotated_image -def save_uploaded_image(image, shot_uuid, frame_uuid, stage_type): +def save_uploaded_image(image: Union[Image.Image, str, np.ndarray, io.BytesIO, InternalFileObject], shot_uuid, frame_uuid, stage_type): ''' - saves the image file (which can be a PIL, arr or url) into the project, without + saves the image file (which can be a PIL, arr, InternalFileObject or url) into the project, without any tags or logs. 
then adds that file as the source_image/primary_image, depending on the stage selected ''' data_repo = DataRepo() try: - saved_image = save_new_image(image, shot_uuid) + if isinstance(image, InternalFileObject): + saved_image = image + else: + saved_image = save_new_image(image, shot_uuid) + # Update records based on stage_type if stage_type == WorkflowStageType.SOURCE.value: data_repo.update_specific_timing(frame_uuid, source_image_id=saved_image.uuid) diff --git a/ui_components/widgets/add_key_frame_element.py b/ui_components/widgets/add_key_frame_element.py index 3d10adb4..98bde94b 100644 --- a/ui_components/widgets/add_key_frame_element.py +++ b/ui_components/widgets/add_key_frame_element.py @@ -93,12 +93,8 @@ def add_key_frame(selected_image: Union[Image.Image, InternalFileObject], inheri timing_list = data_repo.get_timing_list_from_shot(shot_uuid) # updating the newly created frame timing - if isinstance(selected_image, InternalFileObject): - data_repo.update_specific_timing(timing_list[target_aux_frame_index].uuid, source_image_id=selected_image.uuid) - data_repo.update_specific_timing(timing_list[target_aux_frame_index].uuid, primary_image_id=selected_image.uuid) - else: - save_uploaded_image(selected_image, shot_uuid, timing_list[target_aux_frame_index].uuid, WorkflowStageType.SOURCE.value) - save_uploaded_image(selected_image, shot_uuid, timing_list[target_aux_frame_index].uuid, WorkflowStageType.STYLED.value) + save_uploaded_image(selected_image, shot_uuid, timing_list[target_aux_frame_index].uuid, WorkflowStageType.SOURCE.value) + save_uploaded_image(selected_image, shot_uuid, timing_list[target_aux_frame_index].uuid, WorkflowStageType.STYLED.value) if inherit_styling_settings == "Yes" and st.session_state['current_frame_index']: clone_styling_settings(st.session_state['current_frame_index'] - 1, timing_list[target_aux_frame_index].uuid) diff --git a/ui_components/widgets/frame_selector.py b/ui_components/widgets/frame_selector.py index 
d1adccd4..3ff9c89f 100644 --- a/ui_components/widgets/frame_selector.py +++ b/ui_components/widgets/frame_selector.py @@ -40,19 +40,28 @@ def frame_selector_widget(): if st.session_state['page'] == "Key Frames": + if st.session_state['current_frame_index'] > len_timing_list: + update_current_frame_index(len_timing_list) st.progress(st.session_state['current_frame_index'] / len_timing_list) elif st.session_state['page'] == "Shots": + if st.session_state['current_shot_index'] > len(shot_list): + update_current_shot_index(len(shot_list)) st.progress(st.session_state['current_shot_index'] / len(shot_list)) if st.session_state['page'] == "Key Frames": - with time2: - if 'prev_frame_index' not in st.session_state: - st.session_state['prev_frame_index'] = 1 - - st.session_state['current_frame_index'] = st.number_input(f"Key frame # (out of {len(timing_list)})", 1, - len(timing_list), value=st.session_state['prev_frame_index'], - step=1, key="current_frame_sidebar_selector") - - update_current_frame_index(st.session_state['current_frame_index']) + + if len(timing_list): + with time2: + if 'prev_frame_index' not in st.session_state: + st.session_state['prev_frame_index'] = 1 + + st.session_state['current_frame_index'] = st.number_input(f"Key frame # (out of {len(timing_list)})", 1, + len(timing_list), value=st.session_state['prev_frame_index'], + step=1, key="current_frame_sidebar_selector") + + update_current_frame_index(st.session_state['current_frame_index']) + else: + with time2: + st.error("No frames present") with st.expander(f"🖼️ Frame #{st.session_state['current_frame_index']} Details", expanded=True): if st_memory.toggle("Open", value=True, key="frame_toggle"): @@ -112,8 +121,9 @@ def update_current_frame_index(index): st.session_state['current_frame_uuid'] = timing_list[index - 1].uuid - if st.session_state['prev_frame_index'] != index: + if st.session_state['prev_frame_index'] != index or st.session_state['current_frame_index'] != index: 
st.session_state['prev_frame_index'] = index + st.session_state['current_frame_index'] = index st.session_state['current_frame_uuid'] = timing_list[index - 1].uuid st.session_state['reset_canvas'] = True st.session_state['frame_styling_view_type_index'] = 0 @@ -128,7 +138,8 @@ def update_current_shot_index(index): st.session_state['shot_uuid'] = shot_list[index - 1].uuid - if st.session_state['prev_shot_index'] != index: + if st.session_state['prev_shot_index'] != index or st.session_state['current_shot_index'] != index: + st.session_state['current_shot_index'] = index st.session_state['prev_shot_index'] = index st.session_state['shot_uuid'] = shot_list[index - 1].uuid st.session_state['reset_canvas'] = True From d4ebd578f15827ad5c36e34d332e42a365c69e21 Mon Sep 17 00:00:00 2001 From: piyushK52 Date: Sat, 4 Nov 2023 20:58:32 +0530 Subject: [PATCH 157/164] img prompt added --- ui_components/models.py | 6 ++---- ui_components/widgets/add_key_frame_element.py | 3 --- ui_components/widgets/animation_style_element.py | 7 +++---- 3 files changed, 5 insertions(+), 11 deletions(-) diff --git a/ui_components/models.py b/ui_components/models.py index da9afbdf..a870f90e 100644 --- a/ui_components/models.py +++ b/ui_components/models.py @@ -26,7 +26,7 @@ def location(self): return self.hosted_url @property - def inference_params(self) -> MLQueryObject: + def inference_params(self): log = self.inference_log if not log: from utils.data_repo.data_repo import DataRepo @@ -36,9 +36,7 @@ def inference_params(self) -> MLQueryObject: log = fresh_obj.inference_log if log and log.input_params: - params = json.loads(log.input_params) - if InferenceParamType.QUERY_DICT.value in params: - return MLQueryObject(**json.loads(params[InferenceParamType.QUERY_DICT.value])) + return json.loads(log.input_params) return None diff --git a/ui_components/widgets/add_key_frame_element.py b/ui_components/widgets/add_key_frame_element.py index 98bde94b..6804420f 100644 --- 
a/ui_components/widgets/add_key_frame_element.py +++ b/ui_components/widgets/add_key_frame_element.py @@ -76,9 +76,6 @@ def add_key_frame_element(shot_uuid): return selected_image, inherit_styling_settings - - - def add_key_frame(selected_image: Union[Image.Image, InternalFileObject], inherit_styling_settings, shot_uuid, target_frame_position=None, refresh_state=True): ''' either a pil image or a internalfileobject can be passed to this method, for adding it inside a shot diff --git a/ui_components/widgets/animation_style_element.py b/ui_components/widgets/animation_style_element.py index d764ed5f..44284b62 100644 --- a/ui_components/widgets/animation_style_element.py +++ b/ui_components/widgets/animation_style_element.py @@ -32,12 +32,11 @@ def animation_style_element(shot_uuid): columns = st.columns(len(timing_list)) # Create columns equal to the number of images for idx, timing in enumerate(timing_list): if timing.primary_image and timing.primary_image.location: - columns[idx].image(timing.primary_image.location, use_column_width=True) + columns[idx].image(timing.primary_image.location, use_column_width=True) + b = timing.primary_image.inference_params + prompt = columns[idx].text_area(f"Prompt {idx+1}", value=(b['prompt'] if b else ""), key=f"prompt_{idx+1}") else: columns[idx].warning("No primary image present") - prompt = columns[idx].text_area(f"Prompt {idx+1}", value=timing.prompt, key=f"prompt_{idx+1}") - - else: st.warning("No keyframes present") From 046832c16757f34c6857e240f3e4deaec39a96f4 Mon Sep 17 00:00:00 2001 From: piyushK52 Date: Sat, 4 Nov 2023 22:01:47 +0530 Subject: [PATCH 158/164] shot duplication added --- backend/db_repo.py | 44 +++++++++++++++++++ ui_components/methods/common_methods.py | 4 +- ui_components/widgets/frame_selector.py | 6 +-- ui_components/widgets/shot_view.py | 56 ++++++++++++------------- utils/cache/cache_methods.py | 12 ++++++ utils/data_repo/api_repo.py | 5 +++ utils/data_repo/data_repo.py | 4 ++ 7 files changed, 97 
insertions(+), 34 deletions(-) diff --git a/backend/db_repo.py b/backend/db_repo.py index fb2eec69..6ac83b93 100644 --- a/backend/db_repo.py +++ b/backend/db_repo.py @@ -1519,6 +1519,50 @@ def update_shot(self, shot_uuid, shot_idx=None, name=None, duration=None, meta_d } return InternalResponse(payload, 'shot updated successfully', True) + + def duplicate_shot(self, shot_uuid): + shot: Shot = Shot.objects.filter(uuid=shot_uuid, is_disabled=False).first() + if not shot: + return InternalResponse({}, 'invalid shot uuid', False) + + shot_number = Shot.objects.filter(project_id=shot.project.id, is_disabled=False).count() + 1 + shot_data = { + "name" : shot.name + " (copy)", + "desc" : shot.desc, + "shot_idx" : shot_number, + "duration" : shot.duration, + "meta_data" : shot.meta_data, + "project_id" : shot.project.id + } + + new_shot = Shot.objects.create(**shot_data) + + timing_list = Timing.objects.filter(shot_id=shot.id, is_disabled=False).all() + new_timing_list = [] + for timing in timing_list: + data = { + "model_id": timing.model_id, + "source_image_id": timing.source_image_id, + "mask_id": timing.mask_id, + "canny_image_id": timing.canny_image_id, + "primary_image_id": timing.primary_image_id, + "shot_id": new_shot.id, + "alternative_images": timing.alternative_images, + "notes": timing.notes, + "clip_duration": timing.clip_duration, + "aux_frame_index": timing.aux_frame_index, + } + + new_timing = Timing.objects.create(**data) + new_timing_list.append(new_timing) + + context = {'timing_list': new_timing_list} + + payload = { + 'data': ShotDto(new_shot, context=context).data + } + + return InternalResponse(payload, 'shot duplicated successfully', True) def delete_shot(self, shot_uuid): shot: Shot = Shot.objects.filter(uuid=shot_uuid, is_disabled=False).first() diff --git a/ui_components/methods/common_methods.py b/ui_components/methods/common_methods.py index 1090a292..5f6cc40a 100644 --- a/ui_components/methods/common_methods.py +++ 
b/ui_components/methods/common_methods.py @@ -856,7 +856,7 @@ def process_inference_output(**kwargs): def check_project_meta_data(project_uuid): ''' - checking for project metadata (like cache updates) + checking for project metadata (like cache updates - we update specific entities using this flag) project_update_data is of the format {"data_update": [timing_uuid], "gallery_update": True/False, "background_img_list": []} ''' data_repo = DataRepo() @@ -878,4 +878,4 @@ def check_project_meta_data(project_uuid): if gallery_update_data: pass - release_lock(key) \ No newline at end of file + release_lock(key) diff --git a/ui_components/widgets/frame_selector.py b/ui_components/widgets/frame_selector.py index 3ff9c89f..0760a685 100644 --- a/ui_components/widgets/frame_selector.py +++ b/ui_components/widgets/frame_selector.py @@ -101,9 +101,9 @@ def frame_selector_widget(): if st_memory.toggle("Open", value=True, key="shot_details_toggle"): a1,a2 = st.columns([2,2]) with a1: - update_shot_name(shot, data_repo) + update_shot_name(shot.uuid) with a2: - update_shot_duration(shot, data_repo) + update_shot_duration(shot.uuid) st.markdown("---") @@ -113,7 +113,7 @@ def frame_selector_widget(): st.markdown("---") - delete_shot_button(shot, data_repo) + delete_shot_button(shot.uuid) def update_current_frame_index(index): data_repo = DataRepo() diff --git a/ui_components/widgets/shot_view.py b/ui_components/widgets/shot_view.py index 8e050b8c..6a1195fa 100644 --- a/ui_components/widgets/shot_view.py +++ b/ui_components/widgets/shot_view.py @@ -22,26 +22,18 @@ def shot_keyframe_element(shot_uuid, items_per_row, **kwargs): timing_list: List[InternalFrameTimingObject] = shot.timing_list with st.expander(f"_-_-_-_", expanded=True): - # st.info(f"##### {shot.name}") - header_col_0, header_col_1, header_col_2, header_col_3 = st.columns([1.75, 1,1,3]) - - if st.session_state["open_shot"] == shot.uuid: - - - with header_col_0: - update_shot_name(shot, data_repo) + 
update_shot_name(shot.uuid) + duplicate_shot_button(shot.uuid) if not st.toggle("Expand", key=f"close_shot_{shot.uuid}", value=True): st.session_state["open_shot"] = None st.rerun() with header_col_1: - update_shot_duration(shot, data_repo) - - + update_shot_duration(shot.uuid) with header_col_3: col2, col3, col4 = st.columns(3) @@ -55,21 +47,15 @@ def shot_keyframe_element(shot_uuid, items_per_row, **kwargs): with col4: change_shot_toggle = st_memory.toggle("Change Shot", value=False, key="change_shot_toggle") - - - else: - - - with header_col_0: - st.info(f"##### {shot.name}") + else: + with header_col_0: + st.info(f"##### {shot.name}") if st.toggle("Expand", key=f"shot_{shot.uuid}"): st.session_state["open_shot"] = shot.uuid st.rerun() with header_col_1: st.info(f"**{shot.duration} secs**") - - st.markdown("***") @@ -96,15 +82,12 @@ def shot_keyframe_element(shot_uuid, items_per_row, **kwargs): if st.session_state["open_shot"] == shot.uuid: timeline_view_buttons(idx, shot_uuid, replace_image_widget_toggle, copy_frame_toggle, move_frames_toggle,delete_frames_toggle, change_shot_toggle) st.markdown("***") - - - st.markdown("***") - + st.markdown("***") if st.session_state["open_shot"] == shot.uuid: - bottom1, bottom2, bottom3 = st.columns([1,2,1]) + bottom1, _, bottom3 = st.columns([1,2,1]) with bottom1: - delete_shot_button(shot, data_repo) + delete_shot_button(shot.uuid) with bottom3: if st.button("Move shot up", key=f'shot_up_movement_{shot.uuid}'): @@ -123,7 +106,18 @@ def shot_keyframe_element(shot_uuid, items_per_row, **kwargs): time.sleep(0.3) st.rerun() -def delete_shot_button(shot, data_repo): +def duplicate_shot_button(shot_uuid): + data_repo = DataRepo() + shot = data_repo.get_shot_from_uuid(shot_uuid) + if st.button("Duplicate shot", key=f"duplicate_btn_{shot.uuid}"): + data_repo.duplicate_shot(shot.uuid) + st.success("Shot duplicated successfully") + time.sleep(0.3) + st.rerun() + +def delete_shot_button(shot_uuid): + data_repo = DataRepo() + shot = 
data_repo.get_shot_from_uuid(shot_uuid) confirm_delete = st.checkbox("I know that this will delete all the frames and videos within") help_text = "Check the box above to enable the delete button." if confirm_delete else "" if st.button("Delete shot", disabled=(not confirm_delete), help=help_text, key=shot.uuid): @@ -132,7 +126,9 @@ def delete_shot_button(shot, data_repo): time.sleep(0.3) st.rerun() -def update_shot_name(shot, data_repo): +def update_shot_name(shot_uuid): + data_repo = DataRepo() + shot = data_repo.get_shot_from_uuid(shot_uuid) name = st.text_input("Name:", value=shot.name, max_chars=25) if name != shot.name: data_repo.update_shot(shot.uuid, name=name) @@ -140,7 +136,9 @@ def update_shot_name(shot, data_repo): time.sleep(0.3) st.rerun() -def update_shot_duration(shot, data_repo): +def update_shot_duration(shot_uuid): + data_repo = DataRepo() + shot = data_repo.get_shot_from_uuid(shot_uuid) duration = st.number_input("Duration:", value=shot.duration) if duration != shot.duration: data_repo.update_shot(shot.uuid, duration=duration) diff --git a/utils/cache/cache_methods.py b/utils/cache/cache_methods.py index a9b9dd0a..427fecb6 100644 --- a/utils/cache/cache_methods.py +++ b/utils/cache/cache_methods.py @@ -672,4 +672,16 @@ def _cache_get_timing_list_from_shot(self, *args, **kwargs): setattr(cls, '_original_get_timing_list_from_shot', cls.get_timing_list_from_shot) setattr(cls, "get_timing_list_from_shot", _cache_get_timing_list_from_shot) + def _cache_duplicate_shot(self, *args, **kwargs): + original_func = getattr(cls, '_original_duplicate_shot') + shot = original_func(self, *args, **kwargs) + + if shot: + StCache.delete_all(CacheKey.SHOT.value) + + return shot + + setattr(cls, '_original_duplicate_shot', cls.duplicate_shot) + setattr(cls, "duplicate_shot", _cache_duplicate_shot) + return cls \ No newline at end of file diff --git a/utils/data_repo/api_repo.py b/utils/data_repo/api_repo.py index 90207f9e..7c542dbd 100644 --- 
a/utils/data_repo/api_repo.py +++ b/utils/data_repo/api_repo.py @@ -84,6 +84,7 @@ def _setup_urls(self): self.SHOT_URL = 'v1/data/shot' self.SHOT_LIST_URL = 'v1/data/shot/list' self.SHOT_INTERPOLATED_CLIP = 'v1/data/shot/interpolated-clip' + self.SHOT_DUPLICATE_URL = 'v1/data/shot/duplicate' def logout(self): delete_url_param(AUTH_TOKEN) @@ -488,6 +489,10 @@ def create_shot(self, project_uuid, name, duration, meta_data="", desc=""): def update_shot(self, shot_uuid, **kwargs): res = self.http_put(self.SHOT_URL, data=kwargs) return InternalResponse(res['payload'], 'success', res['status']) + + def duplicate_shot(self, shot_uuid): + res = self.http_post(self.SHOT_DUPLICATE_URL, params={'uuid': shot_uuid}) + return InternalResponse(res['payload'], 'success', res['status']) def delete_shot(self, shot_uuid): res = self.http_delete(self.SHOT_URL, params={'uuid': shot_uuid}) diff --git a/utils/data_repo/data_repo.py b/utils/data_repo/data_repo.py index 54b71e8b..46ba25f8 100644 --- a/utils/data_repo/data_repo.py +++ b/utils/data_repo/data_repo.py @@ -422,6 +422,10 @@ def update_shot(self, shot_uuid, shot_idx=None, name=None, duration=None, meta_d def delete_shot(self, shot_uuid): res = self.db_repo.delete_shot(shot_uuid) return res.status + + def duplicate_shot(self, shot_uuid): + res = self.db_repo.duplicate_shot(shot_uuid) + return res.status def add_interpolated_clip(self, shot_uuid, **kwargs): res = self.db_repo.add_interpolated_clip(shot_uuid, **kwargs) From 63d94d97a25ac68a891202b11de5f9789fe0a19b Mon Sep 17 00:00:00 2001 From: piyushK52 Date: Sat, 4 Nov 2023 22:34:43 +0530 Subject: [PATCH 159/164] max frams check added --- ui_components/constants.py | 1 + ui_components/widgets/explorer_element.py | 34 +++++++++++++++++++---- 2 files changed, 30 insertions(+), 5 deletions(-) diff --git a/ui_components/constants.py b/ui_components/constants.py index 210c55f9..2ac3abe1 100644 --- a/ui_components/constants.py +++ b/ui_components/constants.py @@ -52,6 +52,7 @@ class 
DefaultProjectSettingParams: total_log_pages = 1 total_gallery_pages = 1 total_shortlist_gallery_pages = 1 + max_frames_per_shot = 3 # TODO: make proper paths for every file CROPPED_IMG_LOCAL_PATH = "videos/temp/cropped.png" diff --git a/ui_components/widgets/explorer_element.py b/ui_components/widgets/explorer_element.py index 47da9be2..364d2b3a 100644 --- a/ui_components/widgets/explorer_element.py +++ b/ui_components/widgets/explorer_element.py @@ -51,7 +51,7 @@ def explorer_element(project_uuid): with d2: number_to_generate = st.slider("How many images would you like to generate?", min_value=0, max_value=100, value=4, step=4, key="number_to_generate", help="It'll generate 4 from each variation.") - _, e2, _ = st.columns([0.5, 1, 0.5]) + _, e2, e3 = st.columns([0.5, 1, 0.5]) if e2.button("Generate images", key="generate_images", use_container_width=True, type="primary"): ml_client = get_ml_client() counter = 0 @@ -102,6 +102,9 @@ def explorer_element(project_uuid): } process_inference_output(**inference_data) e2.info("Check the Generation Log to the left for the status.") + + with e3: + update_max_frame_per_shot_element(project_uuid) project_setting = data_repo.get_project_setting(project_uuid) st.markdown("***") @@ -203,9 +206,17 @@ def gallery_image_view(project_uuid,page_number=1,num_items_per_page=20, open_de if st.button(f"Add to shot", key=f"add_{gallery_image_list[i + j].uuid}", help="Promote this variant to the primary image", use_container_width=True): shot_number = shot_names.index(shot_name) + 1 shot_uuid = shot_list[shot_number - 2].uuid - add_key_frame(gallery_image_list[i + j], False, shot_uuid, len(data_repo.get_timing_list_from_shot(shot_uuid)), refresh_state=False) - # removing this from the gallery view - data_repo.update_file(gallery_image_list[i + j].uuid, tag="") + + shot = data_repo.get_shot_from_uuid(shot_uuid) + project_settings = data_repo.get_project_setting(shot.project.uuid) + if len(shot.timing_list) < 
project_settings.max_frames_per_shot: + add_key_frame(gallery_image_list[i + j], False, shot_uuid, len(data_repo.get_timing_list_from_shot(shot_uuid)), refresh_state=False) + # removing this from the gallery view + data_repo.update_file(gallery_image_list[i + j].uuid, tag="") + else: + st.error('Max frame limit reached') + time.sleep(0.3) + st.rerun() else: st.warning("No data found") @@ -245,4 +256,17 @@ def create_prompt(**kwargs): result = query_llama2(user_instruction, system_instruction_template_list[instruction_type]) text_list.append(result) - return ", ".join(text_list) \ No newline at end of file + return ", ".join(text_list) + + +def update_max_frame_per_shot_element(project_uuid): + data_repo = DataRepo() + project_settings = data_repo.get_project_setting(project_uuid) + + max_frames = st.number_input(label='Max frames per shot', min_value=1, value=project_settings.max_frames_per_shot) + + if max_frames != project_settings.max_frames_per_shot: + project_settings.max_frames_per_shot = max_frames + st.success("Updated") + time.sleep(0.3) + st.rerun() \ No newline at end of file From ee5b7339b927ab314f4c713adff7815cd2ac49e0 Mon Sep 17 00:00:00 2001 From: piyushK52 Date: Sun, 5 Nov 2023 15:54:36 +0530 Subject: [PATCH 160/164] endpoint fixes --- ui_components/components/app_settings_page.py | 2 +- utils/data_repo/api_repo.py | 12 ++++++------ 2 files changed, 7 insertions(+), 7 deletions(-) diff --git a/ui_components/components/app_settings_page.py b/ui_components/components/app_settings_page.py index 4262ace1..5919a60d 100644 --- a/ui_components/components/app_settings_page.py +++ b/ui_components/components/app_settings_page.py @@ -21,7 +21,7 @@ def app_settings_page(): if SERVER != ServerType.DEVELOPMENT.value: with st.expander("Purchase Credits", expanded=True): - user_credits = get_current_user(fresh_fetch=True)['total_credits'] + user_credits = get_current_user().total_credits st.write(f"Total Credits: {user_credits}") c1, c2 = st.columns([1,1]) with c1: 
diff --git a/utils/data_repo/api_repo.py b/utils/data_repo/api_repo.py index 7c542dbd..a3519427 100644 --- a/utils/data_repo/api_repo.py +++ b/utils/data_repo/api_repo.py @@ -78,13 +78,13 @@ def _setup_urls(self): self.STRIPE_PAYMENT_URL = '/v1/payment/stripe-link' # lock - self.LOCK_URL = 'v1/data/lock' + self.LOCK_URL = '/v1/data/lock' # shot - self.SHOT_URL = 'v1/data/shot' - self.SHOT_LIST_URL = 'v1/data/shot/list' - self.SHOT_INTERPOLATED_CLIP = 'v1/data/shot/interpolated-clip' - self.SHOT_DUPLICATE_URL = 'v1/data/shot/duplicate' + self.SHOT_URL = '/v1/data/shot' + self.SHOT_LIST_URL = '/v1/data/shot/list' + self.SHOT_INTERPOLATED_CLIP = '/v1/data/shot/interpolated-clip' + self.SHOT_DUPLICATE_URL = '/v1/data/shot/duplicate' def logout(self): delete_url_param(AUTH_TOKEN) @@ -475,7 +475,7 @@ def get_shot_list(self, project_uuid): res = self.http_get(self.SHOT_LIST_URL, params={'project_id': project_uuid}) return InternalResponse(res['payload'], 'success', res['status']) - def create_shot(self, project_uuid, name, duration, meta_data="", desc=""): + def create_shot(self, project_uuid, duration, name, meta_data="", desc=""): data = { 'project_id': project_uuid, 'name': name, From dbbecd1b87def81d3ff1f8e400bb360ae3f6507e Mon Sep 17 00:00:00 2001 From: piyushK52 Date: Sun, 5 Nov 2023 21:29:00 +0530 Subject: [PATCH 161/164] api fixes --- ui_components/components/frame_styling_page.py | 2 -- ui_components/setup.py | 12 +----------- utils/cache/cache_methods.py | 3 ++- utils/data_repo/api_repo.py | 14 ++++---------- 4 files changed, 7 insertions(+), 24 deletions(-) diff --git a/ui_components/components/frame_styling_page.py b/ui_components/components/frame_styling_page.py index d8c81877..12b6bf1e 100644 --- a/ui_components/components/frame_styling_page.py +++ b/ui_components/components/frame_styling_page.py @@ -24,8 +24,6 @@ def frame_styling_page(shot_uuid: str): - - data_repo = DataRepo() shot = data_repo.get_shot_from_uuid(shot_uuid) timing_list = 
data_repo.get_timing_list_from_shot(shot_uuid) diff --git a/ui_components/setup.py b/ui_components/setup.py index 9f1eb921..3ead18c6 100644 --- a/ui_components/setup.py +++ b/ui_components/setup.py @@ -1,8 +1,5 @@ -import json -import time import streamlit as st import os -import math from moviepy.editor import * from shared.constants import SERVER, ServerType @@ -16,13 +13,11 @@ from ui_components.constants import CreativeProcessType from ui_components.methods.common_methods import check_project_meta_data from ui_components.models import InternalAppSettingObject -from utils.common_utils import acquire_lock, create_working_assets, get_current_user, get_current_user_uuid, release_lock, reset_project_state +from utils.common_utils import create_working_assets, get_current_user, get_current_user_uuid, reset_project_state from utils import st_memory from utils.data_repo.data_repo import DataRepo -# TODO: CORRECT-CODE - def setup_app_ui(): data_repo = DataRepo() @@ -119,8 +114,6 @@ def setup_app_ui(): st.session_state['main_view_type'] = st_memory.menu(None, main_view_types, icons=['search-heart', 'tools', "play-circle", 'stopwatch'], menu_icon="cast", default_index=0, key="main_view_type_name", orientation="horizontal", styles={ "nav-link": {"font-size": "15px", "margin": "0px", "--hover-color": "#eee"}, "nav-link-selected": {"background-color": "red"}}) - _, mainheader2 = st.columns([3, 2]) - if st.session_state["main_view_type"] == "Creative Process": with st.sidebar: view_types = ["Explorer","Timeline","Individual"] @@ -174,12 +167,9 @@ def setup_app_ui(): if st.session_state["manual_select"] != None: st.session_state["manual_select"] = None - - frame_styling_page(st.session_state["shot_uuid"]) elif st.session_state["main_view_type"] == "Tools & Settings": - with st.sidebar: tool_pages = ["Query Logger", "Custom Models", "Project Settings"] diff --git a/utils/cache/cache_methods.py b/utils/cache/cache_methods.py index 427fecb6..062a7d02 100644 --- 
a/utils/cache/cache_methods.py +++ b/utils/cache/cache_methods.py @@ -549,7 +549,8 @@ def _cache_google_user_login(self, **kwargs): original_func = getattr(cls, '_original_google_user_login') user, token, refresh_token = original_func(self, **kwargs) StCache.delete_all(CacheKey.LOGGED_USER.value) - StCache.add(user, CacheKey.LOGGED_USER.value) + if user: + StCache.add(user, CacheKey.LOGGED_USER.value) return user, token, refresh_token diff --git a/utils/data_repo/api_repo.py b/utils/data_repo/api_repo.py index a3519427..d5c6244d 100644 --- a/utils/data_repo/api_repo.py +++ b/utils/data_repo/api_repo.py @@ -189,14 +189,8 @@ def get_file_list_from_log_uuid_list(self, log_uuid_list): res = self.http_post(self.FILE_UUID_LIST_URL, data={'log_uuid_list': log_uuid_list}) return InternalResponse(res['payload'], 'success', res['status']) - def get_all_file_list(self, type: InternalFileType, tag = None, project_id = None): - filter_data = {"type": type} - if tag: - filter_data['tag'] = tag - if project_id: - filter_data['project_id'] = project_id - - res = self.http_get(self.FILE_LIST_URL, params=filter_data) + def get_all_file_list(self, **kwargs): + res = self.http_get(self.FILE_LIST_URL, params=kwargs) return InternalResponse(res['payload'], 'success', res['status']) def create_or_update_file(self, uuid, type=InternalFileType.IMAGE.value, **kwargs): @@ -285,8 +279,8 @@ def get_inference_log_from_uuid(self, uuid): res = self.http_get(self.LOG_URL, params={'uuid': uuid}) return InternalResponse(res['payload'], 'success', res['status']) - def get_all_inference_log_list(self, project_id=None, model_id=None): - res = self.http_get(self.LOG_LIST_URL, params={'project_id': project_id, 'model_id': model_id}) + def get_all_inference_log_list(self, **kwargs): + res = self.http_get(self.LOG_LIST_URL, params=kwargs) return InternalResponse(res['payload'], 'success', res['status']) def create_inference_log(self, **kwargs): From 396815c547c1262831ca459b3111f678594a4315 Mon Sep 17 
00:00:00 2001 From: piyushK52 Date: Sun, 5 Nov 2023 21:46:13 +0530 Subject: [PATCH 162/164] max_frame check fixed --- ui_components/widgets/add_key_frame_element.py | 9 +++++++++ ui_components/widgets/explorer_element.py | 13 +++---------- 2 files changed, 12 insertions(+), 10 deletions(-) diff --git a/ui_components/widgets/add_key_frame_element.py b/ui_components/widgets/add_key_frame_element.py index 6804420f..b53e70be 100644 --- a/ui_components/widgets/add_key_frame_element.py +++ b/ui_components/widgets/add_key_frame_element.py @@ -1,3 +1,4 @@ +import time from typing import Union import streamlit as st from ui_components.constants import CreativeProcessType, WorkflowStageType @@ -83,6 +84,14 @@ def add_key_frame(selected_image: Union[Image.Image, InternalFileObject], inheri data_repo = DataRepo() timing_list = data_repo.get_timing_list_from_shot(shot_uuid) + # checking if the shot has reached the max frame limit + shot = data_repo.get_shot_from_uuid(shot_uuid) + project_settings = data_repo.get_project_setting(shot.project.uuid) + if len(shot.timing_list) >= project_settings.max_frames_per_shot: + st.error('Max frame limit reached') + time.sleep(0.3) + st.rerun() + # creating frame inside the shot at target_frame_position target_frame_position = st.session_state['current_frame_index'] if target_frame_position is None else target_frame_position target_aux_frame_index = min(len(timing_list), target_frame_position) diff --git a/ui_components/widgets/explorer_element.py b/ui_components/widgets/explorer_element.py index 364d2b3a..ba92075a 100644 --- a/ui_components/widgets/explorer_element.py +++ b/ui_components/widgets/explorer_element.py @@ -207,16 +207,9 @@ def gallery_image_view(project_uuid,page_number=1,num_items_per_page=20, open_de shot_number = shot_names.index(shot_name) + 1 shot_uuid = shot_list[shot_number - 2].uuid - shot = data_repo.get_shot_from_uuid(shot_uuid) - project_settings = data_repo.get_project_setting(shot.project.uuid) - if 
len(shot.timing_list) < project_settings.max_frames_per_shot: - add_key_frame(gallery_image_list[i + j], False, shot_uuid, len(data_repo.get_timing_list_from_shot(shot_uuid)), refresh_state=False) - # removing this from the gallery view - data_repo.update_file(gallery_image_list[i + j].uuid, tag="") - else: - st.error('Max frame limit reached') - time.sleep(0.3) - + add_key_frame(gallery_image_list[i + j], False, shot_uuid, len(data_repo.get_timing_list_from_shot(shot_uuid)), refresh_state=False) + # removing this from the gallery view + data_repo.update_file(gallery_image_list[i + j].uuid, tag="") st.rerun() else: st.warning("No data found") From dc85d1196bbedee090e6e8ee6d8ba9e58b7d1de3 Mon Sep 17 00:00:00 2001 From: peter942 Date: Sun, 5 Nov 2023 21:17:19 +0100 Subject: [PATCH 163/164] Lots of improvements --- .../components/frame_styling_page.py | 2 +- ui_components/methods/common_methods.py | 12 +- .../widgets/add_key_frame_element.py | 35 ++-- .../widgets/animation_style_element.py | 180 +++++++++--------- ui_components/widgets/cropping_element.py | 38 +--- ui_components/widgets/explorer_element.py | 4 +- ui_components/widgets/image_zoom_widgets.py | 34 ++-- ui_components/widgets/shot_view.py | 64 ++++--- ui_components/widgets/styling_element.py | 2 +- utils/st_memory.py | 15 ++ 10 files changed, 205 insertions(+), 181 deletions(-) diff --git a/ui_components/components/frame_styling_page.py b/ui_components/components/frame_styling_page.py index d8c81877..d70765e1 100644 --- a/ui_components/components/frame_styling_page.py +++ b/ui_components/components/frame_styling_page.py @@ -59,7 +59,7 @@ def frame_styling_page(shot_uuid: str): else: if st.session_state['page'] == "Key Frames": st.markdown( - f"#### :red[{st.session_state['main_view_type']}] > **:green[{st.session_state['frame_styling_view_type']}]** > :orange[{st.session_state['page']}] > :blue[Frame #{st.session_state['current_frame_index']}]") + f"#### :red[{st.session_state['main_view_type']}] > 
**:green[{st.session_state['frame_styling_view_type']}]** > :orange[{shot.name}] > :blue[Frame #{st.session_state['current_frame_index']}]") else: st.markdown(f"#### :red[{st.session_state['main_view_type']}] > **:green[{st.session_state['frame_styling_view_type']}]** > :orange[{st.session_state['page']}] > :blue[{shot.name}]") diff --git a/ui_components/methods/common_methods.py b/ui_components/methods/common_methods.py index 5f6cc40a..3b59611a 100644 --- a/ui_components/methods/common_methods.py +++ b/ui_components/methods/common_methods.py @@ -190,7 +190,7 @@ def zoom_image(image, zoom_factor, fill_with=None): return cropped_image # image here is a PIL object -def apply_image_transformations(image: Image, zoom_level, rotation_angle, x_shift, y_shift) -> Image: +def apply_image_transformations(image: Image, zoom_level, rotation_angle, x_shift, y_shift, flip_vertically, flip_horizontally) -> Image: width, height = image.size # Calculate the diagonal for the rotation @@ -207,7 +207,7 @@ def apply_image_transformations(image: Image, zoom_level, rotation_angle, x_shif # Shift # Create a new image with black background shift_bg = Image.new("RGB", (diagonal, diagonal), "black") - shift_bg.paste(rotated_image, (x_shift, y_shift)) + shift_bg.paste(rotated_image, (-x_shift, y_shift)) # Zoom zoomed_width = int(diagonal * (zoom_level / 100)) @@ -221,6 +221,14 @@ def apply_image_transformations(image: Image, zoom_level, rotation_angle, x_shif crop_y2 = crop_y1 + height cropped_image = zoomed_image.crop((crop_x1, crop_y1, crop_x2, crop_y2)) + # Flip vertically + if flip_vertically: + cropped_image = cropped_image.transpose(Image.FLIP_TOP_BOTTOM) + + # Flip horizontally + if flip_horizontally: + cropped_image = cropped_image.transpose(Image.FLIP_LEFT_RIGHT) + return cropped_image def fetch_image_by_stage(shot_uuid, stage, frame_idx): diff --git a/ui_components/widgets/add_key_frame_element.py b/ui_components/widgets/add_key_frame_element.py index 6804420f..ebe9717d 100644 --- 
a/ui_components/widgets/add_key_frame_element.py +++ b/ui_components/widgets/add_key_frame_element.py @@ -19,12 +19,12 @@ def add_key_frame_section(shot_uuid, individual_view=True): data_repo = DataRepo() shot = data_repo.get_shot_from_uuid(shot_uuid) timing_list = data_repo.get_timing_list_from_shot(shot_uuid) - + len_shot_timing_list = len(timing_list) if len(timing_list) > 0 else 0 selected_image_location = "" - source_of_starting_image = st.radio("Starting image source:", ["Uploaded image", "Existing Frame"], key="source_of_starting_image") + source_of_starting_image = st.radio("Starting image:", ["None","Uploaded image", "Existing Frame"], key="source_of_starting_image") if source_of_starting_image == "Existing Frame": - image_idx = st.number_input("Which frame would you like to use?", min_value=1, max_value=max(1, len(timing_list)), value=st.session_state['current_frame_index'], step=1, key="image_idx") + image_idx = st.number_input("Which frame would you like to use?", min_value=1, max_value=max(1, len(timing_list)), value=len_shot_timing_list, step=1, key="image_idx") selected_image_location = timing_list[image_idx - 1].primary_image_location elif source_of_starting_image == "Uploaded image": uploaded_image = st.file_uploader("Upload an image", type=["png", "jpg", "jpeg"]) @@ -35,30 +35,30 @@ def add_key_frame_section(shot_uuid, individual_view=True): selected_image_location = selected_image_location or file_location else: selected_image_location = "" - image_idx = st.session_state['current_frame_index'] + image_idx = len_shot_timing_list if individual_view: radio_text = "Inherit styling settings from the " + ("current frame?" 
if source_of_starting_image == "Uploaded image" else "selected frame") inherit_styling_settings = st_memory.radio(radio_text, ["Yes", "No"], key="inherit_styling_settings", horizontal=True) - apply_zoom_effects = st_memory.radio("Apply zoom effects to inputted image?", ["No","Yes"], key="apply_zoom_effects", horizontal=True) + # apply_zoom_effects = st_memory.radio("Apply zoom effects to inputted image?", ["No","Yes"], key="apply_zoom_effects", horizontal=True) - if apply_zoom_effects == "Yes": - zoom_inputs(position='new', horizontal=True) + #if apply_zoom_effects == "Yes": + # zoom_inputs(position='new', horizontal=True) else: inherit_styling_settings = "Yes" apply_zoom_effects = "No" - return selected_image_location, inherit_styling_settings, apply_zoom_effects + return selected_image_location, inherit_styling_settings def display_selected_key_frame(selected_image_location, apply_zoom_effects): selected_image = None if selected_image_location: - if apply_zoom_effects == "Yes": - image_preview = generate_pil_image(selected_image_location) - selected_image = apply_image_transformations(image_preview, st.session_state['zoom_level_input'], st.session_state['rotation_angle_input'], st.session_state['x_shift'], st.session_state['y_shift']) - else: - selected_image = generate_pil_image(selected_image_location) + # if apply_zoom_effects == "Yes": + # image_preview = generate_pil_image(selected_image_location) + # selected_image = apply_image_transformations(image_preview, st.session_state['zoom_level_input'], st.session_state['rotation_angle_input'], st.session_state['x_shift'], st.session_state['y_shift'], st.session_state['flip_vertically'], st.session_state['flip_horizontally']) + + selected_image = generate_pil_image(selected_image_location) st.info("Starting Image:") st.image(selected_image) else: @@ -69,9 +69,9 @@ def display_selected_key_frame(selected_image_location, apply_zoom_effects): def add_key_frame_element(shot_uuid): add1, add2 = st.columns(2) with add1: 
- selected_image_location, inherit_styling_settings, apply_zoom_effects = add_key_frame_section(shot_uuid) + selected_image_location, inherit_styling_settings = add_key_frame_section(shot_uuid) with add2: - selected_image = display_selected_key_frame(selected_image_location, apply_zoom_effects) + selected_image = display_selected_key_frame(selected_image_location, False) return selected_image, inherit_styling_settings @@ -84,7 +84,8 @@ def add_key_frame(selected_image: Union[Image.Image, InternalFileObject], inheri timing_list = data_repo.get_timing_list_from_shot(shot_uuid) # creating frame inside the shot at target_frame_position - target_frame_position = st.session_state['current_frame_index'] if target_frame_position is None else target_frame_position + len_shot_timing_list = len(timing_list) if len(timing_list) > 0 else 0 + target_frame_position = len_shot_timing_list if target_frame_position is None else target_frame_position target_aux_frame_index = min(len(timing_list), target_frame_position) _ = create_frame_inside_shot(shot_uuid, target_aux_frame_index) @@ -94,7 +95,7 @@ def add_key_frame(selected_image: Union[Image.Image, InternalFileObject], inheri save_uploaded_image(selected_image, shot_uuid, timing_list[target_aux_frame_index].uuid, WorkflowStageType.STYLED.value) if inherit_styling_settings == "Yes" and st.session_state['current_frame_index']: - clone_styling_settings(st.session_state['current_frame_index'] - 1, timing_list[target_aux_frame_index].uuid) + clone_styling_settings(st.session_state['current_frame_index'] - 1, timing_list[target_aux_frame_index-1].uuid) if len(timing_list) == 1: st.session_state['current_frame_index'] = 1 diff --git a/ui_components/widgets/animation_style_element.py b/ui_components/widgets/animation_style_element.py index 44284b62..7b88e7c2 100644 --- a/ui_components/widgets/animation_style_element.py +++ b/ui_components/widgets/animation_style_element.py @@ -11,102 +11,108 @@ def animation_style_element(shot_uuid): 
motion_modules = AnimateDiffCheckpoint.get_name_list() variant_count = 1 - current_animation_style = AnimationStyleType.INTERPOLATION.value # setting a default value + # current_animation_style = AnimationStyleType.INTERPOLATION.value # setting a default value data_repo = DataRepo() - if current_animation_style == AnimationStyleType.INTERPOLATION.value: + # if current_animation_style == AnimationStyleType.INTERPOLATION.value: - animation_type = st.radio("Animation Interpolation:", options=['Creative Interpolation', "Video To Video"], key="animation_tool", horizontal=True, disabled=True) + animation_type = st.radio("Animation Interpolation:", options=['Creative Interpolation', "Video To Video"], key="animation_tool", horizontal=True, disabled=True) + + if animation_type == "Creative Interpolation": + st.markdown("***") + + shot: InternalShotObject = data_repo.get_shot_from_uuid(st.session_state["shot_uuid"]) + timing_list: List[InternalFrameTimingObject] = shot.timing_list + st.markdown("#### Keyframe Settings") + if timing_list and len(timing_list): + columns = st.columns(len(timing_list)) + disable_generate = False + help = "" + for idx, timing in enumerate(timing_list): + if timing.primary_image and timing.primary_image.location: + columns[idx].image(timing.primary_image.location, use_column_width=True) + b = timing.primary_image.inference_params + prompt = columns[idx].text_area(f"Prompt {idx+1}", value=(b['prompt'] if b else ""), key=f"prompt_{idx+1}") + else: + columns[idx].warning("No primary image present") + disable_generate = True + help = "You can't generate a video because one of your keyframes is missing an image." 
+ else: + st.warning("No keyframes present") + st.markdown("***") + video_resolution = None + + settings = { + "animation_tool": animation_type + } + + st.markdown("#### Overall Settings") + c1, c2 = st.columns([1,1]) + with c1: + motion_module = st.selectbox("Which motion module would you like to use?", options=motion_modules, key="motion_module") + with c2: + sd_model_list = [ + "Realistic_Vision_V5.0.safetensors", + "Counterfeit-V3.0_fp32.safetensors", + "epic_realism.safetensors", + "dreamshaper_v8.safetensors", + "deliberate_v3.safetensors" + ] + sd_model = st.selectbox("Which Stable Diffusion model would you like to use?", options=sd_model_list, key="sd_model") + vae_list = [ + "Baked", + "Standard"] + vae = st.selectbox("Which VAE would you like to use?", options=vae_list, key="vae_model") + + d1, d2 = st.columns([1, 1]) + + with d1: + ip_adapter_strength = st.slider("IP Adapter Strength", min_value=0.0, max_value=1.0, value=0.5, step=0.1, key="ip_adapter_strength") - if animation_type == "Creative Interpolation": + with d2: + ip_adapter_noise = st.slider("IP Adapter Noise", min_value=0.0, max_value=1.0, value=0.5, step=0.1, key="ip_adapter_noise") + + interpolation_style = st.selectbox("Interpolation Style", options=["Big Dipper", "Linear", "Slerp", "Custom"], key="interpolation_style") + if interpolation_style == "Big Dipper": + interpolation_settings = "0=1.0,1=0.99,2=0.97,3=0.95,4=0.92,5=0.9,6=0.86,7=0.83,8=0.79,9=0.75,10=0.71,11=0.67,12=0.62,13=0.58,14=0.53,15=0.49,16=0.44,17=0.39,18=0.35,19=0.31,20=0.26,21=0.22,22=0.19,23=0.15,24=0.12,25=0.09,26=0.06,27=0.04,28=0.02,29=0.01,30=0.0,31=0.0,32=0.0,33=0.0,34=0.0,35=0.0,36=0.0,37=0.0,38=0.0,39=0.0,40=0.0,41=0.0,42=0.0,43=0.0,44=0.0,45=0.0,46=0.0,47=0.0,48=0.0,49=0.0,50=0.0,51=0.0,52=0.0,53=0.0,54=0.0,55=0.0,56=0.0,57=0.0,58=0.0,59=0.0,60=0.0,61=0.0,62=0.0,63=0.0" + elif interpolation_style == "Linear": + interpolation_settings = 
"0=1.0,1=0.99,2=0.97,3=0.95,4=0.92,5=0.9,6=0.86,7=0.83,8=0.79,9=0.75,10=0.71,11=0.67,12=0.62,13=0.58,14=0.53,15=0.49,16=0.44,17=0.39,18=0.35,19=0.31,20=0.26,21=0.22,22=0.19,23=0.15,24=0.12,25=0.09,26=0.06,27=0.04,28=0.02,29=0.01,30=0.0,31=0.0,32=0.0,33=0.0,34=0.0,35=0.0,36=0.0,37=0.0,38=0.0,39=0.0,40=0.0,41=0.0,42=0.0,43=0.0,44=0.0,45=0.0,46=0.0,47=0.0,48=0.0,49=0.0,50=0.0,51=0.0,52=0.0,53=0.0,54=0.0,55=0.0,56=0.0,57=0.0,58=0.0,59=0.0,60=0.0,61=0.0,62=0.0,63=0.0" + elif interpolation_style == "Slerp": + interpolation_settings = "0=1.0,1=0.99,2=0.97,3=0.95,4=0.92,5=0.9,6=0.86,7=0.83,8=0.79,9=0.75,10=0.71,11=0.67,12=0.62,13=0.58,14=0.53,15=0.49,16=0.44,17=0.39,18=0.35,19=0.31,20=0.26,21=0.22,22=0.19,23=0.15,24=0.12,25=0.09,26=0.06,27=0.04,28=0.02,29=0.01,30=0.0,31=0.0,32=0.0,33=0.0,34=0.0,35=0.0,36=0.0,37=0.0,38=0.0,39=0.0,40=0.0,41=0.0,42=0.0,43=0.0,44=0.0,45=0.0,46=0.0,47=0.0,48=0.0,49=0.0,50=0.0,51=0.0,52=0.0,53=0.0,54=0.0,55=0.0,56=0.0,57=0.0,58=0.0,59=0.0,60=0.0,61=0.0,62=0.0,63=0.0" + if interpolation_style == "Custom": + interpolation_settings = st.text_area("Custom Interpolation Style", value="0=1.0,1=0.99,2=0.97,3=0.95,4=0.92,5=0.9,6=0.86,7=0.83,8=0.79,9=0.75,10=0.71,11=0.67,12=0.62,13=0.58,14=0.53,15=0.49,16=0.44,17=0.39,18=0.35,19=0.31,20=0.26,21=0.22,22=0.19,23=0.15,24=0.12,25=0.09,26=0.06,27=0.04,28=0.02,29=0.01,30=0.0,31=0.0,32=0.0,33=0.0,34=0.0,35=0.0,36=0.0,37=0.0,38=0.0,39=0.0,40=0.0,41=0.0,42=0.0,43=0.0,44=0.0,45=0.0,46=0.0,47=0.0,48=0.0,49=0.0,50=0.0,51=0.0,52=0.0,53=0.0,54=0.0,55=0.0,56=0.0,57=0.0,58=0.0,59=0.0,60=0.0,61=0.0,62=0.0,63=0.0", key="custom_interpolation_style") - st.markdown("***") - - shot: InternalShotObject = data_repo.get_shot_from_uuid(st.session_state["shot_uuid"]) - timing_list: List[InternalFrameTimingObject] = shot.timing_list - st.markdown("#### Keyframe Settings") - if timing_list and len(timing_list): - columns = st.columns(len(timing_list)) # Create columns equal to the number of images - for idx, timing in 
enumerate(timing_list): - if timing.primary_image and timing.primary_image.location: - columns[idx].image(timing.primary_image.location, use_column_width=True) - b = timing.primary_image.inference_params - prompt = columns[idx].text_area(f"Prompt {idx+1}", value=(b['prompt'] if b else ""), key=f"prompt_{idx+1}") - else: - columns[idx].warning("No primary image present") - else: - st.warning("No keyframes present") - - st.markdown("***") - video_resolution = None - - settings = { - "animation_tool": animation_type - } - - st.markdown("#### Overall Settings") - - c1, c2 = st.columns([1,1]) - with c1: - motion_module = st.selectbox("Which motion module would you like to use?", options=motion_modules, key="motion_module") - with c2: - sd_model_list = [ - "Realistic_Vision_V5.0.safetensors", - "Counterfeit-V3.0_fp32.safetensors", - "epic_realism.safetensors", - "dreamshaper_v8.safetensors", - "deliberate_v3.safetensors" - ] - sd_model = st.selectbox("Which Stable Diffusion model would you like to use?", options=sd_model_list, key="sd_model") - - d1, d2 = st.columns([1, 1]) - - with d1: - ip_adapter_strength = st.slider("IP Adapter Strength", min_value=0.0, max_value=1.0, value=0.5, step=0.1, key="ip_adapter_strength") - - with d2: - ip_adapter_noise = st.slider("IP Adapter Noise", min_value=0.0, max_value=1.0, value=0.5, step=0.1, key="ip_adapter_noise") - - interpolation_style = st.selectbox("Interpolation Style", options=["Big Dipper", "Linear", "Slerp", "Custom"], key="interpolation_style") - if interpolation_style == "Big Dipper": - interpolation_settings = 
"0=1.0,1=0.99,2=0.97,3=0.95,4=0.92,5=0.9,6=0.86,7=0.83,8=0.79,9=0.75,10=0.71,11=0.67,12=0.62,13=0.58,14=0.53,15=0.49,16=0.44,17=0.39,18=0.35,19=0.31,20=0.26,21=0.22,22=0.19,23=0.15,24=0.12,25=0.09,26=0.06,27=0.04,28=0.02,29=0.01,30=0.0,31=0.0,32=0.0,33=0.0,34=0.0,35=0.0,36=0.0,37=0.0,38=0.0,39=0.0,40=0.0,41=0.0,42=0.0,43=0.0,44=0.0,45=0.0,46=0.0,47=0.0,48=0.0,49=0.0,50=0.0,51=0.0,52=0.0,53=0.0,54=0.0,55=0.0,56=0.0,57=0.0,58=0.0,59=0.0,60=0.0,61=0.0,62=0.0,63=0.0" - elif interpolation_style == "Linear": - interpolation_settings = "0=1.0,1=0.99,2=0.97,3=0.95,4=0.92,5=0.9,6=0.86,7=0.83,8=0.79,9=0.75,10=0.71,11=0.67,12=0.62,13=0.58,14=0.53,15=0.49,16=0.44,17=0.39,18=0.35,19=0.31,20=0.26,21=0.22,22=0.19,23=0.15,24=0.12,25=0.09,26=0.06,27=0.04,28=0.02,29=0.01,30=0.0,31=0.0,32=0.0,33=0.0,34=0.0,35=0.0,36=0.0,37=0.0,38=0.0,39=0.0,40=0.0,41=0.0,42=0.0,43=0.0,44=0.0,45=0.0,46=0.0,47=0.0,48=0.0,49=0.0,50=0.0,51=0.0,52=0.0,53=0.0,54=0.0,55=0.0,56=0.0,57=0.0,58=0.0,59=0.0,60=0.0,61=0.0,62=0.0,63=0.0" - elif interpolation_style == "Slerp": - interpolation_settings = "0=1.0,1=0.99,2=0.97,3=0.95,4=0.92,5=0.9,6=0.86,7=0.83,8=0.79,9=0.75,10=0.71,11=0.67,12=0.62,13=0.58,14=0.53,15=0.49,16=0.44,17=0.39,18=0.35,19=0.31,20=0.26,21=0.22,22=0.19,23=0.15,24=0.12,25=0.09,26=0.06,27=0.04,28=0.02,29=0.01,30=0.0,31=0.0,32=0.0,33=0.0,34=0.0,35=0.0,36=0.0,37=0.0,38=0.0,39=0.0,40=0.0,41=0.0,42=0.0,43=0.0,44=0.0,45=0.0,46=0.0,47=0.0,48=0.0,49=0.0,50=0.0,51=0.0,52=0.0,53=0.0,54=0.0,55=0.0,56=0.0,57=0.0,58=0.0,59=0.0,60=0.0,61=0.0,62=0.0,63=0.0" - if interpolation_style == "Custom": - interpolation_settings = st.text_area("Custom Interpolation Style", 
value="0=1.0,1=0.99,2=0.97,3=0.95,4=0.92,5=0.9,6=0.86,7=0.83,8=0.79,9=0.75,10=0.71,11=0.67,12=0.62,13=0.58,14=0.53,15=0.49,16=0.44,17=0.39,18=0.35,19=0.31,20=0.26,21=0.22,22=0.19,23=0.15,24=0.12,25=0.09,26=0.06,27=0.04,28=0.02,29=0.01,30=0.0,31=0.0,32=0.0,33=0.0,34=0.0,35=0.0,36=0.0,37=0.0,38=0.0,39=0.0,40=0.0,41=0.0,42=0.0,43=0.0,44=0.0,45=0.0,46=0.0,47=0.0,48=0.0,49=0.0,50=0.0,51=0.0,52=0.0,53=0.0,54=0.0,55=0.0,56=0.0,57=0.0,58=0.0,59=0.0,60=0.0,61=0.0,62=0.0,63=0.0", key="custom_interpolation_style") - - st.markdown("***") - st.markdown("#### Generation Settings") - animate_col_1, _, _ = st.columns([1, 1, 2]) - - with animate_col_1: - # img_dimension_list = ["512x512", "512x768", "768x512"] - # img_dimension = st.selectbox("Image Dimension:", options=img_dimension_list, key="img_dimension") - img_dimension = "512x512" - variant_count = st.number_input("How many variants?", min_value=1, max_value=100, value=1, step=1, key="variant_count") - - normalise_speed = st.checkbox("Normalise Speed", value=True, key="normalise_speed") - - settings.update( - # positive_prompt=positive_prompt, - # negative_prompt=negative_prompt, - image_dimension=img_dimension, - sampling_steps=30, - motion_module=motion_module, - model=sd_model, - normalise_speed=normalise_speed - ) + st.markdown("***") + st.markdown("#### Generation Settings") + animate_col_1, _, _ = st.columns([1, 1, 2]) + + with animate_col_1: + # img_dimension_list = ["512x512", "512x768", "768x512"] + # img_dimension = st.selectbox("Image Dimension:", options=img_dimension_list, key="img_dimension") + project_settings = data_repo.get_project_setting(shot.project.uuid) + width = project_settings.width + height = project_settings.height + img_dimension = f"{width}x{height}" + variant_count = st.number_input("How many variants?", min_value=1, max_value=100, value=1, step=1, key="variant_count") + normalise_speed = True + # normalise_speed = st.checkbox("Normalise Speed", value=True, key="normalise_speed") + + 
settings.update( + # positive_prompt=positive_prompt, + # negative_prompt=negative_prompt, + image_dimension=img_dimension, + sampling_steps=30, + motion_module=motion_module, + model=sd_model, + normalise_speed=normalise_speed + ) + + elif animation_type == "Image To Video": - elif animation_tool == AnimationToolType.G_FILM.value: - video_resolution = st.selectbox("Video Resolution:", options=["Full Resolution", "Preview"], key="video_resolution") - - elif current_animation_style == AnimationStyleType.IMAGE_TO_VIDEO.value: st.info("For image to video, you can select one or more prompts, and how many frames you want to generate for each prompt - it'll attempt to travel from one prompt to the next.") which_motion_module = st.selectbox("Which motion module would you like to use?", options=motion_modules, key="which_motion_module") @@ -149,7 +155,7 @@ def animation_style_element(shot_uuid): with animate_col_1: variant_count = st.number_input("How many variants?", min_value=1, max_value=100, value=1, step=1, key="variant_count") - if st.button("Generate Animation Clip", key="generate_animation_clip"): + if st.button("Generate Animation Clip", key="generate_animation_clip", disabled=disable_generate, help=help): vid_quality = "full" if video_resolution == "Full Resolution" else "preview" st.write("Generating animation clip...") settings.update(animation_style=current_animation_style) diff --git a/ui_components/widgets/cropping_element.py b/ui_components/widgets/cropping_element.py index 557468b9..f5cf1f10 100644 --- a/ui_components/widgets/cropping_element.py +++ b/ui_components/widgets/cropping_element.py @@ -21,9 +21,11 @@ def cropping_selector_element(shot_uuid): selector1, selector2, _ = st.columns([1, 1, 1]) with selector1: - crop_stage = st_memory.radio("Which stage to work on?", ["Styled Key Frame", "Unedited Key Frame"], key="crop_stage", horizontal=True) - with selector2: + # crop_stage = st_memory.radio("Which stage to work on?", ["Styled Key Frame", "Unedited 
Key Frame"], key="crop_stage", horizontal=True) + crop_stage = "Styled Key Frame" how_to_crop = st_memory.radio("How to crop:", options=["Precision Cropping","Manual Cropping"], key="how_to_crop",horizontal=True) + + if crop_stage == "Styled Key Frame": stage_name = WorkflowStageType.STYLED.value @@ -65,7 +67,7 @@ def precision_cropping_element(stage, shot_uuid): st.caption("Output Image:") output_image = apply_image_transformations( - input_image, st.session_state['zoom_level_input'], st.session_state['rotation_angle_input'], st.session_state['x_shift'], st.session_state['y_shift']) + input_image, st.session_state['zoom_level_input'], st.session_state['rotation_angle_input'], st.session_state['x_shift'], st.session_state['y_shift'], st.session_state['flip_vertically'], st.session_state['flip_horizontally']) st.image(output_image, use_column_width=True) if st.button("Save Image"): @@ -157,32 +159,12 @@ def get_working_image(): cropbtn1, cropbtn2 = st.columns(2) with cropbtn1: - if st.button("Save Cropped Image"): - if stage == WorkflowStageType.SOURCE.value: - # resize the image to the original width and height - cropped_img = cropped_img.resize( - (width, height), Image.ANTIALIAS) - # generate a random filename and save it to /temp - file_path = f"videos/temp/{uuid.uuid4()}.png" - hosted_url = save_or_host_file(cropped_img, file_path) - - file_data = { - "name": str(uuid.uuid4()), - "type": InternalFileType.IMAGE.value, - "project_id": project_uuid - } - - if hosted_url: - file_data.update({'hosted_url': hosted_url}) - else: - file_data.update({'local_path': file_path}) - cropped_image: InternalFileObject = data_repo.create_file(**file_data) - - st.success("Cropped Image Saved Successfully") - data_repo.update_specific_timing( - st.session_state['current_frame_uuid'], source_image_id=cropped_image.uuid) - time.sleep(1) + if st.button("Save Cropped Image"): + save_zoomed_image(cropped_img, st.session_state['current_frame_uuid'], stage, promote=True) + 
st.success("Image saved successfully!") + time.sleep(0.5) st.rerun() + with cropbtn2: st.warning("Warning: This will overwrite the original image") diff --git a/ui_components/widgets/explorer_element.py b/ui_components/widgets/explorer_element.py index 364d2b3a..3705162a 100644 --- a/ui_components/widgets/explorer_element.py +++ b/ui_components/widgets/explorer_element.py @@ -214,8 +214,8 @@ def gallery_image_view(project_uuid,page_number=1,num_items_per_page=20, open_de # removing this from the gallery view data_repo.update_file(gallery_image_list[i + j].uuid, tag="") else: - st.error('Max frame limit reached') - time.sleep(0.3) + st.error('We currently only allow 5 frames per shot.') + time.sleep(0.4) st.rerun() else: diff --git a/ui_components/widgets/image_zoom_widgets.py b/ui_components/widgets/image_zoom_widgets.py index 392570d2..e97d0fbd 100644 --- a/ui_components/widgets/image_zoom_widgets.py +++ b/ui_components/widgets/image_zoom_widgets.py @@ -10,27 +10,29 @@ def zoom_inputs(position='in-frame', horizontal=False): if horizontal: - col1, col2, col3, col4 = st.columns(4) + col1, col2, col3, col4, col5, col6 = st.columns(6) else: - col1 = col2 = col3 = col4 = st + col1 = col2 = col3 = col4 = col5 = col6 = st - zoom_level_input = col1.number_input( - "Zoom Level (%)", min_value=10, max_value=1000, step=10, key=f"zoom_level_input_key_{position}", value=st.session_state.get('zoom_level_input', 100)) + col1.number_input( + "Zoom In/Out", min_value=10, max_value=1000, step=10, key=f"zoom_level_input_{position}", value=100) - rotation_angle_input = col2.number_input( - "Rotation Angle", min_value=-360, max_value=360, step=5, key=f"rotation_angle_input_key_{position}", value=st.session_state.get('rotation_angle_input', 0)) + col2.number_input( + "Rotate Clockwise/Counterclockwise", min_value=-360, max_value=360, step=5, key=f"rotation_angle_input_{position}", value=0) - x_shift = col3.number_input( - "Shift Left/Right", min_value=-1000, max_value=1000, step=5, 
key=f"x_shift_key_{position}", value=st.session_state.get('x_shift', 0)) + col3.number_input( + "Shift Left/Right", min_value=-1000, max_value=1000, step=5, key=f"x_shift_{position}", value=0) + + col4.number_input( + "Shift Down/Up", min_value=-1000, max_value=1000, step=5, key=f"y_shift_{position}", value=0) + + col5.checkbox( + "Flip Vertically", key=f"flip_vertically_{position}", value=False) + + col6.checkbox( + "Flip Horizontally", key=f"flip_horizontally_{position}", value=False) + - y_shift = col4.number_input( - "Shift Up/Down", min_value=-1000, max_value=1000, step=5, key=f"y_shift_key_{position}", value=st.session_state.get('y_shift', 0)) - - # Assign values to st.session_state - st.session_state['zoom_level_input'] = zoom_level_input - st.session_state['rotation_angle_input'] = rotation_angle_input - st.session_state['x_shift'] = x_shift - st.session_state['y_shift'] = y_shift def save_zoomed_image(image, timing_uuid, stage, promote=False): data_repo = DataRepo() diff --git a/ui_components/widgets/shot_view.py b/ui_components/widgets/shot_view.py index 6a1195fa..aacdeb77 100644 --- a/ui_components/widgets/shot_view.py +++ b/ui_components/widgets/shot_view.py @@ -26,8 +26,7 @@ def shot_keyframe_element(shot_uuid, items_per_row, **kwargs): if st.session_state["open_shot"] == shot.uuid: with header_col_0: - update_shot_name(shot.uuid) - duplicate_shot_button(shot.uuid) + update_shot_name(shot.uuid) if not st.toggle("Expand", key=f"close_shot_{shot.uuid}", value=True): st.session_state["open_shot"] = None st.rerun() @@ -71,7 +70,7 @@ def shot_keyframe_element(shot_uuid, items_per_row, **kwargs): st.info("**Add new frame to shot**") selected_image, inherit_styling_settings, _ = add_key_frame_section(shot_uuid, False) if st.button(f"Add key frame",type="primary",use_container_width=True): - add_key_frame(selected_image, inherit_styling_settings, shot_uuid) + add_key_frame(selected_image, "No", shot_uuid) st.rerun() else: timing = timing_list[idx] @@ -81,35 
+80,46 @@ def shot_keyframe_element(shot_uuid, items_per_row, **kwargs): st.warning("No primary image present") if st.session_state["open_shot"] == shot.uuid: timeline_view_buttons(idx, shot_uuid, replace_image_widget_toggle, copy_frame_toggle, move_frames_toggle,delete_frames_toggle, change_shot_toggle) - st.markdown("***") - st.markdown("***") + if (i < len(timing_list) - 1) or (st.session_state["open_shot"] == shot.uuid) or (len(timing_list) % items_per_row != 0 and st.session_state["open_shot"] != shot.uuid): + st.markdown("***") + # st.markdown("***") if st.session_state["open_shot"] == shot.uuid: - bottom1, _, bottom3 = st.columns([1,2,1]) - with bottom1: + st.markdown("##### Admin stuff:") + bottom1, bottom2, bottom3, _ = st.columns([1,1,1,3]) + with bottom1: + st.error("Delete:") delete_shot_button(shot.uuid) + + with bottom2: + st.warning("Duplicate:") + duplicate_shot_button(shot.uuid) with bottom3: - if st.button("Move shot up", key=f'shot_up_movement_{shot.uuid}'): - if shot.shot_idx > 0: - data_repo.update_shot(shot_uuid, shot_idx=shot.shot_idx-1) - else: - st.error("This is the first shot") - time.sleep(0.3) - st.rerun() - if st.button("Move shot down", key=f'shot_down_movement_{shot.uuid}'): - shot_list = data_repo.get_shot_list(shot.project.uuid) - if shot.shot_idx < len(shot_list): - data_repo.update_shot(shot_uuid, shot_idx=shot.shot_idx+1) - else: - st.error("This is the last shot") - time.sleep(0.3) - st.rerun() + st.info("Move:") + move1, move2 = st.columns(2) + with move1: + if st.button("⬆️", key=f'shot_up_movement_{shot.uuid}', help="Move shot up", use_container_width=True): + if shot.shot_idx > 0: + data_repo.update_shot(shot_uuid, shot_idx=shot.shot_idx-1) + else: + st.error("This is the first shot") + time.sleep(0.3) + st.rerun() + with move2: + if st.button("⬇️", key=f'shot_down_movement_{shot.uuid}', help="Move shot down", use_container_width=True): + shot_list = data_repo.get_shot_list(shot.project.uuid) + if shot.shot_idx < 
len(shot_list): + data_repo.update_shot(shot_uuid, shot_idx=shot.shot_idx+1) + else: + st.error("This is the last shot") + time.sleep(0.3) + st.rerun() def duplicate_shot_button(shot_uuid): data_repo = DataRepo() shot = data_repo.get_shot_from_uuid(shot_uuid) - if st.button("Duplicate shot", key=f"duplicate_btn_{shot.uuid}"): + if st.button("Duplicate shot", key=f"duplicate_btn_{shot.uuid}", help="This will duplicate this shot.", use_container_width=True): data_repo.duplicate_shot(shot.uuid) st.success("Shot duplicated successfully") time.sleep(0.3) @@ -118,9 +128,9 @@ def duplicate_shot_button(shot_uuid): def delete_shot_button(shot_uuid): data_repo = DataRepo() shot = data_repo.get_shot_from_uuid(shot_uuid) - confirm_delete = st.checkbox("I know that this will delete all the frames and videos within") - help_text = "Check the box above to enable the delete button." if confirm_delete else "" - if st.button("Delete shot", disabled=(not confirm_delete), help=help_text, key=shot.uuid): + confirm_delete = st.checkbox("I know that this will delete all the frames and videos within") + help_text = "Check the box above to enable the delete button." if not confirm_delete else "This will this shot and all the frames and videos within." 
+ if st.button("Delete shot", disabled=(not confirm_delete), help=help_text, key=shot.uuid, use_container_width=True): data_repo.delete_shot(shot.uuid) st.success("Shot deleted successfully") time.sleep(0.3) @@ -151,7 +161,7 @@ def shot_video_element(shot_uuid): shot: InternalShotObject = data_repo.get_shot_from_uuid(shot_uuid) - st.markdown(f"#### {shot.name}") + st.info(f"##### {shot.name}") if shot.main_clip and shot.main_clip.location: st.video(shot.main_clip.location) else: diff --git a/ui_components/widgets/styling_element.py b/ui_components/widgets/styling_element.py index e75918e8..71f964d8 100644 --- a/ui_components/widgets/styling_element.py +++ b/ui_components/widgets/styling_element.py @@ -53,7 +53,7 @@ def styling_element(timing_uuid, view_type=ViewType.SINGLE.value): if stages.index(st.session_state["transformation_stage"]) != st.session_state[f'frame_styling_stage_index_{append_to_item_name}']: st.session_state[f'frame_styling_stage_index_{append_to_item_name}'] = stages.index(st.session_state["transformation_stage"]) - st.rerun() + # st.rerun() # -------------------- Model Selection -------------------- # diff --git a/utils/st_memory.py b/utils/st_memory.py index 96aa9bd3..f456c109 100644 --- a/utils/st_memory.py +++ b/utils/st_memory.py @@ -93,6 +93,20 @@ def toggle(label, value=True,key=None, help=None, on_change=None, disabled=False return selection +def checkbox(label, value=True,key=None, help=None, on_change=None, disabled=False, label_visibility="visible"): + + if key not in st.session_state: + st.session_state[key] = value + + selection = st.checkbox(label=label, value=st.session_state[key], help=help, on_change=on_change, disabled=disabled, label_visibility=label_visibility, key=f"{key}_value") + + if selection != st.session_state[key]: + st.session_state[key] = selection + st.rerun() + + return selection + + def menu(menu_title,options, icons=None, menu_icon=None, default_index=0, key=None, help=None, on_change=None, disabled=False, 
orientation="horizontal", default_value=0, styles=None): if key not in st.session_state: @@ -119,3 +133,4 @@ def text_area(label, value='', height=None, max_chars=None, key=None, help=None, return selection + From b1fe13e6bae1af6fd97abb41db925c432e465e2a Mon Sep 17 00:00:00 2001 From: piyushK52 Date: Tue, 7 Nov 2023 18:12:28 +0530 Subject: [PATCH 164/164] hosting flag added --- django_settings.py | 32 ++++++++++++++++++++++++++------ requirements.txt | 3 ++- shared/constants.py | 3 ++- 3 files changed, 30 insertions(+), 8 deletions(-) diff --git a/django_settings.py b/django_settings.py index d2868357..eeb11749 100644 --- a/django_settings.py +++ b/django_settings.py @@ -7,7 +7,7 @@ from dotenv import load_dotenv -from shared.constants import LOCAL_DATABASE_NAME, SERVER, ServerType +from shared.constants import HOSTED_BACKGROUND_RUNNER_MODE, LOCAL_DATABASE_NAME, SERVER, ServerType load_dotenv() @@ -19,12 +19,32 @@ BASE_DIR = Path(__file__).resolve().parent.parent -DATABASES = { - 'default': { - 'ENGINE': 'django.db.backends.sqlite3', - 'NAME': DB_LOCATION, +if not HOSTED_BACKGROUND_RUNNER_MODE: + DATABASES = { + 'default': { + 'ENGINE': 'django.db.backends.sqlite3', + 'NAME': DB_LOCATION, + } + } +else: + import boto3 + ssm = boto3.client("ssm", region_name="ap-south-1") + DB_NAME = ssm.get_parameter(Name='/backend/banodoco/db/name')['Parameter']['Value'] + DB_USER = ssm.get_parameter(Name='/backend/banodoco/db/user')['Parameter']['Value'] + DB_PASS = ssm.get_parameter(Name='/backend/banodoco/db/password')['Parameter']['Value'] + DB_HOST = ssm.get_parameter(Name='/backend/banodoco/db/host')['Parameter']['Value'] + DB_PORT = ssm.get_parameter(Name='/backend/banodoco/db/port')['Parameter']['Value'] + + DATABASES = { + 'default': { + 'ENGINE': 'django.db.backends.postgresql', + 'NAME': DB_NAME, + 'USER': DB_USER, + 'PASSWORD': DB_PASS, + 'HOST': DB_HOST, + 'PORT': DB_PORT, + } } -} INSTALLED_APPS = ('backend',) diff --git a/requirements.txt b/requirements.txt index 
ed8d0394..9bc66097 100644 --- a/requirements.txt +++ b/requirements.txt @@ -34,4 +34,5 @@ httpx-oauth==0.13.0 extra-streamlit-components==0.1.56 wrapt==1.15.0 pydantic==1.10.9 -streamlit-server-state==0.17.1 \ No newline at end of file +streamlit-server-state==0.17.1 +setproctitle==1.3.3 \ No newline at end of file diff --git a/shared/constants.py b/shared/constants.py index 9aaff064..d5f9b499 100644 --- a/shared/constants.py +++ b/shared/constants.py @@ -115,4 +115,5 @@ class SortOrder(ExtendedEnum): LOCAL_DATABASE_NAME = 'banodoco_local.db' ENCRYPTION_KEY = 'J2684nBgNUYa_K0a6oBr5H8MpSRW0EJ52Qmq7jExE-w=' -QUEUE_INFERENCE_QUERIES = True \ No newline at end of file +QUEUE_INFERENCE_QUERIES = True +HOSTED_BACKGROUND_RUNNER_MODE = False \ No newline at end of file

z)1ON$!5jpTK@x_uUlp$aKaYHTL`ioZzOIYwr}1~GOHWp$j#B|d znBCw^ki;X5F1X@)Wt>PD8%Ew97}kAZt;O+hgz0-|EpAv^X&k;kv2%gxTPf6fF4pey zP${=9c;O=`hOZoeDu7fx0XS&I7cKd8mk5VbD2mkgJ=n%f5o-icS5*9TNGjg`G1bHu z)-Z_LyK$lRr@25>RIGv2%Hpj?UU_I4Fh;DS-U32I==&bKt_?0&KW^m}?x28Jt*(y) zc^>?nJFWOp2-+X{3qWoGi2>*j>|{m7OF&kcF)q8@9W7O90kx%t)KYhpL0@k|4+{10 z)FBm{1P?xM@nLPTPXcn*q{H_C)>^#3zeACNF$TMx)CN(R1oAViig`w{!g96B{jWGD z7%}S^h5rVi0i8Qd_hw>1F3CXj$~blZ!O>?%TYrx+99|=2VfH4{L1GrpEC)+B@-5Q6 zk&tQ8#25?Hia`|PH>}JhYEk$~ z8XqN}CB7oI&Y69zWmImGu;>hdfZ6kr`N)-hH+`V5l(};$(L_&iczPO7A1eG5nzr}13jmEy_3X1tP z&x_|>&J91k3NMeZn0;+K#+$Wb3I7DQzP2MfsghhTrHNMV5ZrbfRBBTV{c%B-zquT zLk$WyaNorjU+fDujff z$VG!%te4TOQY!o1OCG7V@0w>pp`f4tqF^ND%dE#?={o`Ba5%yj1 zEHD#8Vc9}kB?(yknpbr3W6H!f#|L<_kv zgcw7P^c#>VKvvqbmR+Ghe<`nIJ@-C`%1Tc!L+?i)P$D=vK`NAGvK0DWU9c)%8*Z^! zD*ragVsi;^3!KKN)#`aRaPR|%YQn)K+g4D_> zg)1s5PQw?}n}&C9rVs~0MCiH>yL}5|dsuTs+iJ9310ukf`_~cLf)J7lc0Y-ZRRN@8 z99wZy32q$-@8>rmmXDyp*~-1$4colmLHzHN)7?jJ9BcV9Uvw87HKb`e(C~A%hQ*C! zQKM(WyHz5Z{72UOPj|zsxB?rTI$N60rKN~1<9Iyc_wP@9e0<>R>kGTxPSz&_ZP%b{ z6#xUv{X_2Mrb5UWx3i_^cn{PmmlN8QiM)eloij6}8MayZe`Fn_JJ_sM`$QSoEpI87*6 z0N;mi9FIpRrC@Cj<{rDQL)Uhq#L)CSvg!&LqlY$Fh+m&JRRN^pWsqt!@*fCn8U3+` zHFoors<)221Q}O66)a1kF>B538T-VQi-@39=`GZ{sus@Q|d;eYa29;f)43;f8PFLf1t9hx?ojo8Yy{WNYER@Gr+P~ zEwdCdDv+kAvsQ>*X)di8gT zk^(Eg-C%CWHAH7QkLv<@R zC;~-Gh+$VEA*oNmHg@dXyW)h-CC}5(O9C>hCR^_&IG&NqyI`%Cac7w`fqx!^WGK=f zp91G;sD&QoaUkTH38{zylnIsYw{Fw>aV|ql0J~?(92d4v_sj@NuXSe7*qC}=BF02$ zt;OHp3D2puFJ@3u$t3eV6gMb|(=t#lR?1j8CQQb2jQd8)z2dc*9q@V^P5AOdOBP$d2CasuOWz}fs}#qNAh$+XV#Fg zPc*~+{&V-MxJld zDrFY;UAniWG<~j5L(ThgitPI=?O4k&J>&3o1hc{)fBYdrkg-rop>e`htmSiRpuP>3xOhDluV8sy?G9)wPvtaxO@I&s@-U8a?z)^= zKkn1d)0=Qb#W=Wl-yX(k^>9BS=NWRe2A|*GI39bvy}di3#E8}1Wge64V-I9aS6-$* zV^t`Id?T7O)EjPr-khd;RRF1YDkQqy0n*fV<(#*c(VHH|nB2r#u{pee{PE+!)}{Ki z-pZK%`UrR4@#`@o2p*3WlqxD-0F%plWL=SXBOH!L{QbXwLs0|M8`z$qAgQyJQj%8o zmVKVVW$s?n;DOSk#-Vyd7cAOBG5U%}fw|PP4o$CfC>O1`KymgvpZI&M4WF?p_-hw^ 
zlHx($zu3Kq#O%pN{8lZ)^gT>(LhO3fEnRME-6{spVV22?=}V^wZ!o(Cx0SWctvirpD#VA7J8{xO(n>9^|Nhs%@c;bZ|2M>2NepV! zoajWJ1*;O`AB!bY`L!Zynw4to@2RT_Q)6I_4=Rn5+W#+`hD*W86kI}#mEudsqk-vd z_$~UGzT%uO?`ej*(K5HEObEHKXY+`~>zM6JKAq=B;%)4Ge?5a^8OB-|SNt!;vw*Yb z8bUIXR)<9>hwGCN1qScdk^LUxV5VHx@>(P!s8mOQTO$gW<11Pi*ZUOjJdK=LcVFd4 z_^R2SvA<)VQ$b)Ud_VoUx#iENvvz`pW2A{l0?om|Ou<5Pz-e>>$a6IDU^EY<8 z9onWr)3)d>qfy!wOQkR$XrvI}tw)x{OroaYtr$naWO|Lj13D6f4)5Y|Y9j+&As@@(-SifH-^1APj5t3XgwxiK zii%kfLgp#cn^0>*krF1~slanf8U~pc*FzLCV#XQC=#$2OSj$jV{-}6ilmf_ENY^a9 zMd*rkN(5$$pDLJ`#ub9VB^IRdg#Tl&pc&!eO0Ih{AqwEPMyov#?Ul-GBs|*sJaKXhnF0mu- znn7tOmW95z_~+v{x?ShY3gh<5wK1pzR(g11=%!U-;5NQls=VbLL-79t00960?7hvF zeVsSu=}zi8=jB(+@KJB-88ZUG=;~_nA7UYwF&rn;8)q z?kby+)&79D<0k$HMk|T7ZIc)#2=Y4P* zYRB1R-$qy8p-0o})e)lz+R=Sl(!+v5^0Cr?GT~pm2RN?-(*0e~rY3)^-nWlan6rDulri)vH3iSt_{_AtgoGtTatQ=*n7fV(nQN66p+w_?1KPkH z&asXsEU%|GY)goj>43ZF5s#VFt;Mzh@KOseb0@Q6wKqkzy#9M1!A3YDN9I?V4kw(4#m)&azxJ=Q^pett%Q@b!HQ&ckPbJkz@?0K2SN z&)@zv*N6cTJ@_CzEP(W4Rrd5Tzs8Bci+U6okOofsp!i@0A|DfDFIsH}g0wzX-OjOVyX!v`&4>0!TXo=j>b$|;`=;K zzX5^Gi~0-%>8J1Bgl?haybGZSr2*gwU;_$un9{r@erIex4$mn?9#5JoBQX&}bJ5|E zTxcpy*-K@r>sm&nOCmqAXwxry%sSSTX_j5=!Lv37vmQ7I?}8zYE+gKBMsd zn7hnVy*Lq5tse{C4_X%EfIP?ckY9K7OZU!wTu$)K(8D15?=M&|itau;)3S-lC5F(> zaMe|SELgB$4scFX0C|3fDGbWh3Pn+d0B?c7LroCzwd~{fh`Sa*76iO=hPowaW^^cK zx9JKX&2^Nn^G-76`JGSL7)}_z_(iZ^FbyHZeE4&Tg$l>l(%2+Fe`F>e>DqBA&n^+tWY0Oa%)-H&J~;P>9JxM1BOdRZFA0~n6K2OTz}o`xaD zW4{b6qH#9H7!WbMb&bEOXz-%<0>f=zVdtVvP=mwjTI+xS>VNLtl=>s@7kQiw5aTb} zzM0*Fh!~oa{lZLJH#5?L`#|IRMA&U>)K%TMPk><9z{2)^bI7faDAef~fCqy;;2AK{ z*IZ&|ErOF63(^Tf_X6u&$c*RVoCS0F*5NPsK)4ls5)l}qXI+e)8j2S^bhdSCIT15B zSsX;;p+?#mXf(A;MB38G961O|Xblm=jMv>OzW(+V^=_wbSmaO?8b!CODtKX#StXa& zeqxmc*Fv%|W4{;DRJ=O@WMN+37u0=9U<@F#icwDh#)+*Lp90*xbBXaD4D$cD^2lt^ zD-gHfZjoCfQ${-9SU(mzyIHcbFbRs1@yB0&#^*0T z;rs8;`0HQ4Vq4j8PA`r-?*;Ep09o+5dzgz_%J3*^BGLE#02r04Xd^_ zs;Y)_4!M*uCcG@T4Hm}qeIVBTAgCq6yG5W}u;3cF_Strdz$4w5y5#EHVDI6EkYRmQoGuk^AfUt-H&_@35Sz;2=v*t 
zS~#hHV+?#&!y~!0weQQ)j722mix-{Ksm#Mr8JT=Ei8=8S)^)$-vE(PWO3fH)B@t`E zDiR2J2x5(y*zs1?stGM;)EtyWVvmb3>gJ?=)gZtS!3$@TNGauJEEpICcpVF+!~YZ( zG5+2;huYSty=@MvSSdX+p8`NQq+=boDbIz=IKvV?zjtBWd*3=ghlO z6om=A0kxW?QAO1he(V};Ou^fC1$lvC1_-Fd=`)drhn&Wm9v+JXW;80>B4yoMpM{X6 z-p;cU3K*?Igq_WkdVL5VN%#Qz1m^tXA7a{$TR?-Yj$ScbWBhFx5nl#BWj2YJmxaN@ z!x~>cKjF(yU-0;_$;_`hA(3J79x$e1FaTwmo%8U%%~D-Xd(v&U^L}JIU41uEb1>~i zDuo*v$4qcJ^1$uirtK03;G$K9&xzd4fk>%re*7wekP<$cB9M1(l4%?!PD=2{4rLL+ z1M@jl_>Y+fQ&=C1DA;JAch=s4=X(4dWB-v6%t#=+>b~LDFYl(3D;_8*k3QOonIk5^!k)<49o;3LRpyLGVLJ_5U=M^ zVGN3*fH4J(u>esJ{1{E)&?)@sTX)>G1d#K9@Kz!-?J1_KCfsZ25(<*Z5(Bc}MrgDg zIT2%K7-O(nm3VmA;PG*dPfw3{e0n&H5CDqO4Ox*B-QjoDT0joON*4Hfgahwr1X>nYjvDTWsE4 zU9g~ymJ+1oJ+9yWEv48OEcgHrsE}bgwB)Us>_h~n zC}4^LV1f%4Q!f(IYEw{TSkzU8x{hhR1(4|Yv*4aVbjJe7M`9!+a}ZybkpBDgIp8qU zy{79fcG5Eu5DADFt960TpP%sg%O|W>WyleZPzNN8J(`e{X*60r2(dOuw02nQu&XLq z>)z16>*A_?r1t!E`w>&7#(E0JH$Sq5qW^KgVx>9{L_}k&4lJcy;K)Ba7j5y9)%XbR z7XA!v=l!FgIc+SX`FFY7N&EMKXGc;jcn?q>=pE^N zBTz68(1oG)ldI=xJ4gkP606NRoYhwQhuqqsnJjHx!&wJFKq4WoVcEC0gk+o;(B2cr zoFR7^4|WfX2xVSUU{ZXo_8C(^xc;8b0q|^uyP-A|dtO zqp~%2b=AwjvtSCELRS)lXpU~7^WOv^cfobgRcf3YB6(Jw=Ciiu^lfZJ+~@svfqiQ$ z2FW|;-mJx4u%N-EXjvz7m5<3UrtjV8%9M6pQYtkLws~>69Q#^N7SyB1)1W8}%5nwg zJ!)qGXx2-JCZ90ODul$?v@DR5mVJ9OM34D~A3Gg)DS#|^Ptc%Ijs0L0W%T_NG}&w# zVmHLH^b9lNQP(x&hfmDYat$*jqBNh1#?9sT({jO zqwo=hi>vl2b6tGP{xxjCOvznS!#_wfU$IHCY2JHCs3VkwrNshrQ?!*nr$39kb-`Nj zPC)85WelMx3m7&qCJT z4b>k20KskixEsNzO*0mp7t5@k6JhU!NzDb2OIk7a_9pJ3H?%=SV58}Et+jB@g_taL z*JQ&j0VI#()INMfsyYS3v9;&jkx?L~AP}k@?UXfGB<=Fx^IXWt)jgt+OPOJ$uZEPgxSWhIlNiC4z@rRIsH-)pr_@vxFwn;mBi8aIKXwGMS%CwibYb6Dd!l~8284a_bO()WtSfDv`P9ri`1 z9LLq1FJ<%_S96z8MjDa~5R$P$ow`~{iF+=D=NMBrOIU|_#!L)7hkhT5!;<9Up|uZ} z%Iu;WbYQVkLu{m#cbc^b8rp^9-n!t1s%Ku~K%iiPCB|r}~90;7H4a>N$}!RK}&UnYv#BS{0`qox?7}6Lup`*o$wg0QLM0k z4@#fMaZozA^_3@C;tN%G9}JLf(TcvClNRl~EJO&-3!D>_1&3pa5hM@9l*6OXuFbX% zTtV7yI1GoC7BukE&xaVdF8$Ru$DW_*yw48ZBy><@E=jwvG0;ioKvAF`o`arSPMAMp22*gchDT8pD9aMo*4S0MmJ+quCn~rYZVKRr 
za)~IpgeQ-O%?4n?^UDjY@9tA+y4+iVz@!4NZ{Bp{XZ9#Eg+<&p9F^5Ec?&CzB~}ZP zcP>*tok=MzI+i3zf&igr&E)0Rhi)4yBZjdo62`m0JsnF&V1Pj#{fPk1IaGCxs;=R@ zU;ITE+#LSGsc7d0fxtW09D>p?a<>cqZWxfO-#%$1r)wkA)puwK87ipT9Yjn{kqCVd zjBjDlTo91$(@TLwndh=BArM&WAg;Ne7d^A1;3NQSi~-5|=DfpdRbstaqhQk!n$E5D zpC9HRp!Y<&uGX>D$^J9>Sf}EdaOu4EA09MPoqGf#0+;+sPs`x6V8MdZ!K8Wo>)Hkv zilF(Lfu%8OXqO=L86dOPHLvA-y(QinSUeT0)e7PP=PaCeO-8$5I=0s0<@p7#FE4P; zCRd|~vNU*ndcecO3f||jo{Y0t9Tr?1OAz{kJH%n)XlontiU&pm3n}V?j+v3$^Ne?Z zWrcO&oDY8405YckcJ7X`4^0umxwu|_R8X{j#CV`9*3>u$ns}>u_xad$9)*xg3Ft(O zRj`0K=OHOmp7xW8uv(P>85m>Woe$}QqYJ6WW>ezv@d1VzwcWvs!$ODe3q_-C=pPKy z@uwqRG!qLhjF|7-g%rGlMVEkFgGpo{;*!s+a~8F&QPoyWw*xUTA64_2{FgJ$WBfcv z%Uve-}sd8emek?c<-uqS>To#YHSDK3JJ?;%XCtb=km4*0a*K4u4;eGfvFf$$= zA5hyG+iHiZt`fgl6b406f|+5hMO|BsA29+SiVA!0u->fkuvx)5Nqbu9!1>OqeMKJ^ zoCk|UvEc3z(`tIBOsscaUlOZzF6P)-Bl;cBT#I-Jlt&k1Ii%Y09{!^9kqWDY!iVCU z+6hyByI{eBKJwZlmq1aJ5CG1r0M`v4cg~`!E7Z1zbr!?~3WbTwRf#|R{4+j(`Gl(8 z0_X+JJZh^hSXwo9DD%a~L~AL5_j+2e;Ff68WM77+tyU|DcsN@_#HZA{ zAVyIZ01ox_4mxP%PGuHrg4ly2&8VIY-gLk}M&T zRgNCK&wcFql)6^#PlY1XX8Ojmc|~3^j(5QvaH$930rFTGEkkE*?ncQVs5irITj6zE zrMM4kEd(B=G5G28XKXebyuQ9-x2@n(0J_nC&tCgtewc$$)#D1NW5;TBvabv-#)`cy zXb|UeNP~CHwLqOzXPRBVSq4hz?^m|8)&k@Mqa4#_dU&ghDh1-=LoEx5`N(15YKy88 zI7^ymKM%B-A`dPk)UxN;O6rgLp+6)r|knl#DhrA69+8N z`VP)Rynsx+9VK6HKXMZd_3B)cEeH~gON^`1hk5sC_L@k%jsS76*1}qcL zn@U1``i#RlkKlSv*%k|9dE?n6)bIcvvCJ)L{g}XaXe?nqCvjIxZ|W8otaIVU^ybRE z^hM#NVU(k4tg%!`^I3A%3YDjYVXdIrIk;LhCZx+?WX#n)XhF{cYW{7{TEp}Y`p-)C zqWr_DM4k@3izJEKZ6EnauBQd+-bgxM^(N|$aG5?!2z2${gN+H|Z}={{Odb0DSnz9C zxfZVpd%XQbC}h9#!D4dENUr=$1wk@z_Ue5i@3}{0_FKZoEO4kVV**Ok#+@bbN+1mP zdoMNXDFvoHOWlpJ;ACkGN@GxOJ;Zsy#WRROYydLwLhwH9SvWXRcgu&(3P1nhGq$@5 zFE6j~z2p}8&L8XBew$n;tna{Y&Y_$*yl~R_JbJ`XI^iWKa8-nyTC;u%nri1FfRNq; zdj;?P9ll>R`GR*1Wgw<+7$Viv)Rje5RU!T7v_RAa3(kXOvE}gI!&=Sp9}jfkd^V!@ zKihL+($4|8!x2qkViz1nZY@@nWr0;$!i6kX&gxlBM4D}tG?Sk59@cB+c0f=V!sjnf zczSxkuG-@H`NzN+-=;8f;5CeUj^(>nza?pTyqBJ3XzKhe0?2|{Ag0zm#dkBAyalN1 
zItxI5|18Ra1#^XnG{r#k&#~;x1q(hZ2%2iQEDJd2QCkP=JwzzEvjM8Wh!#sgU-u z$!wegFD_IxWiSt?)f17-KR7xRg}MW_hPAZ@q9(s%9f#%!Z(ENkyZ5auCAV?|%b&xc zT<4Np-p&sR_i0|2xz9Cm-)?>9PX-|3lQj}RQYa6){6#n6xb}Lk9%yagShpnq_lTY~ zLF&svcYUQEi0L;E3C9UsMjCqmB|)q9Oo+LPcUDiz2TfWka66j?9oy> zU7-IWfOoDHaK4W*il4_qX@iZm-Zp}Y@6w2oK7F`2u$iGPfZjs|hkTz%M+lh*x`j{= zt!-5bjC%>lz<<4KvJFMlf0hjksTu~uj8$pi#iFV#oNLnMi%1ecoDTwsK;t+`P?n5e z{`50mUw`2H_iwOa9An$K$9mw@Q~x#(Tp;k`oB5y1!kZ!*9N;Y%7pw)dz*`vs4KQ`6 ztqoQbQsE=kzQBC;?3}QV8C0fj$&k0;T#)q7c~kHIB z_W}Z8RhB?dH29DOF6HG}^lw`I+}1U`Q-T$>F_c;O_Z<|ABD9Z1JA7sA=acUkSum(bV~#&l zs?yvAt9w5HnH4p6C26#^GW45^;$sIjzhBqx_*~!J20jzoPrWEiiV+iuj?|LABf~13 zI0w;mmUvii@TWih5rE+NzTGOrI@`<4bcL~b3ui?)y=EJ4e~M>0 zWCk1VU+|WDO=Grhu&OCK#O=R#w~hmWj%FV95I>=FqnG53=853+FD3%9WG33sYBKAF zBP;yFt@L=ofX4>T39NH!l12Dl(rwQ(ma!I2XyO|FrRg5gXF2`3Z(RjO@xXwl9JPZK zh1!3&b=4CFPyM%7A+Li|(QzZ85rgu9dK2lGkT4=@#+L1hiG9tUcW*OHXzGlU+N|gC zMx%hTmyYi_aHA07y@xTJKA%2kBdQKMMM2RYFC}--VU4-ogMd(r1vx$3t2pW2<2*uL zr_tUuhwl#_}$o>iNN zp!A9t#T3}n?sFK&k3X)5qyWlTiy}W1`~H3A=$mzM78CbK6W0?90s-0+s5BG=)xY+z zTT7QE)UkybM& zsPMstLs*9Zq1cTnOLz}dbp`8ef2`C9K1o+{frGo>M0fNV@fdCaBHf(k{cFbi+FoPK z+##y5Owqo!C=?tT>&apCM$hd`Yd_-pEMN zD-tc%*3@U|3L#T?gTUl*VBi>lv=3s=e8qg^w;#6N`%u@XUwE+Bel8>JoyX3K z2?a|VzxQ}esBkF)p;&ujc;AoHMj4o+!_eA8lmtnom7M0g?H1C+(9iGWCLN44wN9g$11(LljzsHvymtz(v7(O5sPRFoOR&B zV6`f-TCY%5TfDryz}iaVILHCSC=BULdrdfFgs@qa_|uo4@OkqI|NJlijNgBMg%-Qx ztQX2hkn^caKJvW#Sf_4L=eAZ1#La$f;%2T7>Sg$fYp?k{#={akm;?-h7X$bgN}bKV%O@nJ2jbs^3}=UCujokd+$aMqzHC8Gvn!5u;4C1q=BS$C`L%E#X_TH_96k` z*bi#a4#j$n$EOW8t2JJppYi?M@2D!N)W8BC zhp2cjI*-9cCCns%{cFh>4(4sel(~9)T+Ps)>H7SipNl(n9prmF&1>}Ae?z;+_MzXW z=pU{zMN=4)_&pQDpTW%|S~!d;KumDnw;~F!C9HX`aUett?>y?-!6r-T<@ylEPf$N{ z-FvYx>Tt~xCJLVTAr>f}1y>eax-lQ8K;sr;d{N^^zc&ViGB>Sk54GAHj8{4{bmMkD zWW-F1{fmL*gHK%{;PM*xJeI2$KbOUT#(0rfUnicG@$<9yD~r9*kQohjCjezpV!hhH z*&3C#nmneq5Ezu}0*2K+OS}c(v052?`SJ;$zkI^~_ve4dw$j=l>&*sL?Eo(*3WLv| zpRn0%@bdD4x~@>ipp*j}$ahwm_ew9BIP6*dxq4g7K**b$CzUkoY@cPj37&6$6R`*& 
z3vLDiz=ly4r3Rsc0IyZdg+*gX!SIYEL=Xff=VS!1UaP1BK*0r^SN7UdH2*GGu;7|# zDPTk(Q!6iDq#ydX7g0;8ZxTIR%oS|Gf(0E23=>RIf{0MpHZb@X0Z?m*G0n~sVZZ~Mz1~*9~ zhzMW`LJ6=cOFVsgz^6}75HI-k*I%_dIv9Whk*4h}OM_3JAMv`~!TTV9%!NZgBP7JX zrc}c>hop)Lxy5ZN76D|z)e+6JM1;Z=For-O$%hwByQ>n2rafe6tq}cOZp=5)C31w-p*{tyN_<*X~ z!Hb8S#ZO7@e$Fq4PGsFZ3n2S)G+*oYz*J`Su)k#V?;W|*v<@ACp3hz9`)(%h$mk!7 zo{uSa>P__eL7@EML`_K6jG>tAii30?>z7b^ufA9WCVd$u6h#SQhPA-+i{=v-&#*RR z#uA?rX0^0a!7?JrLNa;uBiHwRO^4$d`p+uH3ry1z`yS)ez76&KdYRkI;E>Wc`nJY? zES)ntfH`A!v+%(IImVxX6mf( zv3)l_5B>d-#|Fi?oOsWuOo{6u#_NpTLlrR1J({+<_dO?0tGzRQH8+gQeKLBw-x}^f z24WA!AIsSJGF4#naORe*5iLIBOw3oGG0^_5Cz0DkdZpM@AaB$Xg2pPqB`T#o`EBG%bxIr|WqU zR+oKFoi^Tr*j^Lo6w4kEG;U^`wl{qM8$sm0y14*y0eX2OFdGxD6E3(G-i_`?p(O0*U@e%^E;O%HLhl+VDtZs&)8}&k& zde#__qAcJcc->YhmO(&ZHYmz66(}r9gU!Pl>&+Sf@cjI$8SFqwxLB119v(J$dV0dH zs!;8A!TNy{>FGI`^lM>?rD5J!1ds(c0TH1ni&lDA%^aF-s%R4K`p0ElgOH%IHLSI8 zE~M9#T&H#~#ALyO_k%;s&NUgK7=NPH4|VHk-+%sx^ny7-&S$Q?5^gSl1aAZ)XbebE z!V9q7RjMpxwI9WT#NIp9wt^`Lo6Q;o!pqAR+t(eOlhmYh9_!5-pFTZdvst0qRjBGZ z@BqhJpcdQ$7jwb-DA`5YW=-JDt{4L?ymc-w9?k+HtZ@TEeZgf9Zbg^{ZHvsc0%P0vYn$ntSNk{T z*w4K+k{^mhJ?oa>IvVOp@dzUwIqYA2O`MY1so9s!-))?-MpCa}9$`rBOj z5ZpU8{jR`T7px%XzwLs<(6tcPSGxv?E)t*IW-0l&A954A43tK37LzAnj&wP=i-|xy zL0B#LLrIJIpPsN%8&hVECbAh~O0=E<0}TnOp*=bXp` z;fK)NKl=>xw_ioKz`pQ(`<_pImel&I&(OSSO$4_`=RBNuV9yth`^)wd!nz`ch^F8b zfBWb1At7zWN4pTA41BNIk*<$$@0cwu#Hpmj1r&<;g>^+f6pUm|xI%L1+`9}?(aVW6 zoQN84feRtdL&|swjoBe;CM-VY~+ghui!=z*_ zg&{nwHTxBbVEg*2uGl`@F^?q2z#}1KF{#va91TC?C8n7jR?M9^JFR}b2OK{doyg0$ zVBOl}w_w&#+O63**6THlDF8^w<3K5!R;rpyeInw$M_sG+3hMT;EiQPB8LE80CAeC@AdT++wCrBJqk6SXF+O-reQzxu~b98 zEZ*bPDxISwfVA@^+}R;@Ata$71c6`ShC%Jx?SoG>K<-KaA-Ow#C!!|^cHhjDCd7WS zF%S{_^2;CaAOGY34km_mHU8ya{ssU1|NamBkduHD8tmo}M1D*{mVnYt{3*hIbAm>WEbogr}!RJUu<( zU;pxN*zPK=5dst3=(4Wt%M3X|0eN2Q$2l>!9x_EN-Okng5Q>9?IFfT>KOl*TC@x``oWNY}Bsv^TFS?zn|h`KG;%5Q+zMhsn1-G)sY#Rg_gH{ zTIRXVkAfA%U04Vjb9;&XIn+YjeFBINJZ%1pq4~K!4VrS-F8?hV{h=7=F&nfb*31RW zaA|Lgya|M@Tf6W)7pboC)ZN{^a^$EgFI 
za9t9v8;YQNW|(R>wTc==NxZWAH^tgf-917>`*L6Z8@eOWl?gg0ktbxspu)gzJzS{w zA;K_*AOtusFd(dpH6B(g7!lO<7S?Y8wg4;$fMK7qWRK7p8Ej%bw^)iHRfv6N;^1})h_~v2=EFr2`JKZ(8GqLCIjgd#Q5MH} zo4`wDI--Ke?7xtYoHo)$WW9s+4yd~f=+!J(6o4+zK^31 z_L6Tb54NWt_LsX5feXTFv&QCe11<<$aH@_l;`VKL>&rA9Zf1DR-J7~W2_zPh2VwIO;>tSKY9eI z(=XQVRZA|z$LJ`NdM)f@jw8d-cfB}=#;LboN85}-2WQL*w;78T5%K`#F;n5Mj>c@P z-a!5@&}6oYpGTL;Rk>;yBj3?VcIUXq9JKqGU!tp70BKPU2-J7qSvSzn`2HMgs=McO z-!&H9zy#)Wr>7@~{_sKg=mir{=Rh4g>_@b@Ck(Z_j;9!Ax)#CYjvs~LxmZE=WlJKh zHTCB||118VfBJ8*w#M(j{|@II{`PPF1OD`fKjGWgZ`f74Ot~d`z65%ZSK}t4gXJ!# zIBpS|SdZKS3rGw}0kSlo>9hrY?&TQE^Aop%s&$nn;Ri^vQ<;#Cm^bMp>xB zk&QvQTEojTybzohcmm3D1!mIpy9MK6{eTshu+;F0)n}jefI9oR1opzyRfyf1^|(Ob`cHE3mZ(#2~RC%lbPSf~cjwGZ&Qwvqto# z7ZG?D+@M4cElF`&SLg@A+V-(H*%d%BP4mDt!O` z71iz)pB~rn&cm1;1cJw>4IUpi*zUG?dD+6*jM0AY$5;@Iy!XNSK`6@t#;7vMFhgVu z=j+B4G(((K09lxqdE*?Jj*?a---*nVoS-%k;w;J>=AQB~EcLs^hR zqGf0a)Ln2+NDAbQZcr?wm5%%B$MfGk3qp@V$gH{F1+zfZe`@oZnpE#5r1NV=HF&*#gIf@1@I4@#hvsSw7PQbZf{JHd>Z7W!-anH=!>e)~_LWcO@{^dLf zgu<9qO*v_&??h?VB-6NmPzYMkqI1#pbAJ1li17IMh=2UYf5hj{pYihYg8%W~|2w{Y z`-b1Xennkd2wZX*2Er(kFc8nTw_E$fYp|1uz{3akHiH;|EaVS*`{mvW-g!7!?6zCj zx^B|scH%Z;+wt$%PL3f@hWvUq!TMnecoCGV5-!Ed5QG`xeCGW?uukx@t*}`!9zT78 zwH4^)1=b1}g3=UNuh;nY?JJ(2f5317!ye)Z9+VX$j)QOaX)0nuoESzi?D28UO&MY? z2$>73W)iDw3uB6qz$auD4Z?^QtNarg+432Cb=S_HlM z6yAB*+QQZr;;)e2bHRcICn9PkW86n@#pDCYPtM%~X{;dKw4V#+2`&0JkLTpQ7O^bL z5=Bwq>FEidKYdQ_ME4@V4k0QIC}`^>1r{ra2%a_5*FHgG3`~T=6aZ;9I$PJMs|t1P z;C;JI@d9fdUSGBlTVb|lwu#$0a`c!Iouf+DISc>! 
z9Qv4zK)+$F%S>*bVIYeDvf!+kMvQ&>m$(3jAS%v-=r{yU4Jbvu_o!-%+SV=2Sxf*j z!+kc2dq2n)Ea)Po+9iSyE|cDSFpx6LoC_2)Hg>J}A@;OYz>J zs%q4A4QDO96cA=GHR-Z#ZLzB~hQm5Q$itLRD9Tb58CK)DwsnQt5v;9Y2Ner3LsLH$ zJckU7WAcYuaGRJQ;MNw?EN&yl9;H|yuPx~@~)VP4pBO6%jj zjW-J*V|6X21P9-$Hpa2&jb&hLqj8l}=3{Dyrrt-I_kw|(hv#;7y=d#hG|?iC3-X_n=k-Is9>eq3^s69P}c&D z9T}e2e9YG&979C;`fdageRcuW(oxe71_E=0;9{Qzpi4U_@m_Is|0nQ>d5|eVHE3$J zoBo7-_rg4GX<{3rAgHSYd8E|JXs4g0(C?^B6OmTk9e>XL1c(Tna~f>w3-wVxX>UYL zr}i*D7LFM>O}oflll>^7QECV9JffCIJG6mEH`aKBBpF`6J0koC{F@8Kh`PY|B&T_S@*X04u%=))e1$SDPj#9Y)Eay77?@BK&7^0SV;>C}D6%+oUm)!p29(Eu)+T{-Fa@_) zN;d;p_V=3vkaGwQ_ac0ZVjk|Q0QLw<2j)4;<_EOQmHjrlx`chj%2E(|*0%bQhL{iH zQ5WB+(dg!q7g0Y^SBo;!BC{!Y*~YX^U5x2ZL4EB$9XbL(BBqQxza|k^0b(xxQ<&)i z@4~YnDGD;dqa}cJ`}#Ho;q-om0P%i0$fH}rF?-87t-34{pHnU@Tyo(1-BQ}$$MJN8 zBSGu4dD6IvP}5f$P5y6Mqw%cBWVBn3S>;uRbsk^8euK3(SnC;v4cHhcx9f2DkR4{; zPX_h>*1l5k$QifD^b!t9bN&zavl&Bl#o7l^&6fU(3C0wN1rtRCb!9b}oC+`mKC@JK z02ZPJ^aPKfChFe?lZdW!8HdAXD2)3A^6epo}{%KSiP>5sZ0ZiZBq^S zChNbu?+!7lhu(i2%w^;PbqV|W?JHhhU-dHr$S{hcIOqY;;CghoOZW5l{z`jX>vRPh zL`W_}r0<<`sOu*48G%u-Y5GiY&ZCQ)atBU=T7?OwFq#mcEFmB`=l7&R4qg-4D_WYG z_xBG;{{wK(dx0P#Sm>RN-)(YJow6Ir>Y z%Tt!+aKVBe?rGl~Le7AB@8PUVE`|#h919V__wV2F`udtS2^#}b6qw?+a!vG#6r$M= z*ChK|ZC%~&qMlK_S|Y7?TDKxv#W-{EiY|L1nm9tjbwSZcY=R41fwIt`bmv^Mu6BBX zM&Rgv_MVVgmP5SVy|_^qOV0eu;^_Ikl({bR_1sszJYTcJxvt0kC)?k}%^!pgLokMu z@S%ifUx$IV=BOb~&Ux(WI+2oA4UmB`Nk-J>gmd-hxHUFzQa%@&eBaG;ZkcU6&uwTv z=0cx({h)k&K--&RPKL!8V6}(_O1Wx6QSu`5#e>>NA(G=AMKp zDLC&P(pHZS-~bIiD`He%aLoyI8sdL*_QiJ2;m7aK`2G72JU%=@1X&!*&^o>>jZNn_ zHLg?1drI0?k0D{COLrlXEEM^5`QPo=cpsZHUo%{#yc6C-*rT?#IpRZ{CxuK~;(|L7 zLcCBYR_vV9I0r*041w8Xbhh8AT<~h#!NX^fR*w&5w&+%K_gM_5&aKABurVNVxX5Tl z_jD_6W)FWkge42s%y5abpha*7*C2JSZTtnB&-+Am8X?laFJl-*&>9&wN|X-$PN;9V z4vX7bhq|uOP31Yfz3!RBjAQOhZOoyrf8V$>Z$K-sS&%VK;ed|>)AF5_OmZJ=C;MBxM_pI2wGF1%3lCz! zf(3VnRJA!1LZbWUf(6H-+Ew`W{Tp6iUs2bU2LEzog0F+diy{6!&%-aB<)SXvG*-Yc zk6(bM{fz=jw2~wtM%uA3ahe2QD_kQ6!O_kA?if5 zkdd`;{t^j87n~Q%^I6cxZD!bK%rf#L&`aJOM6$oW? 
zmD96E>4f;UfBtz0_E;n16GjB=c02s`+gCh4KZn>N0TCw`BZ;c|79D8W?oa7_zqfe| z(}5=UpWp4zn*`!*SFJJEtph!TkOzqekx9=tZ^8!)M~odv^eF4D8h9A0BM;0;wtX0s z)h?1|t7Y!6?NObk^w8#V8rq-J2`gy)sw0-nG=cKeyD`!ff+*GylJ0~}njqg|Ee zpFD@aF=y#P0fZ(|v&&Pfu>oC63hv$~SEi*vup`^J2!?`r!{Ut3RJgtTm^E8P{HdQ5 zpE<^GH5L$KOq(>vpe1OXkM^RL^rr^LL4(%8u(ic@yM?n3hD}qzszu=1{n9bN|A5m7 zlAPFNpTh@pxPBWmh-tp{^Rs4}i~Nw_*Dzez6G(WiK<;A;s_u&cXI_a!bUh9>&U4Bh!!JsFvc7mgdPKzQ~#I{t0?RDoM=jO z&%i*W!?{s_L>mRG$Nshl^g<7rfA)R zNy~JwUs)So;Jnfh69WGY5e$Vy08y2Z-7GKGyzf3^{SXYV_9<9`&=*_`)U|#X0~4}Z z5HUta_?V=TIgu#pyU$Q^F->?uOBJkd!widN@q3SuopXTFr*N+UrM59-*yAg7!%}N0*v8w zopTNax9x%w9K561RZ@W@C1ATUI1o1q&+H<#4K-F|fum0OoWRaN-<^*4O_ z^c1pRNh+*6*0)PQJc7rK`uoeoq>#)s*xyt@6^#{R=v;{Vwp*Yu9M}+eFcXT>H11f2 zlQ!Nui`rWCYH&3~VibpJ4G8M(;G6}p0vit5GZ_#Au@1z7(iC`jSmURkzu?b*{R&fi zz)Nshj5Shby2&mC=Nzo9!7wmn2A6+~(xEsQ}~lqiY< zY*@7yCoH!{n@Ol^hpMVHXc`PRGimFGx=^Q%+ve zRa-RhBgLCY@JpFt0+<#wrVXK!oxO7*w~0B!Q0X)HR(k3q zn8$?9^$bFh(!VyTu|*o&hQ@#e%MxPZ+{5?K^t;eHBgBBH&|%o1FeQi#KHNsRIWnbk zbcq#c4nNPMZnedJMj@KR4x!0Zu&)gop(5uXGUsUHzT%fgm^*cbZQh5+)C&BR{Ml-5 zaFJNkApc=(>cAvZj1gfJMTymVh4s2bSsEDA#3@M3tJe`ch)j5h?f8^#o{sJ=)$n?7o-oshXRN&*zUmR7k3?@oC=2z{56<-TSAocr@Uqn}2Y< zpt*4x(tSPy3-1mvMQDw6P2R#(wVs@yVM3e=1xy6rjUnsYOWn%3^`3iiMonFtoNkE- zoOd_z%|C>+&SYrKFh7ITA81b_-F5LM z^zMB^6GX|~>h<*%zy07R~OlUul6Z-tK zB7k`h11}KQ0Fh?9BmiEt03pMmAcGY#3L%gW&pH&Fs&~84kMO@xU^5<%`-iDC#ED?L zs{v-%T2Q?>xLUwmfXHc_nD|yZ`_@{omPQsU=e^d=qAWJ0xtg=RJQ8yW92%7NTPiYu2 zY07T5!?$nWP!uLuLu@b8+T3xtt3zwB{oD`(jl)Arq#?LK7$TI$pfn6a0EggYEs)n? 
zbpvDH@C5rht-_E%1sxF&_62O^P}e{#S{PN9r=lyEHA4$DOLBTMCxku;h3tROfbb|J zWFrB^^~z@&$sUGtLgR}I)`FAK&5)71S{W09&{IxdX!hRjD$Qz>1f%;Q`q#u% zW-<5ky{7|oc9`RNG&6{D4r#4A-vBRkVZJlM-`{J!FiOdHtL0Iu|TN2+8g-|FzvD)wp#}7K|ae=1?qh z=^%dY@p$J&*S)t36VmV(hC%Qk(j+oqNl9w_`(W5%%H zc31?E1pqLRL%q$I9xfUfQtuW8in4$y3a~MtjE2M<CYf!-rBvmngv8f*>D?L75A)F$4lqb{7N}!x_=^SwV#W zogF4d3~oQQt@a1fVqORUF>4fN{5}&t^rCqkzzycR-sgh3kOYkfi41L@xsO6F;lhk= zx~($m-Go0!3;40E7^9^>N@oo*r6Pn(oCFi^wOTn46VVuys}igA8r~AD)!IA|3UpIn z+P%C&cO(47O6WccAt9!VL*Y>;9^O+5f@cnY6ZQsl=R(MP6+nP^u7>%{ALT7OwFip; zvf#~NHYiH9eiTK~albme(Ho*LtcmpAYx>=~R@3)EJ;#E3!@UU~3rtl4_wXFzyM*AqT1pygbo_6oI)(*3ECR@aHz5x!D~b|DVYKpj zAi&W)9MfkD;xH&}#^4}_Qj%hQ{zAO~-OD1@ly)MWv}`|dUe7pw*M zfw-1T%fcVsUE0@P%?fgHWP3Fvu-u!{Ho9&FH!W)|oV87$K55*>uBzH40h&9Zi4AEY zLE?ALYieEZwT_7Q1n&g|jVmE0#N&B*~g&a?b9uL?Ayv@S?kI*_NyL^{TG2Et8Z;g1Do5ocNx2B5uH3 zJ&7Azz%!o3ysOPtKT|g)k$TI_hkuRwEPbwLVkFf+R86kkrNhN{`X1_MnL@4IhXLR9>uO+UGI+lPE^4NYC9;!kv`NWl+xV)>gzcDK7*e-t$H*E+URo$KS; z@?-yu^H@+=rNsU#$3CwGFYowux{Aoy#`N~q*=;xy5xX6-7f)J*Wt>wro&^Da zADyR|mA@3M@;OsyQmgQz>TsK6o=)5emumB%R7IBAWt zAOJ;IZe~g3LtU?ElKAExj%-5fihDE{uM|&q%egUr^aroq=xmx?-SxFugkrp>RiB?3 z!qIM8I^S6!EZZ(0MJJCO0Ou-I`Y^U`XH7t{y?yKBoDU$D>hoI#5E|vGo}+=2*O6%1 z26RNRpwIG6d)J}s9DZLe`0Lk2Ykwp`uG|JnY{=F zoYNH3&K^CR&+YmlFT3dF^I7IPi9zubh1=vmT)?^7?P8KpU?#!9xKHOa1_UO2Xkmbd z?xXCxW>WE3W-wV+snJO~vnS?WPdn1R!_ng4 zEY6Ju7Cp~@Qy;v5P6L=dfZic9dH4L098Ycp3m0BClhJ9IAD&70XVJfgZUvT(zSPD~qQVfXoVa!VE<0IVq zekwg9XvOqGnEV9A=Is9ejI_^tVCk}%Ing@iErpeGv^gV_l#8@l>!J+dVoY(Sr z9EF>u_SBUUSs*WzW(E~N9GuH8ajHmp!TKTApOZni&XkUnWmjH}HRWE50cp4&QOoD8 z{znW1OGMbTTX)8P000NZ5dBDFm*0fMH+%cuUA~W1DsEZ=g7NQyiseYfP6L;jSTszf2$i*(2Jyk zbPnVIauS2k(?HNTh0nlGRJkx+2qDBu!H0J7p*W`K z)ze(Bh~V7MnxtpN(R^3zcKy5KnTklR0}_*9G@J)w@n_?y5zP5mRhUwhbJCgDjq15Q ziLhp?;%wQO+qW($81*w228WO`T_yK!M}TvqZS9Nu$RFquDZZb4p{Nir)RoD_7?83y z)?qm365Bvl0G}Pr0`fz!?SrbBW2zvf;yXqN%4hBJB5W2IIIe* z0;(2@#Zzqc10Y*INZmgO=de&&>d`WEAPZ#Qm%dS)OA$fffWQu()WXdiqU^Cfx78H~ 
zoHu(=o_9J|TzyurE7Hp7ZffN_x)7cjH*D5RGFYxWemB+wQ!Fb3PX>jdehLv~*mYhs}cXfbR(1#o==4 z&<_kEzDxPAR5L5T+QUn|2euAGEbXzHp2)dqB`SG_bLeK80Mc+5h=5CX!R2z%w1kO~ zcx+hGeA00Fc1H;d$^ z)s!4($M?1QHarbPgkQfd=q_6E)R6_BKMu=fEkYOuMn5ou0Gu*}Tegv($H0{CKc9-N z!f*}W6QwUWkK9;(76^LmZtda1jdGM${ZnC-`_o{B^wRo$k?YsTAU+OHgwA#FDHp%w z-w;3om;=HvpghBs7Szu)XR>1mG?@gK?~Saejx6OYlAH2RRaQ>1MnB%WmNzUQTjXM! zF#fDXFitx=V~U*7WY=#^pXsd(XEaid-eeHjrY(ZF7K66*pp1socq<&^Kszs3MqiqB>Lbs;VCmYX2>QNJ=ug-HT14`A+BgdB&O7Hs2@ z8d2In>^ISXai$&<%w0@i&LD|p#7HBy_noDWpV>#O({UkMt%Mz2FNv93STp zR~)%GZ7O^?Ca|l|v{4vHx=*HhhSOyfV`6snpTi@{tD{R}v?Q=7U8=5FAg<1l##D~3 z)p(1{53m410zCQXKAB?<38Hi`o%>q$&XX4FoGBH0^0CRc?@#I~>;wi)VclaZLC~>HXO(f_iUG1Z2t*+HYUngZY{wXZJv$DA3iy zSEuw&ytSEeHMs^QM4p-3mPF=E{vXxX^LryEU4;)kK z&vVDcTQnCZjKqSuo>4BT@uB9mgr2QKc9@21?wAP ztXVmRNyv3TE|(6zb4BXixob5Kw+4pYAfyn1MKFYbzVE>y0QWiEe;efImmL~z#kii! zS@8K_Ofib|2a0SqhhubFQ&!Po8v)4O3_p z@8}pbvu+815P@s~#(0l8JhJXZ)C~&8q~EZHZmDIFg70~rm$0mM*~>xs1WyD4t6P-Q zJUfW?7D zDpyE~>-H=P-DUy#0XXh(SHjYDJ{AO;Z`U&2Xy@2NYl^Yzrs?}0{V*WNOmO0;ukVby z;NX32EHR1i$9ahNUc8&(KNHs1l}xTJe})iJb|J!n&(-?iSO~64_xv6|=WQGwjMeZ& z==!oCx|s`#0P0Ec#A8Pih1chyjM=J{SJ@w0VnAV8K_~_?Z zF5o?>VQJ^#PchZ3x?c^65H&P3oC~)Kvr_zKeolrXD_~}+e$ORvH#B??il`r1kFNkal z-2{qRFZzDKFz_+LMveUTwGhOsw@I1wD{-nC|;-*~-S}G`%S84tXV8W&-!(be2k2CaXL1I?A?ne_JCdc|3yF&lumqEAB={XYnCzlBA4OM6rly z@wtN*3HFX~>0(lvbpJ8dU?xkZDH?TAm+M?txm{m=GNC!8Ae8u6b9;-4d*=?O191F& z-qYeT-PFCOb7dtT%iFKK;J~1RY!OP!a$jttr1{Zyl{7m!2(}-}W?IxuE!4uuo{T-( zvD=ydJp#xU@!z2h*ckfUoJ`uiVKWi|jyx`1hwfrp4lOuj#eV=mIBt3&kT^J(eI||I zi1~w(eJ@up1sp_x5E$2<(GMExFmfNTGeFbsYvFBnjJ|Y5Kx#o*0-5`eh9{cD}-FBBg9zw`P zNFyUL>U><}Z_FI5ja5VtVurhtotXsKPh2;qXT@|}C<0y6yI7iIB6_^Y;2wxE1la;4 z7MO2xQgk)R@pJJBP-^0sfo$N2BqllbAy&JNa)mNOq_jXNSGSJea}tHM7cm8U=%Gyh znrh?AHR8;|Lj)IvtU0=UI1cQhY>I{RnlQIUK}=s3 z>cpOBeO{#V%0?9bcH|%u>Sr1)s}4vO7qhU8+dKqCVu4!%`d1o!_&Y*UDXPjEF4a*I zUvs4JqDLBzT0p7<(p}&{X zVP|gBoTGy7kvPH-Rtg|z5z0OT3u{pFq52BxzzM8|@V5X9XKToT3o^TT9cMFmJ&a|b z(ck9>i7Ka-FW9qP7@y!FJ{CR@Uw?;sRmPJtwBkCcBn3c)in+K>l9@t)q%k586+|uh 
zga~4g^t1@4NO@i@?_np|((XuOKO9IcF`vBs??kok#?~JpP^#-L0Lc^9>1})V{5!ni z6d=?-mX4mcboy(LKY#y07>1|y@hQ-NzQ_Gp6Vc~$rQ!WpNXh6n+yjoW8fQxl-O$ir zFd=|zfgoxB=fvV#Fj1bts|8F1Aq-&IQWCN%C^+cP*g9HfheZCn@Hz(K`;wzqUQk{TRDxvFy@v8t!E$ z+6=uPEJufa&hT8_)dKuJv?%J2M~rF>Xd`Qioczi zN(qjv&qyp{r+r{t`#x$&EShOP*LfcE`?r}9;u6z33GW}L`mX+P!h8Ux@jOpJ+#};$ z?q^u92{`4hSq%0XmgTNBfInpqX@)*Cm5w9=TRh8MVo8k#0} z=~k5)s`f`AR5_yU5a+eF3mmZpnFY>KjI$905;$fH;#E(I)ZaP$)7p-2I2DGW$Mt$m z)n>l~jtu^8X!wcvCZwRD;pH$l$D@#{qQ|6dK?L3tJPDl0Ok{}aYQn(azz}la8(B|* z)D~0oj*vZK#?M5W1ESNqq%{Gg;Ra;Va@=CJByo!C*A;*M{Ebj%-Z(kd#@U3AhDR}8 zU!Srsj)K({SdVOOXlS?>yR09G0!Ogq5$ZRyzP@_Zl)zUxr9Tl( z`1qbElD0h`=dhN)4QnK4HU{S-pVe3wxRlQ3Gq9G!isTdT2(YRb;~?uA0wG@c{5>e+ zCd;3Dlo=un;Xz$~VoF@+HSl}nSQP))l(iuQ3?ZoLWM*|(%2<$A9x(w&V)m>Y`%cC9 zqZ?<;>SWoSQnoI0S^Y2<%p;Iy)?)04^(QHO%jfZ)EPD?LAZyzYuw26%utm4h$JqCG z(!{y;`UYH0fSGE^lw?yAv6;8Nh>eiK*SZ=-p-s%2Icw`JsI073>YPh49U_ub-GKpd zv6hD|aEj>`$?6M&ps^n=<`36CV|zqk5nR6pXaV$cb!`!j@%@d78_!&G8)6o_gevHc zs$cm#Zz6b7&Tah9W@fI~-4WQcPv^h&_~v~@9S3d0sb9Xhl*^ofcq;PbPU^#bJNBkL zKatPSG+nP+O=PtyMB-TFn1@(E01UAf2s0~fnZ!`BDkdTWGne)gl=n3UMi_>wj)uZb zH*hhMakY^nkjk%?KI9hHrk?wl9QxucxG1$!OP|M&a~vD9Hzs7yawyqMyOWO;K2mIo zfMTsGC8)863=ztrJ!wT6)3<}s7m?Ky>QPE*!bbj?ey$-8T$BP}Olt znhj%e&yiss!ho-9ul}V^VGAC>$DNuPQE3E==|97BJxVxxz4Dw@DkzYFqJc+)FDFdePj-bh;VtmF*jgKTU ztc|hYkH>%2uWQD6Y`sgqCEsna9ftRFSWN(F*o{(vcyv!^LXlHx_SXSn2qWV3$&5wA z7)|(SXlQ774xU=;Nz{^&Vl^UM5F-NCvNA0YZYkow3!xU#x zxE{O#Hj3N6BZcO9o)#XlKHia4&0lqQ#0!N9&jdmbss}Uze@Vzb!G-76UPrEP>#hhmehjD_+!j2XZ5~N6PfT z_6*tk58#Ns^Q`S>mZqtz@Oh8~ka@qkb&ToxDGtmNp<0|T59|NuN z6wCpDgYz!NcE;duP%=J5+eM7wz$iYeXmN&@1dNHE@-NPLaE!4L#Dap6A$6W&f}Gg1 z2_KXNsQmvKh}$#IPMrb0?ep!hmWH=sC!Ui<+BwZCGV}w!{`>)lP;8Vl5Qq05WgXjT zcAk6P&~RSl9RH6oLq&^~h3|ZaZ9~I%fF*n|=2kOK5xjG75D?exu8jb{Ie{x}dSUXnppiW+iz~R#x>`2|X$-7#xG&)%xO`rsuVw^Jx;SXWIYo$6J+#Rv+Qn-K>1p z<@>*VAVRepmdrG>+3K6t#P+=ZFm1BL`XlC8T1A{&>cL_Y&&FqDcOf%%TGHLJo=9vG 
zfYy!>0j~Xk>-Cy^J#SkUTUQ^s$fhnIYini72Q7`I2%@9)fVN#cQ?c49r(hmG?X^};$4Tagh^LHh#EO)n-jh%tDr0Zg*x1uCC{asq%xUps`a4HjWSC~?z+K~{naPzz z+eD9+3iRB3Kw<;L~o(g4Jtj10N ztTysk)v<(40YviJB&W27gq5cv0uyP0-;Jd@_oUy$BSuB!DlhUFG}BppZuVa6+E#+a z2(=iHC*n*($T7J8ej+`o4p0(h7(xj6bNy0(a3GstPlJ*0aWd?y#EMLFKCcp3!%6Xo zg~b)?_Lv~#Z0~3;SPkC=r(`2WDs8FdkBoS1(yU+9Mi+uuaXe_yyD(TBD7tNO6!6qF z3(1I97E!gMu91T|?6DC&q1QrR)dY|y;QI+4$XWnPg+~rYSRFV73}L{ZKYtLI(ZZ5$ zL+P&dE!SPcX>ledKUoio1GUU})6np#I450KON7J78Ob~zsxi~(_m0+u!E6o}nla6+ zA1+!#ge7_v$DsHq=#*!o!06O5i7t-9S;p~7%&Bmj0P-aK#07RM1ke<`fzkJUBow8K zQ^V0H3qUqB+>0Y?^r+x)?{{++Ekv)Ll>1(Y(|qD3UVKlxk`IHt{u z1moA`H~#N`{tMS@0Kr)c=^M3g?MZjh3~DCmb$tR_h)Fd>h^bC3=f36J|D5O8a87Kk zqq*GuW8Py31Hupxm=QvN^ZpK@#-#|=5QgXzpJSdTfzC^9G&DRBr>g6-f-+G|elNdv zq*c!~1_E#%`r57qO@s_0f_EN7gkQgYq3b$a!yfMmZHHM90)qo1L{Bbe0&s!u-Js)& z``vqo%f;i@uMRAl;AY_Q{D68x(WBW)5d$|XxEnfTsy< zmcOq7=h@wg96m}x`7CgvhmWT=e+8Naq+u5bR+T%(fw*G=m^d*0`S;%%j4qlTV}I7O za|#}}q!w*xXm}2069+XkG&}@mK`#L;9WGsmuIrM8hcvxi;ru{%MbW2ZG@~DF(*EGe z-OG}2)^5fs4DHEw_O0+C{6`Z&8dgxIwqTTbYJ>&kpk_~~HDkS{7$BN_64iGz1i_y_ zR}3bwAC$K6c#P71Y+d~s`@i9XAlTk#uK)wNyu(RQ4yAU1IKcz^p=b)}?+FnBa{vpY z>pWm7wk^HU;=B_84+yEIeytBkWhF@#&e?pnGR<)QgSUOE{Ejup)90n(ox&Cc<@q~H z8s3-J9a_YTCS<%*M)xlprpMVPH#7`$+W4iz!88xf7BC51aWPTOgRQkaENVl);e^2u zz0cT~#fAtdmJnghZUK*LPw20Ju!nO_?>`n+vfR=7EBY;_vFx{g*gdVI_C}n~-(Ief zys4^Kl?OqkS^3W9YQmi>%WGWUQNhCm=j!J%u6-ac+T4$JvxiYC=ViH(5#xL31eWWS z;~Pf`ALgwvRtlDyMy8l|f3=mh2glO~J4%-$ENb~22FT?C@zdG>R>o8oQel$=c+f!f z5CVn}G$<9tZq2IGmGQ1&&)G2bd$;-rsu?EaXQq3Sszx54-Mbe2*anpo`C$ z2*3aOjS!-R!#Qvt;6>md_~kr?U%$`?0fvBj9`;8b*gR;G3oR2+pV2g7>D-9!(=7eC zmDij(Uq)?M90~~mj4Ns*Mz3BXECT0rJi^fu0=<8=<4ATJC!n#rb9nE`@^%-jhR;HFO?srTPiEx) z_2+N&!=UyPD;TudrfE>lC48L5`f)ORm!SMtT0a^Z8XERMoea#!;Q92)1Q6$<6$D1( zDLM3LdTapELelpE!=SF$d+Y)d6`s}QW(kf~GiFBDY5#1>&_%;zRGc>QeJV5oq+uUq zDl3^?HpL2|OV<0=u^5rO#<83+$ngjQ_s!&wZ$b2}@ZL{N z^EwfV4|oj19P8AurqjjkE(=K@G|$k&CB@<@n$UPYm}Xlo>*rz)$Lr-Da$LsV-J?*t 
zCu@_f`$rU5d-tFUAs)W#K+dIe8Dep3+7da9l^F&`U_iuyWk)@#p2jj*oGQifWS2R$mC!-7{B$*5h!&Zn(iQS$N14bXQ_}>HhW!{5=IDcIe{Ume zNJN0IKVKO7Uf)kmrz_HTlEnq3t3}lJoM_)SOws1&^D*nvalph13&2{Ol$qi0l0atO zD&s)DX>j+)BMOLA{5u2YLgj8BnPBP$ct^a&n;yx!5Toa@o&BCXT>1x)5P|@KH3^OP zfRF==XyO@>oqh{N|5aS*=&Bg}AJaV)?#nV;RMFZNxEOWxN>2PWSY9@H?nvUZdBLqq zbd57Pq+DN30BP8PgD9bBwh-qWLJUgl2hE6as|HN)J=}V}JHCtiRl{>ouZ_org;GJ$ zgpY=X8?mCT*3Tmo3kS=5<%!Tar`e5!AU$K~HS?4i9YjPeo}$)HxUGFTju%z8tDIeq z(s;@&OECyUKQFJHWwtGIS394c3iDW@Opi?nX#z;YE||n>L0**7bFbC~Chg>bLTXY>w6*tf_HkpmH%ECO;?rTu=sWo48{H+U%G zOd<(qqYvpN=@s)tj;(7=0I9HFD!-@AGDPdFNTaI=(dd1#y!Q5``ku_(hWI@y*1i@W zWL$gA2|nf(CQnCV71MMPLu-tbnLnQKOeYa9GyTl$=m!bX-aA>#bW)}+ty~Jy#O%R% zR&pb>Gj-oftRFAfQlkhYEZ_T{^)bfuJ=-&-gTNj~4kPZW#9Vpy#Zo3v-s|#YCUD`f z%frkA(|dk!FFl3f_&mJam4|eyDQQ0AnzGtXD5?)op7pfWWpSyz3G;3|0Mh0xub8a4 z!u4da2cZ|;b9Tc)weRX{EcVi3QOeWLl#nSKCgtly7=cXy5rEX$N&jYO%@7Or91UyE z^-ZYaoyVuJKtSZ`vvBn}Qd*no&hFVDHKTOP7Zz|Ep;~i!Ppn%E)|YtC$xsxpq9lNn zJnHKpB256v*z@S`@#8Efm!jX{b1_?ZF#2p-u=pBp0zu5h9yJPfQWX3bnJ-}x1Ck5& z2uLg?UM++O01iN7(btFL6auhHWTR5Ju{MpG>g0Y zCwE@peLW|u1;N(o5)mqT{M@lTAJhF3*I_yYr1(VOA+eYp!GYkSaN=0Nv`zzGj|Bll zl(}s+gFxg}-~q1=X?(^Q$nMqa7ZW#UVNP>Xw*`sHDQlZ1RNONJ9M2#9|Ay8VOfql@g9`uOFq%p1(3Daq&oO~ zZEk1If(dqqRddkDe&ED8X0o> z=g(YgwI%z8?*X;t0*Kajwd6VHaOpZ+E*E&OLEZuZIV~DUjzBkZyy{Xk{mu{u1P&@- z6z7zqY`3V+t?#)MKN?K{X;{D$+?wnk3$E8I`Y?bSQMeKBd0uo=;V4aAlzkPhJ*4J{P5>EU4R+#Go5G{AO8UJ|BPpz)(cq!jf4*sU>q#yU6{auF?fYsoo+ z^C`qlvu8O1VhEp;pFz!4=&I+qn(u6Yej9$Q0MhVkC@BzOgdi9~0N_*zX=rF@xC0^r zK8d2Jq2W{Ts30VPA%Std_PBJwH4M10ho|Xzt}fgT-Q|Mr(t$$Q5PYck!J_%XIS1eU z!tY-Wo$v6~_XrD8P145GcKY9j*P(HL4LdMXsh0NW5L8c+#B2gY2tz-B15bQO8yXrK zZozkrG5B<}c{wk#_2V%igqd+2uDJGB48x!?88NfpIO|^5b@=P|Z~XrC8_sPD0aWGw zAAJV(w&=PJm##zCU2wTv;&;YD=vZv0;r(bFV8c$BFu}{hN8x5LHSea}c?cX3_?8S< z4GnL^E@8XjUQE&v@5IzOu{nA-G;CohRrf9sK+b^)2+#~z{k2yCWEi3wWRY??rsMbC zAD@lX{orZt1qRo6W}`!as**UKlFjGOIA@fo{lG02z+ydI%;7%jXm(>tQBd%D?ZGTy4uE@w417c!iHsQ0 zu>&Q6#KEmT=W(ztrDyD%)m1U#TlQ09D}`dI-CFVkp42(HSz|dJ!}Qiag+i|cL6`Ll 
zioBu|pY|OXCx00~FGfyXT8^lE|A&rgQ5=G;%7t(Wz-;;fJq%lKZf=ow}; zNt_cfPeZpU?Y26$MeN8Ug7<307={4)ebHEqA;7y1W$77Z2L#R$x{HVN4#Lsy95P?}|d8M+|OVBJJr^7a(qM4tgP?Mvg2P)s28wZ(+A_+1h-33Gh z{m|p<>kD@(J>Q4PL!O9uFB-^xEYZUw0wJwZ?sHb*8DMI&rDyfL`xPJ+aC+4?Nqvq4 zt5+nvC^K0_c6@ug%{Qdh6e+&{Ar_Hcw>Q;0eJ;j@F+u6Qy}bJSdHQVCug=YLRZ+}H z2?0h@Z26Pq_o<6k#Ti6}8B0`n#=q;(4Qo?Burp&%%$7h44Kt!nfk^}Gi?Ogs%zzcZ zRic3WH^uuUXd%>_r)Fl2X>n#sDStl^XK|5i7LbO7q&0`+1EMTTTwlGbO(FqEGgvS) z`hLLmtB;v7+-D!6Z@{Akq#7EQaQ?kGTKiEOTY{NYt1Cv&+FtnxEFCiqhr+CxX&)7t z%J#q*`qW;soT>mRg&?W@&5j~=y>eY;d7c?Zlkfae=ziJ7Eg|sl9P)fHMI7BmSV-6Y`emrE~!Al?nG)eVQ zM00r&Yfus&A~(vMrEw`7IQf=0G@KRYE@0;S#?&4Ut5qO~zUT|_V_R)lJ4#oK>3J=U znKE>dKBBlMfiiIE>?zz?e;5ao;$LFy3*kFgtELH)`Y!(Co^_VLQwLQ=k82D=Ko|x{ z=PTE1lTQQ`GhVtFUow-bcjWr1Z%WiNjNJ8zAP|JW8iVT1S=Cjer1ad&wME&AbN3t& zkTQuG&3QkjE_3!z?{zlb_|3;!vw$?*iJ46&*&BAM6(BH+2A~Jom5Q&S;hiYzYBoF( zRy<7)^LtN36=L4S$fpdTG)MbP=qP^AeE+9G3PCIh{NHf1mEKP8M^+s2j#RwkGu+?+ zkPw7b02v03?E%7wLD}}Ls1%4whS@o`^tnYi5LN4einiv)KXT1P{oGR=2nwe6t_dN_ zAyFtj&VeNGrH~%UzZdXs#YYoB8di`s2)BK!_&$&}I){KifBqn(8X%{j$PEn*Z^SLy zU>f~2G&J0bob=&VSiA!ju2sb+&>(Zfpm5E8<(vligb-4dcKgOHV_tiC4&kohRx|;m zVHNk8#S4=sQ6gFzgx<$=fOJQ@(zuR>ABFF{+{dX}_h6hBylm^E+-fU7`G$rEaO1Ks zv(DWEwI)cNZ4TM8Aso}L%K-TO3eNhPD#NDKpYLZ`_Qbk2JPH9VW~}?M zF(CJ0?kSEp#`N8zkC>Ni8L|HaK$){Ux>$`a9;e-vQOY|OATVPH3^*$Me+kvscK4Hc8xZ2SGc+1e$#5$MHC$IGyqJM~+#oRNrhm)&n*z3I%V$W66-M;JExqA|eQ$a=hK9v>v{?ub` z?savgIizxNzUqmT`!-UGsQHuh`FJdpwm4y0FRc6)0Wo&KK8f)01$?uuz-|j_^m6yP+Zv8#ij9qMd}yXxW`%*!nP2a5BPsr7z|vtyO`^?P zshuq6@)^rqqVw`%HDmK5AAe3Uxdi}*R6nXZ*^VlZjWtFyaT}9(PI4%#woKhqFlj?_ z;g$G0Klk$W*3~R)i+}1CW&JjE2qTx764FJX-DZKkG)7G+SnOmH8EH?ro-lVkt!VfFx0fgg2v<$}SRYJ<0tX@=tse{ytnOAJfLvBC41>P+lCyzS_$Wm}pv9e@4J`(wVII+% zn^Pp-xp5<{3B!Qv^$KChHo1n=qO^Y8ileL_KLMpSbrvUkj7P>=h*dA16<}`bW9-*% z8rjBfG&IyGYggP~4dvpmx+x5Hbl9{^H3ShkEF z=yTRcO~cc1j=i}Tr4?j?t^E?e;}SzpaL&Owm)f)RxVoR!(kB4=z!*Yyw-RAw_vtau zaECL{XH7!`nhRFLB8s?d*LCpTgTnykpmA6NxEO~rF!1-+*Q}>lnJ{Gxv6vGgc<)mH`w~sGcoNG^ 
zu_;e&S@@;4$eN&K!SvqyG#2NO;xEqttCf;n0mVHIi8X6j23?g{v;2pt}hx3|d*D9ThQ1oyp zV#Ek3C_S(s1my$PTrKZ%!<IDOx}a1ddHfx+Qc4QaF;$^j~j zli}U~@3TTJUEgbt8usJJu#LONF!NrSDm1g3fUm)r*^gX`5 z286)Te>}EV6>qZc%kVQPgpW0X@d8gmnF?XI&u1A+W7&G*`wN(>6uZYKIrX;_(>~_= zE_#`^KPKuQ%tcK6)L(EbLe2}qiDWo&80;8N1Q+p^{{sXQ>}f>kNE zp36_iHE)ROzAgUDE+Y_pw9-2eeAj853kxJL1`0V>5o(;36>vmpUwS*bu9EfT2 zer>JW__X}6q$Cw?H1HJrA$7(qBB1LWy3V5yj1XqdtsBAP$vX+kwN5dt#i{L7vo^*dxNJBDbjh+HP7gG z7o@ISUwm*>4=;nR(s(x$W@69%JpNSEks@`Wz}v>L+;&QBO$3Gtie~jT7B*$HD+CA) z+SY6DOZ_V-S4!lq=SNBPomYyq7b%iH0a^(1`OibGXL>f!jMB0RAPt9Nd>}Im!l3@2 z_pSENJB$`##L!+gSCED`V9WaPNqDV5ew!t-q2beE0tJgy^}v#PkN_b{T6RI5hL@ZJ zfiT1vP>JpA!7=-5MS*cHmz1ZY%H~Dxh4gFLBcll*4Y%Re%oHpbLO8A2@|7?xQe#{i z-i=ePLrMSb|4Nm9`gsh+n(%SD=h)D2Akw}fB-hAs=DKYCK+1dxNU9f-cz~T-bZ$#o z+c%6H>mB|q0i?oRmZ;;=Vn99szc~Na_^d=QnxvF z0+|mY)tE5TnAR?w_n~g1tci0kwp^~%|NQReUyikB@$4)=OWJQ*d382SpU?NExt7V2 znE|4h0V`k_dO%!Cv8%^Ar#KCWzGy|0q7!;kO(9Uu_z%b?FKmq z?>zuQ-}ktFU84(DY82m{Xdi_$XeoXkilTsG@m+72hZuLILdd@T@{IMex2Pm7zCeus znvbG3ArE(Cx-qTjvhaP}^H@rm{YhJNkKp-KCS+0bS{5ZdE-3pI_@w!N1R(@K)Cv;9 zfG`9Q7$KMdB0va5Fgk&prs&l){3-|t@5_uJQ3??e?XO7VzgE1^;3DthEY!iInon9> z3%Ey7Fi*Sat9fVn810&uD}r0=a$B1B4$J>hV!% z0!YK*uwyxJz!3UGI8+8{LP$eHLk(kE&WLg5o@SM7Xvh#|&4xu<@52~==N!Cu7(&1> z4C+F~z&(mcQ&MQ!=2!@kTVdl*NB2Me`+s})7*2td!fqAo7gPsTa-T30mqq-KeEHIU z#XF<@c|;7z*iB)nja;GIfnw~(rStgxtHZ^6TrOVyI*A}L1_V%R2S*{pc@N)R5Q5umz$kka2R zI3m=qcLWQMV?aV0*j_?EbuiMr_WSFA2=bcJSYnyZ?Ly!0$7m)A$+a-QHJ+DvVg5?3 zX`62BZB<-+q`4l5r0T-E7u&+RD&d{ERj$VZEDuvAH7sOyjO%pEKDBp${q+}q{r-i^ zuV3i8tQ&JNWh9MVphOv->(>|l_4nWS`ud8lp5?lvhY18ZLS6P_E(Tz0ZpZFrgmE3I z=~uP~zD0#5JDawh3GX>yrq&^e{fl*5orx~)F`H9nLO%=$%wUKXzu6e+tI7;*YS0OA zBYHWV7ptKq*^%JrbxQiY$1zJKgbJE0ni{v7@5sZr*q(P9M*-2f`|Gd2@bCZj7dZ5| ze*J;>gn#?D-{>x#?h$jv7OfTb*T4M*7Qz4ikN<=J^&kIdQ=Eo2Oeb4y&+T+i4Kw!1l2&urdRd2@l z!t5Sc%yG2JXlaocA-hnF@fn!JqKyo)o>U05SQ$#=*Nd42LyX_C8p5}8H8bNX=F*Y! 
hy#xTR9D`o-{{e~-B_ Date: Tue, 10 Oct 2023 02:22:43 +0530 Subject: [PATCH 054/164] wip: queue prediction --- backend/db_repo.py | 17 ++- backend/migrations/0009_log_status_added.py | 18 +++ backend/models.py | 1 + banodoco_runner.py | 37 ++++- shared/constants.py | 16 ++ .../components/frame_styling_page.py | 21 ++- ui_components/methods/common_methods.py | 43 +++++- ui_components/methods/data_logger.py | 4 +- ui_components/methods/ml_methods.py | 140 ++++-------------- ui_components/models.py | 9 +- ui_components/setup.py | 2 +- .../widgets/add_key_frame_element.py | 7 +- ui_components/widgets/styling_element.py | 7 +- utils/data_repo/api_repo.py | 7 +- utils/data_repo/data_repo.py | 19 ++- utils/ml_processor/replicate/constants.py | 12 +- utils/ml_processor/replicate/replicate.py | 46 +++++- utils/ml_processor/replicate/utils.py | 19 +++ 18 files changed, 279 insertions(+), 146 deletions(-) create mode 100644 backend/migrations/0009_log_status_added.py diff --git a/backend/db_repo.py b/backend/db_repo.py index f8b82e11..8eb6e945 100644 --- a/backend/db_repo.py +++ b/backend/db_repo.py @@ -605,7 +605,8 @@ def get_all_inference_log_list(self, project_id=None): # cls_name = inspect.currentframe().f_code.co_name # print("db call: ", DBRepo._count, " class name: ", cls_name) if project_id: - log_list = InferenceLog.objects.filter(project_id=project_id, is_disabled=False).all() + project = Project.objects.filter(uuid=project_id, is_disabled=False).first() + log_list = InferenceLog.objects.filter(project_id=project.id, is_disabled=False).all() else: log_list = InferenceLog.objects.filter(is_disabled=False).all() @@ -661,6 +662,20 @@ def delete_inference_log_from_uuid(self, uuid): return InternalResponse({}, 'inference log deleted successfully', True) + def update_inference_log(self, uuid, **kwargs): + log = InferenceLog.objects.filter(uuid=uuid, is_disabled=False).first() + if not log: + return InternalResponse({}, 'invalid inference log uuid', False) + + for attr, value 
in kwargs.items(): + setattr(log, attr, value) + log.save() + + payload = { + 'data': InferenceLogDto(log).data + } + + return InternalResponse(payload, 'inference log updated successfully', True) # ai model param map # TODO: add DTO in the output diff --git a/backend/migrations/0009_log_status_added.py b/backend/migrations/0009_log_status_added.py new file mode 100644 index 00000000..7f9e4e27 --- /dev/null +++ b/backend/migrations/0009_log_status_added.py @@ -0,0 +1,18 @@ +# Generated by Django 4.2.1 on 2023-10-09 14:17 + +from django.db import migrations, models + + +class Migration(migrations.Migration): + + dependencies = [ + ('backend', '0008_interpolated_clip_list_added'), + ] + + operations = [ + migrations.AddField( + model_name='inferencelog', + name='status', + field=models.CharField(default='', max_length=255), + ), + ] diff --git a/backend/models.py b/backend/models.py index e1f3313f..6fdaf097 100644 --- a/backend/models.py +++ b/backend/models.py @@ -70,6 +70,7 @@ class InferenceLog(BaseModel): input_params = models.TextField(default="", blank=True) output_details = models.TextField(default="", blank=True) total_inference_time = models.FloatField(default=0) + status = models.CharField(max_length=255, default="") # success, failed, in_progress, queued class Meta: app_label = 'backend' diff --git a/banodoco_runner.py b/banodoco_runner.py index 722f7286..80081c56 100644 --- a/banodoco_runner.py +++ b/banodoco_runner.py @@ -1,9 +1,14 @@ +import json import os import time import requests import setproctitle from dotenv import load_dotenv import django +from shared.constants import InferenceParamType, InferenceStatus +from shared.logging.constants import LoggingType +from shared.logging.logging import AppLogger +from utils.ml_processor.replicate.constants import replicate_status_map from utils.constants import RUNNER_PROCESS_NAME @@ -49,10 +54,36 @@ def is_app_running(): return False def check_and_update_db(): - from backend.models import AppSetting + from 
backend.models import InferenceLog, AppSetting + + app_logger = AppLogger() + app_setting = AppSetting.objects.filter(is_disabled=False).first() + replicate_key = app_setting.replicate_key + log_list = InferenceLog.objects.filter(status__in=[InferenceStatus.QUEUED.value, InferenceStatus.IN_PROGRESS.value], + is_disabled=False).all() + + for log in log_list: + input_params = json.loads(log.input_params) + replicate_data = input_params.get(InferenceParamType.REPLICATE_INFERENCE.value, None) + if replicate_data: + prediction_id = replicate_data['id'] - app_settings = AppSetting.objects.first() - print("user name in db: ", app_settings.user.name) + url = "https://api.replicate.com/v1/predictions/" + prediction_id + headers = { + "Authorization": f"Token {replicate_key}" + } + response = requests.get(url, headers=headers) + if response.status_code == 200: + result = response.json() + log_status = replicate_status_map[result['status']] if result['status'] in replicate_status_map else InferenceStatus.IN_PROGRESS.value + output_details['output'] = result['output'] + + if log_status == InferenceStatus.COMPLETED.value: + output_details = json.loads(log.output_details) + + InferenceLog.objects.filter(id=log.id).update(status=log_status, output_details=json.dumps(output_details)) + else: + app_logger.log(LoggingType.DEBUG, f"Error: {response.content}") return main() \ No newline at end of file diff --git a/shared/constants.py b/shared/constants.py index 32dcb845..2e5a18f4 100644 --- a/shared/constants.py +++ b/shared/constants.py @@ -73,6 +73,22 @@ class ViewType(ExtendedEnum): SINGLE = "Single" LIST = "List" +class InferenceType(ExtendedEnum): + FRAME_TIMING_IMAGE_INFERENCE = "frame_timing_inference" # for generating variants of a frame + FRAME_TIMING_VIDEO_INFERENCE = "frame_timing_video_inference" # for generating variants of a video + +class InferenceStatus(ExtendedEnum): + QUEUED = "queued" + IN_PROGRESS = "in_progress" + COMPLETED = "completed" + FAILED = "failed" + 
CANCELED = "canceled" + +class InferenceParamType(ExtendedEnum): + REPLICATE_INFERENCE = "replicate_inference" # replicate url for queue inference and other data + QUERY_DICT = "query_dict" # query dict of standardized inference params + ORIGIN_DATA = "origin_data" # origin data - used to store file once inference is completed + ##################### global constants ##################### SERVER = os.getenv('SERVER', ServerType.PRODUCTION.value) diff --git a/ui_components/components/frame_styling_page.py b/ui_components/components/frame_styling_page.py index 96adcd7b..6ca07295 100644 --- a/ui_components/components/frame_styling_page.py +++ b/ui_components/components/frame_styling_page.py @@ -1,5 +1,5 @@ import streamlit as st -from shared.constants import ViewType +from shared.constants import InferenceStatus, ViewType from ui_components.methods.common_methods import add_key_frame,compare_to_previous_and_next_frame,compare_to_source_frame,style_cloning_element @@ -241,6 +241,23 @@ def frame_styling_page(mainheader2, project_uuid: str): elif st.session_state['page'] == "Motion": timeline_view(shift_frames_setting, project_uuid, "Motion", header_col_3, header_col_4) - + # ------- change this ---------- + elif st.session_state['frame_styling_view_type'] == "Log List": + log_list = data_repo.get_all_inference_log_list(project_uuid) + for log in log_list: + if not log.status: + continue + + c1, c2, c3 = st.columns([1, 1, 1]) + with c1: + st.write(log.uuid) + + with c2: + st.write(log.status) + + with c3: + if log.status == InferenceStatus.COMPLETED.value: + with st.button("Add to project"): + print("add to project") diff --git a/ui_components/methods/common_methods.py b/ui_components/methods/common_methods.py index c5d8eb90..15181f77 100644 --- a/ui_components/methods/common_methods.py +++ b/ui_components/methods/common_methods.py @@ -14,16 +14,15 @@ from io import BytesIO import numpy as np import urllib3 -from shared.constants import SERVER, AIModelCategory, 
AIModelType, InternalFileType, ServerType +from shared.constants import SERVER, AIModelCategory, AIModelType, InferenceType, InternalFileType, ServerType from pydub import AudioSegment from backend.models import InternalFileObject from ui_components.constants import CROPPED_IMG_LOCAL_PATH, MASK_IMG_LOCAL_PATH, SECOND_MASK_FILE, SECOND_MASK_FILE_PATH, TEMP_MASK_FILE, CreativeProcessType, WorkflowStageType from ui_components.methods.file_methods import add_temp_file_to_project, generate_pil_image, save_or_host_file, save_or_host_file_bytes -from ui_components.methods.ml_methods import create_depth_mask_image, inpainting, remove_background from ui_components.methods.video_methods import calculate_desired_duration_of_individual_clip, create_or_get_single_preview_video from ui_components.models import InternalAIModelObject, InternalFrameTimingObject, InternalSettingObject -from utils.common_utils import reset_styling_settings, truncate_decimal -from utils.constants import ImageStage, MLQueryObject +from utils.common_utils import reset_styling_settings +from utils.constants import ImageStage from utils.data_repo.data_repo import DataRepo from shared.constants import AnimationStyleType @@ -484,6 +483,8 @@ def reset_zoom_element(): # cropped_img here is a PIL image object def inpaint_in_black_space_element(cropped_img, project_uuid, stage=WorkflowStageType.SOURCE.value): + from ui_components.methods.ml_methods import inpainting + data_repo = DataRepo() project_settings: InternalSettingObject = data_repo.get_project_setting( project_uuid) @@ -1127,13 +1128,11 @@ def save_audio_file(uploaded_file, project_uuid): def execute_image_edit(type_of_mask_selection, type_of_mask_replacement, background_image, editing_image, prompt, negative_prompt, width, height, layer, timing_uuid) -> InternalFileObject: + from ui_components.methods.ml_methods import inpainting, remove_background, create_depth_mask_image data_repo = DataRepo() timing: InternalFrameTimingObject = 
data_repo.get_timing_from_uuid( timing_uuid) project = timing.project - app_secret = data_repo.get_app_secrets_from_user_uuid( - timing.project.user_uuid) - index_of_current_item = timing.aux_frame_index if type_of_mask_selection == "Automated Background Selection": removed_background = remove_background(editing_image) @@ -1288,3 +1287,33 @@ def execute_image_edit(type_of_mask_selection, type_of_mask_replacement, editing_image, prompt, negative_prompt, timing_uuid, False) return edited_image + + +# if the output_file is present it adds it to the respective place or else it updates the inference log +def process_inference_output(**kwargs): + data_repo = DataRepo() + + inference_type = kwargs.get('inference_type') + if inference_type == InferenceType.FRAME_TIMING_IMAGE_INFERENCE.value: + output_file = kwargs.get('output_file') + + if output_file: + timing_uuid = kwargs.get('timing_uuid') + promote_new_generation = kwargs.get('promote_new_generation') + + if output_file != None: + add_image_variant(output_file.uuid, timing_uuid) + + if promote_new_generation == True: + timing = data_repo.get_timing_from_uuid(timing_uuid) + variants = timing.alternative_images_list + number_of_variants = len(variants) + if number_of_variants == 1: + print("No new generation to promote") + else: + promote_image_variant(timing_uuid, number_of_variants - 1) + else: + print("No new generation to promote") + else: + log = kwargs.get('log') + data_repo.update_inference_log_origin_data(log.uuid, **kwargs) \ No newline at end of file diff --git a/ui_components/methods/data_logger.py b/ui_components/methods/data_logger.py index f99abc69..329d3d6e 100644 --- a/ui_components/methods/data_logger.py +++ b/ui_components/methods/data_logger.py @@ -1,6 +1,7 @@ import json import streamlit as st import time +from shared.constants import InferenceStatus from shared.logging.constants import LoggingPayload, LoggingType from shared.logging.logging import AppLogger from utils.data_repo.data_repo import 
DataRepo @@ -17,7 +18,7 @@ def log_model_inference(model: ReplicateModel, time_taken, **kwargs): del kwargs_dict[key] data_str = json.dumps(kwargs_dict) - time_taken = round(time_taken, 2) + time_taken = round(time_taken, 2) if time_taken else None data = { 'model_name': model.name, @@ -43,6 +44,7 @@ def log_model_inference(model: ReplicateModel, time_taken, **kwargs): "input_params" : data_str, "output_details" : json.dumps({"model_name": model.name, "version": model.version}), "total_inference_time" : time_taken, + "status" : InferenceStatus.COMPLETED.value if time_taken else InferenceStatus.QUEUED.value, } log = data_repo.create_inference_log(**log_data) diff --git a/ui_components/methods/ml_methods.py b/ui_components/methods/ml_methods.py index 8d2764a6..0ff7a137 100644 --- a/ui_components/methods/ml_methods.py +++ b/ui_components/methods/ml_methods.py @@ -8,30 +8,25 @@ import uuid import urllib from backend.models import InternalFileObject -from shared.constants import SERVER, AIModelCategory, InternalFileTag, InternalFileType, ServerType +from shared.constants import SERVER, AIModelCategory, InferenceType, InternalFileType, ServerType from ui_components.constants import MASK_IMG_LOCAL_PATH, TEMP_MASK_FILE +from ui_components.methods.common_methods import process_inference_output from ui_components.models import InternalAIModelObject, InternalFrameTimingObject, InternalSettingObject from utils.constants import ImageStage, MLQueryObject from utils.data_repo.data_repo import DataRepo from utils.ml_processor.ml_interface import get_ml_client -from utils.ml_processor.replicate.constants import REPLICATE_MODEL +from utils.ml_processor.replicate.constants import REPLICATE_MODEL, ReplicateModel def trigger_restyling_process(timing_uuid, update_inference_settings, \ transformation_stage, promote_new_generation, **kwargs): - from ui_components.methods.common_methods import add_image_variant, promote_image_variant - data_repo = DataRepo() timing: 
InternalFrameTimingObject = data_repo.get_timing_from_uuid(timing_uuid) project_settings: InternalSettingObject = data_repo.get_project_setting(timing.project.uuid) - if transformation_stage == ImageStage.SOURCE_IMAGE.value: - source_image = timing.source_image - else: - variants: List[InternalFileObject] = timing.alternative_images_list - number_of_variants = len(variants) - source_image = timing.primary_image + source_image = timing.source_image if transformation_stage == ImageStage.SOURCE_IMAGE.value else \ + timing.primary_image query_obj = MLQueryObject( timing_uuid, @@ -62,32 +57,26 @@ def trigger_restyling_process(timing_uuid, update_inference_settings, \ ) query_obj.prompt = dynamic_prompting(prompt, source_image) + output, log = restyle_images(query_obj, True) + + inference_data = { + "inference_type": InferenceType.FRAME_TIMING_IMAGE_INFERENCE.value, + "output_file": output, + "log": log, + "timing_uuid": timing_uuid, + "promote_new_generation": promote_new_generation, + } + process_inference_output(**inference_data) + - # TODO: reverse the log creation flow (create log first and then pass the uuid) - output_file = restyle_images(query_obj) - - if output_file != None: - add_image_variant(output_file.uuid, timing_uuid) - - if promote_new_generation == True: - timing = data_repo.get_timing_from_uuid(timing_uuid) - variants = timing.alternative_images_list - number_of_variants = len(variants) - if number_of_variants == 1: - print("No new generation to promote") - else: - promote_image_variant(timing_uuid, number_of_variants - 1) - else: - print("No new generation to promote") - -def restyle_images(query_obj: MLQueryObject) -> InternalFileObject: +def restyle_images(query_obj: MLQueryObject, queue_inference=False) -> InternalFileObject: data_repo = DataRepo() ml_client = get_ml_client() db_model = data_repo.get_ai_model_from_uuid(query_obj.model_uuid) if db_model.category == AIModelCategory.LORA.value: model = REPLICATE_MODEL.clones_lora_training_2 - output, 
log = ml_client.predict_model_output_standardized(model, query_obj) + output, log = ml_client.predict_model_output_standardized(model, query_obj, queue_inference=queue_inference) elif db_model.category == AIModelCategory.CONTROLNET.value: adapter_type = query_obj.adapter_type @@ -107,68 +96,31 @@ def restyle_images(query_obj: MLQueryObject) -> InternalFileObject: model = REPLICATE_MODEL.jagilley_controlnet_depth2img elif adapter_type == "pose": model = REPLICATE_MODEL.jagilley_controlnet_pose - output, log = ml_client.predict_model_output_standardized(model, query_obj) + output, log = ml_client.predict_model_output_standardized(model, query_obj, queue_inference=queue_inference) elif db_model.category == AIModelCategory.DREAMBOOTH.value: - output, log = prompt_model_dreambooth(query_obj) + output, log = prompt_model_dreambooth(query_obj, queue_inference=queue_inference) else: - model = REPLICATE_MODEL.get_model_by_db_obj(model) # TODO: remove this dependency - output, log = ml_client.predict_model_output_standardized(model, query_obj) - - - filename = str(uuid.uuid4()) + ".png" - output_file = data_repo.create_file( - name=filename, - type=InternalFileType.IMAGE.value, - hosted_url=output[0], - inference_log_id=log.uuid - ) + model = REPLICATE_MODEL.get_model_by_db_obj(db_model) # TODO: remove this dependency + output, log = ml_client.predict_model_output_standardized(model, query_obj, queue_inference=queue_inference) - return output_file + return output, log -def prompt_model_dreambooth(query_obj: MLQueryObject): +def prompt_model_dreambooth(query_obj: MLQueryObject, queue_inference=False): data_repo = DataRepo() + ml_client = get_ml_client() - if not ('dreambooth_model_uuid' in st.session_state and st.session_state['dreambooth_model_uuid']): + model_uuid = query_obj.data.get('dreambooth_model_uuid', None) + if not model_uuid: st.error('No dreambooth model selected') return - timing_uuid = query_obj.timing_uuid - source_image_file: InternalFileObject = 
data_repo.get_file_from_uuid(query_obj.image_uuid) - timing: InternalFrameTimingObject = data_repo.get_timing_from_uuid( - timing_uuid) - timing_details: List[InternalFrameTimingObject] = data_repo.get_timing_list_from_project( - timing.project.uuid) - - project_settings: InternalSettingObject = data_repo.get_project_setting( - timing.project.uuid) - - dreambooth_model: InternalAIModelObject = data_repo.get_ai_model_from_uuid(st.session_state['dreambooth_model_uuid']) + dreambooth_model: InternalAIModelObject = data_repo.get_ai_model_from_uuid(model_uuid) model_name = dreambooth_model.name - image_number = timing.aux_frame_index - prompt = timing.prompt - strength = timing.strength - negative_prompt = timing.negative_prompt - guidance_scale = timing.guidance_scale - seed = timing.seed - num_inference_steps = timing.num_inference_steps - model_id = dreambooth_model.replicate_url - ml_client = get_ml_client() - - source_image = source_image_file.location - if timing_details[image_number].adapter_type == "Yes": - if source_image.startswith("http"): - control_image = source_image - else: - control_image = open(source_image, "rb") - else: - control_image = None - - # version of models that were custom created has to be fetched if not dreambooth_model.version: version = ml_client.get_model_version_from_id(model_id) data_repo.update_ai_model(uuid=dreambooth_model.uuid, version=version) @@ -176,40 +128,10 @@ def prompt_model_dreambooth(query_obj: MLQueryObject): version = dreambooth_model.version app_setting = data_repo.get_app_setting_from_uuid() - model_version = ml_client.get_model_by_name( - f"{app_setting.replicate_username}/{model_name}", version) - - if source_image.startswith("http"): - input_image = source_image - else: - input_image = open(source_image, "rb") - - input_data = { - "image": input_image, - "prompt": prompt, - "prompt_strength": float(strength), - "height": int(project_settings.height), - "width": int(project_settings.width), - 
"disable_safety_check": True, - "negative_prompt": negative_prompt, - "guidance_scale": float(guidance_scale), - "seed": int(seed), - "num_inference_steps": int(num_inference_steps) - } - - if control_image != None: - input_data['control_image'] = control_image - - output = model_version.predict(**input_data) - - for i in output: - filename = str(uuid.uuid4()) + ".png" - image_file = data_repo.create_file( - name=filename, type=InternalFileType.IMAGE.value, hosted_url=i, tag=InternalFileTag.GENERATED_VIDEO.value) - return image_file - - return None + model = ReplicateModel(f"{app_setting.replicate_username}/{model_name}", version) + output, log = ml_client.predict_model_output_standardized(model, query_obj, queue_inference=queue_inference) + return output, log def prompt_clip_interrogator(input_image, which_model, best_or_fast): diff --git a/ui_components/models.py b/ui_components/models.py index d565f0c4..7216b789 100644 --- a/ui_components/models.py +++ b/ui_components/models.py @@ -1,7 +1,7 @@ import datetime import streamlit as st import json -from shared.constants import AnimationStyleType, AnimationToolType +from shared.constants import AnimationStyleType, AnimationToolType, InferenceParamType from ui_components.constants import TEMP_MASK_FILE, DefaultProjectSettingParams, DefaultTimingStyleParams from utils.common_decorators import session_state_attributes @@ -37,8 +37,8 @@ def inference_params(self) -> MLQueryObject: if log and log.input_params: params = json.loads(log.input_params) - if 'query_dict' in params: - return MLQueryObject(**json.loads(params['query_dict'])) + if InferenceParamType.QUERY_DICT.value in params: + return MLQueryObject(**json.loads(params[InferenceParamType.QUERY_DICT.value])) return None @@ -267,7 +267,7 @@ def __init__(self, **kwargs): def to_json(self): return json.dumps(self, default=lambda o: o.__dict__, sort_keys=True, indent=4) - +# input_params = {**input_params, "query_dict": {}, "origin_data": {}, "replicate_inference": {}} 
class InferenceLogObject: def __init__(self, **kwargs): self.uuid = kwargs['uuid'] if key_present('uuid', kwargs) else None @@ -278,6 +278,7 @@ def __init__(self, **kwargs): self.input_params = kwargs['input_params'] if key_present('input_params', kwargs) else None self.output_details = kwargs['output_details'] if key_present('output_details', kwargs) else None self.total_inference_time = kwargs['total_inference_time'] if key_present('total_inference_time', kwargs) else None + self.status = kwargs['status'] if key_present('status', kwargs) else None def key_present(key, dict): diff --git a/ui_components/setup.py b/ui_components/setup.py index 0dbf1ce4..b726b0cd 100644 --- a/ui_components/setup.py +++ b/ui_components/setup.py @@ -139,7 +139,7 @@ def setup_app_ui(): "nav-link": {"font-size": "15px", "margin": "0px", "--hover-color": "#eee"}, "nav-link-selected": {"background-color": "green"}}, manual_select=st.session_state["manual_select"]) # TODO: CORRECT-CODE - view_types = ["Individual View", "List View"] + view_types = ["Individual View", "List View", "Log List"] if 'frame_styling_view_type_index' not in st.session_state: st.session_state['frame_styling_view_type_index'] = 0 diff --git a/ui_components/widgets/add_key_frame_element.py b/ui_components/widgets/add_key_frame_element.py index 4a6383f0..93a7cb36 100644 --- a/ui_components/widgets/add_key_frame_element.py +++ b/ui_components/widgets/add_key_frame_element.py @@ -29,11 +29,8 @@ def add_key_frame_element(timing_details, project_uuid): if source_of_starting_image == "Existing Frame": with image2: transformation_stage = st.radio( - "Which stage would you like to use?", - [ - ImageStage.MAIN_VARIANT.value, - ImageStage.SOURCE_IMAGE.value - ], + label="Which stage would you like to use?", + options=ImageStage.value_list(), key="transformation_stage", horizontal=True ) diff --git a/ui_components/widgets/styling_element.py b/ui_components/widgets/styling_element.py index 74bd556e..e0b8d5cb 100644 --- 
a/ui_components/widgets/styling_element.py +++ b/ui_components/widgets/styling_element.py @@ -374,6 +374,9 @@ def styling_element(timing_uuid, view_type=ViewType.SINGLE.value): st.session_state["use_new_settings"] = True + st.session_state["promote_new_generation"] = st.checkbox( + "Promote new generation to main variant", key="promote_new_generation_to_main_variant") + if view_type == ViewType.LIST.value: batch_run_range = st.slider( "Select range:", 1, 1, (1, len(timing_details))) @@ -381,9 +384,7 @@ def styling_element(timing_uuid, view_type=ViewType.SINGLE.value): last_batch_run_value = batch_run_range[1] - 1 st.write(batch_run_range) - - st.session_state["promote_new_generation"] = st.checkbox( - "Promote new generation to main variant", key="promote_new_generation_to_main_variant") + st.session_state["use_new_settings"] = st.checkbox( "Use new settings for batch query", key="keep_existing_settings", help="If unchecked, the new settings will be applied to the existing variants.") diff --git a/utils/data_repo/api_repo.py b/utils/data_repo/api_repo.py index 49648440..db904769 100644 --- a/utils/data_repo/api_repo.py +++ b/utils/data_repo/api_repo.py @@ -284,7 +284,12 @@ def delete_inference_log_from_uuid(self, uuid): res = self.db_repo.delete_inference_log_from_uuid(uuid) return InternalResponse(res['payload'], 'success', res['status']).status - # TODO: complete this + def update_inference_log(self, uuid, **kwargs): + kwargs['uuid'] = uuid + res = self.http_put(url=self.LOG_URL, data=kwargs) + return InternalResponse(res['payload'], 'success', res['status']) + + # TODO: complete this: backend def get_ai_model_param_map_from_uuid(self, uuid): pass diff --git a/utils/data_repo/data_repo.py b/utils/data_repo/data_repo.py index 66bf2f70..502e1717 100644 --- a/utils/data_repo/data_repo.py +++ b/utils/data_repo/data_repo.py @@ -1,6 +1,7 @@ # this repo serves as a middlerware between API backend and the frontend +import json import threading -from shared.constants 
import InternalFileType, InternalResponse +from shared.constants import InferenceParamType, InternalFileType, InternalResponse from shared.constants import SERVER, ServerType from ui_components.models import InferenceLogObject, InternalAIModelObject, InternalAppSettingObject, InternalBackupObject, InternalFrameTimingObject, InternalProjectObject, InternalFileObject, InternalSettingObject, InternalUserObject from utils.cache.cache_methods import cache_data @@ -190,6 +191,7 @@ def get_all_inference_log_list(self, project_id=None): log_list = self.db_repo.get_all_inference_log_list(project_id).data['data'] return [InferenceLogObject(**log) for log in log_list] if log_list else None + def create_inference_log(self, **kwargs): res = self.db_repo.create_inference_log(**kwargs) log = res.data['data'] if res else None @@ -199,6 +201,21 @@ def delete_inference_log_from_uuid(self, uuid): res = self.db_repo.delete_inference_log_from_uuid(uuid) return res.status + def update_inference_log(self, uuid, **kwargs): + res = self.db_repo.update_inference_log(uuid, **kwargs) + return res.status + + def update_inference_log_origin_data(self, uuid, **kwargs): + res = self.get_inference_log_from_uuid(uuid) + if not res: + return False + + input_params_data = json.loads(res.input_params) + input_params_data[InferenceParamType.ORIGIN_DATA.value] = kwargs[InferenceParamType.ORIGIN_DATA.value] + + res = self.update_inference_log(uuid, input_params=json.dumps(input_params_data)) + return res.status + # ai model param map # TODO: add DTO in the output diff --git a/utils/ml_processor/replicate/constants.py b/utils/ml_processor/replicate/constants.py index 78c4fe90..e2751067 100644 --- a/utils/ml_processor/replicate/constants.py +++ b/utils/ml_processor/replicate/constants.py @@ -1,5 +1,7 @@ from dataclasses import dataclass +from shared.constants import InferenceStatus + @dataclass class ReplicateModel: @@ -63,4 +65,12 @@ def get_model_by_db_obj(model_db_obj): 
REPLICATE_MODEL.jagilley_controlnet_hough, REPLICATE_MODEL.jagilley_controlnet_depth2img, REPLICATE_MODEL.jagilley_controlnet_pose, -] \ No newline at end of file +] + +replicate_status_map = { + "starting": InferenceStatus.QUEUED.value, + "processing": InferenceStatus.IN_PROGRESS.value, + "succeeded": InferenceStatus.COMPLETED.value, + "failed": InferenceStatus.FAILED.value, + "canceled": InferenceStatus.CANCELED.value +} \ No newline at end of file diff --git a/utils/ml_processor/replicate/replicate.py b/utils/ml_processor/replicate/replicate.py index 69681d9b..acd660e3 100644 --- a/utils/ml_processor/replicate/replicate.py +++ b/utils/ml_processor/replicate/replicate.py @@ -1,7 +1,10 @@ import asyncio import io import time +from shared.constants import InferenceParamType from shared.file_upload.s3 import upload_file +from shared.logging.constants import LoggingType +from shared.logging.logging import AppLogger from utils.constants import MLQueryObject from utils.data_repo.data_repo import DataRepo from utils.ml_processor.ml_interface import MachineLearningProcessor @@ -22,7 +25,7 @@ def __init__(self): data_repo = DataRepo() self.app_settings = data_repo.get_app_secrets_from_user_uuid() - self.logger = None + self.logger = AppLogger() try: os.environ["REPLICATE_API_TOKEN"] = self.app_settings['replicate_key'] except Exception as e: @@ -51,10 +54,10 @@ def get_model_by_name(self, model_name, model_version=None): return model_version # it converts the standardized query_obj into params required by replicate - def predict_model_output_standardized(self, model: ReplicateModel, query_obj: MLQueryObject): + def predict_model_output_standardized(self, model: ReplicateModel, query_obj: MLQueryObject, queue_inference=False): params = get_model_params_from_query_obj(model, query_obj) - params['query_dict'] = query_obj.to_json() - return self.predict_model_output(model, **params) + params[InferenceParamType.QUERY_DICT.value] = query_obj.to_json() + return 
self.predict_model_output(model, **params) if not queue_inference else self.queue_prediction(model, **params) @check_user_credits def predict_model_output(self, replicate_model: ReplicateModel, **kwargs): @@ -239,13 +242,42 @@ def get_model_version_from_id(self, model_id): return version @check_user_credits - def create_prediction(self, replicate_model: ReplicateModel, **kwargs): - model_version = self.get_model(replicate_model) + def queue_prediction(self, replicate_model: ReplicateModel, **kwargs): + url = "https://api.replicate.com/v1/predictions" headers = { "Authorization": "Token " + os.environ.get("REPLICATE_API_TOKEN"), "Content-Type": "application/json" } - response = r.post(self.dreambooth_training_url, headers=headers, data=json.dumps(kwargs)) + del kwargs['query_dict'] + data = { + "version": replicate_model.version, + "input": dict(kwargs) + } + + if 'image' in data['input'] and not isinstance(data['input']['image'], str): + response = r.post(self.training_data_upload_url, headers=headers) + if response.status_code != 200: + raise Exception(str(response.content)) + upload_url = response.json()["upload_url"] + serving_url = response.json()["serving_url"] + r.put(upload_url, data=data['input']['image'], headers=headers) + data['input']['image'] = serving_url + + response = r.post(url, headers=headers, json=data) response = (response.json()) + if response.status_code == 200: + result = response.json() + data = { + "prediction_id": result['id'], + "error": result['error'], + "status": result['status'], + "created_at": result['created_at'], + "urls": result['urls'], # these contain "cancel" and "get" urls + } + + log = log_model_inference(replicate_model, None, **kwargs) + return None, log + else: + self.logger.log(LoggingType.ERROR, f"Error in creating prediction: {response.content}") \ No newline at end of file diff --git a/utils/ml_processor/replicate/utils.py b/utils/ml_processor/replicate/utils.py index fb9addd4..2613f3dc 100644 --- 
a/utils/ml_processor/replicate/utils.py +++ b/utils/ml_processor/replicate/utils.py @@ -236,6 +236,25 @@ def get_model_params_from_query_obj(model, query_obj: MLQueryObject): } else: + app_settings = data_repo.get_app_settings() data = query_obj.to_json() + # hackish sol: handling custom dreambooth models + if app_settings.replicate_username in model.name: + data = { + "image": input_image, + "prompt": query_obj.prompt, + "prompt_strength": query_obj.strength, + "height": query_obj.height, + "width": query_obj.width, + "disable_safety_check": True, + "negative_prompt": query_obj.negative_prompt, + "guidance_scale": query_obj.guidance_scale, + "seed": -1, + "num_inference_steps": query_obj.num_inference_steps + } + + if input_image: + data['control_image'] = input_image + return data \ No newline at end of file From 61f238c43854f36b664626c85eb798a4fa5e12e0 Mon Sep 17 00:00:00 2001 From: piyushK52 Date: Tue, 10 Oct 2023 13:57:50 +0530 Subject: [PATCH 055/164] wip: queue prediction --- app.py | 10 +++++----- backend/serializers/dao.py | 1 + banodoco_runner.py | 8 ++++---- ui_components/methods/common_methods.py | 1 + ui_components/methods/data_logger.py | 2 +- ui_components/methods/file_methods.py | 19 ++++++++++++++++++- utils/data_repo/data_repo.py | 6 +++--- utils/ml_processor/replicate/replicate.py | 23 ++++++++++++++--------- 8 files changed, 47 insertions(+), 23 deletions(-) diff --git a/app.py b/app.py index 7fe9df48..2032bbb4 100644 --- a/app.py +++ b/app.py @@ -34,11 +34,11 @@ # SENTRY_ENV = ssm.get_parameter(Name='/banodoco-fe/sentry/environment')['Parameter']['Value'] # SENTRY_DSN = ssm.get_parameter(Name='/banodoco-fe/sentry/dsn')['Parameter']['Value'] -sentry_sdk.init( - environment=SENTRY_ENV, - dsn=SENTRY_DSN, - traces_sample_rate=0 -) +# sentry_sdk.init( +# environment=SENTRY_ENV, +# dsn=SENTRY_DSN, +# traces_sample_rate=0 +# ) def start_runner(): if SERVER != ServerType.DEVELOPMENT.value: diff --git a/backend/serializers/dao.py 
b/backend/serializers/dao.py index 218519c9..2c3bf6fd 100644 --- a/backend/serializers/dao.py +++ b/backend/serializers/dao.py @@ -65,6 +65,7 @@ class CreateInferenceLogDao(serializers.Serializer): input_params = serializers.CharField(required=False) output_details = serializers.CharField(required=False) total_inference_time = serializers.CharField(required=False) + status = serializers.CharField(required=False, default="") class CreateAIModelParamMapDao(serializers.Serializer): diff --git a/banodoco_runner.py b/banodoco_runner.py index 80081c56..0cf3b3ab 100644 --- a/banodoco_runner.py +++ b/banodoco_runner.py @@ -66,20 +66,20 @@ def check_and_update_db(): input_params = json.loads(log.input_params) replicate_data = input_params.get(InferenceParamType.REPLICATE_INFERENCE.value, None) if replicate_data: - prediction_id = replicate_data['id'] + prediction_id = replicate_data['prediction_id'] url = "https://api.replicate.com/v1/predictions/" + prediction_id headers = { "Authorization": f"Token {replicate_key}" } response = requests.get(url, headers=headers) - if response.status_code == 200: + if response.status_code in [200, 201]: result = response.json() log_status = replicate_status_map[result['status']] if result['status'] in replicate_status_map else InferenceStatus.IN_PROGRESS.value - output_details['output'] = result['output'] + output_details = json.loads(log.output_details) if log_status == InferenceStatus.COMPLETED.value: - output_details = json.loads(log.output_details) + output_details['output'] = result['output'] InferenceLog.objects.filter(id=log.id).update(status=log_status, output_details=json.dumps(output_details)) else: diff --git a/ui_components/methods/common_methods.py b/ui_components/methods/common_methods.py index 15181f77..871d834e 100644 --- a/ui_components/methods/common_methods.py +++ b/ui_components/methods/common_methods.py @@ -1316,4 +1316,5 @@ def process_inference_output(**kwargs): print("No new generation to promote") else: log = 
kwargs.get('log') + del kwargs['log'] data_repo.update_inference_log_origin_data(log.uuid, **kwargs) \ No newline at end of file diff --git a/ui_components/methods/data_logger.py b/ui_components/methods/data_logger.py index 329d3d6e..72f4ffcb 100644 --- a/ui_components/methods/data_logger.py +++ b/ui_components/methods/data_logger.py @@ -18,7 +18,7 @@ def log_model_inference(model: ReplicateModel, time_taken, **kwargs): del kwargs_dict[key] data_str = json.dumps(kwargs_dict) - time_taken = round(time_taken, 2) if time_taken else None + time_taken = round(time_taken, 2) if time_taken else 0 data = { 'model_name': model.name, diff --git a/ui_components/methods/file_methods.py b/ui_components/methods/file_methods.py index 39b7fcce..da19f536 100644 --- a/ui_components/methods/file_methods.py +++ b/ui_components/methods/file_methods.py @@ -1,7 +1,9 @@ +import base64 from io import BytesIO import io import json import os +import mimetypes import tempfile from typing import Union from urllib.parse import urlparse @@ -155,4 +157,19 @@ def convert_bytes_to_file(file_location_to_save, mime_type, file_bytes, project_ file = data_repo.create_file(**file_data) - return file \ No newline at end of file + return file + + +def convert_file_to_base64(fh: io.IOBase) -> str: + fh.seek(0) + + b = fh.read() + if isinstance(b, str): + b = b.encode("utf-8") + encoded_body = base64.b64encode(b) + if getattr(fh, "name", None): + mime_type = mimetypes.guess_type(fh.name)[0] # type: ignore + else: + mime_type = "application/octet-stream" + s = encoded_body.decode("utf-8") + return f"data:{mime_type};base64,{s}" \ No newline at end of file diff --git a/utils/data_repo/data_repo.py b/utils/data_repo/data_repo.py index 502e1717..64174157 100644 --- a/utils/data_repo/data_repo.py +++ b/utils/data_repo/data_repo.py @@ -211,10 +211,10 @@ def update_inference_log_origin_data(self, uuid, **kwargs): return False input_params_data = json.loads(res.input_params) - 
input_params_data[InferenceParamType.ORIGIN_DATA.value] = kwargs[InferenceParamType.ORIGIN_DATA.value] + input_params_data[InferenceParamType.ORIGIN_DATA.value] = dict(kwargs) - res = self.update_inference_log(uuid, input_params=json.dumps(input_params_data)) - return res.status + status = self.update_inference_log(uuid, input_params=json.dumps(input_params_data)) + return status # ai model param map diff --git a/utils/ml_processor/replicate/replicate.py b/utils/ml_processor/replicate/replicate.py index acd660e3..a893df3f 100644 --- a/utils/ml_processor/replicate/replicate.py +++ b/utils/ml_processor/replicate/replicate.py @@ -1,10 +1,12 @@ import asyncio import io import time +import uuid from shared.constants import InferenceParamType from shared.file_upload.s3 import upload_file from shared.logging.constants import LoggingType from shared.logging.logging import AppLogger +from ui_components.methods.file_methods import convert_file_to_base64 from utils.constants import MLQueryObject from utils.data_repo.data_repo import DataRepo from utils.ml_processor.ml_interface import MachineLearningProcessor @@ -250,24 +252,25 @@ def queue_prediction(self, replicate_model: ReplicateModel, **kwargs): } del kwargs['query_dict'] + keys_to_delete = [] + for k, _ in kwargs.items(): + if kwargs[k] == None: + keys_to_delete.append(k) + + for k in keys_to_delete: + del kwargs[k] + data = { "version": replicate_model.version, "input": dict(kwargs) } if 'image' in data['input'] and not isinstance(data['input']['image'], str): - response = r.post(self.training_data_upload_url, headers=headers) - if response.status_code != 200: - raise Exception(str(response.content)) - upload_url = response.json()["upload_url"] - serving_url = response.json()["serving_url"] - r.put(upload_url, data=data['input']['image'], headers=headers) - data['input']['image'] = serving_url + data['input']['image'] = convert_file_to_base64(data['input']['image']) response = r.post(url, headers=headers, json=data) - 
response = (response.json()) - if response.status_code == 200: + if response.status_code in [200, 201]: result = response.json() data = { "prediction_id": result['id'], @@ -277,6 +280,8 @@ def queue_prediction(self, replicate_model: ReplicateModel, **kwargs): "urls": result['urls'], # these contain "cancel" and "get" urls } + kwargs[InferenceParamType.REPLICATE_INFERENCE.value] = data + log = log_model_inference(replicate_model, None, **kwargs) return None, log else: From fabcf6ecdafd4a8fdc9f978a490f29138bc4e274 Mon Sep 17 00:00:00 2001 From: piyushK52 Date: Tue, 10 Oct 2023 17:05:37 +0530 Subject: [PATCH 056/164] wip: queue prediction --- backend/serializers/dto.py | 3 +- banodoco_runner.py | 22 +++++++++--- banodoco_settings.py | 2 -- shared/constants.py | 3 +- ui_components/components/app_settings_page.py | 13 +------ .../components/frame_styling_page.py | 35 +++++++++++++++---- ui_components/methods/common_methods.py | 11 ++++-- ui_components/methods/ml_methods.py | 2 +- utils/encryption.py | 2 +- 9 files changed, 62 insertions(+), 31 deletions(-) diff --git a/backend/serializers/dto.py b/backend/serializers/dto.py index 6120fb36..5513739e 100644 --- a/backend/serializers/dto.py +++ b/backend/serializers/dto.py @@ -60,7 +60,8 @@ class Meta: "input_params", "output_details", "total_inference_time", - "created_on" + "created_on", + "status" ) diff --git a/banodoco_runner.py b/banodoco_runner.py index 0cf3b3ab..c11c4c1e 100644 --- a/banodoco_runner.py +++ b/banodoco_runner.py @@ -20,11 +20,14 @@ django.setup() SERVER = os.getenv('SERVER', 'development') +REFRESH_FREQUENCY = 2 # refresh every 2 seconds +MAX_APP_RETRY_CHECK = 3 # if the app is not running after 3 retries then the script will stop + def main(): if SERVER != 'development': return - retries = 3 + retries = MAX_APP_RETRY_CHECK print('runner running') while True: @@ -34,9 +37,9 @@ def main(): return retries -= 1 else: - retries = min(retries + 1, 3) + retries = min(retries + 1, MAX_APP_RETRY_CHECK) - 
time.sleep(1) + time.sleep(REFRESH_FREQUENCY) check_and_update_db() def is_app_running(): @@ -58,7 +61,7 @@ def check_and_update_db(): app_logger = AppLogger() app_setting = AppSetting.objects.filter(is_disabled=False).first() - replicate_key = app_setting.replicate_key + replicate_key = app_setting.replicate_key_decrypted log_list = InferenceLog.objects.filter(status__in=[InferenceStatus.QUEUED.value, InferenceStatus.IN_PROGRESS.value], is_disabled=False).all() @@ -72,6 +75,7 @@ def check_and_update_db(): headers = { "Authorization": f"Token {replicate_key}" } + print("replicate key: ", replicate_key) response = requests.get(url, headers=headers) if response.status_code in [200, 201]: result = response.json() @@ -79,11 +83,19 @@ def check_and_update_db(): output_details = json.loads(log.output_details) if log_status == InferenceStatus.COMPLETED.value: - output_details['output'] = result['output'] + output_details['output'] = [result['output'][-1]] if output_details['version'] == \ + "a4a8bafd6089e1716b06057c42b19378250d008b80fe87caa5cd36d40c1eda90" else result['output'] InferenceLog.objects.filter(id=log.id).update(status=log_status, output_details=json.dumps(output_details)) else: app_logger.log(LoggingType.DEBUG, f"Error: {response.content}") + else: + # if not replicate data is present then removing the status + InferenceLog.objects.filter(id=log.id).update(status="") + + if not len(log_list): + app_logger.log(LoggingType.DEBUG, f"No logs found") + return main() \ No newline at end of file diff --git a/banodoco_settings.py b/banodoco_settings.py index 55e3c8be..4cda3c09 100644 --- a/banodoco_settings.py +++ b/banodoco_settings.py @@ -15,8 +15,6 @@ from utils.data_repo.data_repo import DataRepo from utils.ml_processor.replicate.constants import REPLICATE_MODEL -ENCRYPTION_KEY = 'J2684nBgNUYa_K0a6oBr5H8MpSRW0EJ52Qmq7jExE-w=' - logger = AppLogger() def project_init(): diff --git a/shared/constants.py b/shared/constants.py index 2e5a18f4..a3293803 100644 --- 
a/shared/constants.py +++ b/shared/constants.py @@ -97,4 +97,5 @@ class InferenceParamType(ExtendedEnum): AWS_S3_REGION = 'ap-south-1' # TODO: discuss this OFFLINE_MODE = os.getenv('OFFLINE_MODE', False) # for picking up secrets and file storage -LOCAL_DATABASE_NAME = 'banodoco_local.db' \ No newline at end of file +LOCAL_DATABASE_NAME = 'banodoco_local.db' +ENCRYPTION_KEY = 'J2684nBgNUYa_K0a6oBr5H8MpSRW0EJ52Qmq7jExE-w=' \ No newline at end of file diff --git a/ui_components/components/app_settings_page.py b/ui_components/components/app_settings_page.py index d4a1c54d..4262ace1 100644 --- a/ui_components/components/app_settings_page.py +++ b/ui_components/components/app_settings_page.py @@ -7,9 +7,7 @@ def app_settings_page(): - # TODO: automatically pick the current user for fetching related details data_repo = DataRepo() - app_settings = data_repo.get_app_setting_from_uuid() app_secrets = data_repo.get_app_secrets_from_user_uuid() if SERVER == ServerType.DEVELOPMENT.value: @@ -38,13 +36,4 @@ def app_settings_page(): if st.button("Generate payment link"): payment_link = data_repo.generate_payment_link(credits) payment_link = f""" PAYMENT LINK """ - st.markdown(payment_link, unsafe_allow_html=True) - - - # locally_or_hosted = st.radio("Do you want to store your files locally or on AWS?", ("Locally", "AWS"),disabled=True, help="Only local storage is available at the moment, let me know if you need AWS storage - it should be pretty easy.") - - # if locally_or_hosted == "AWS": - # with st.expander("AWS API Keys:"): - # aws_access_key_id = st.text_input("aws_access_key_id", value = app_settings["aws_access_key_id"]) - # aws_secret_access_key = st.text_input("aws_secret_access_key", value = app_settings["aws_secret_access_key"]) - \ No newline at end of file + st.markdown(payment_link, unsafe_allow_html=True) \ No newline at end of file diff --git a/ui_components/components/frame_styling_page.py b/ui_components/components/frame_styling_page.py index 6ca07295..17093cd0 
100644 --- a/ui_components/components/frame_styling_page.py +++ b/ui_components/components/frame_styling_page.py @@ -1,8 +1,9 @@ +import json import streamlit as st -from shared.constants import InferenceStatus, ViewType +from shared.constants import InferenceParamType, InferenceStatus, ViewType -from ui_components.methods.common_methods import add_key_frame,compare_to_previous_and_next_frame,compare_to_source_frame,style_cloning_element +from ui_components.methods.common_methods import add_key_frame,compare_to_previous_and_next_frame,compare_to_source_frame, process_inference_output,style_cloning_element from ui_components.methods.ml_methods import trigger_restyling_process from ui_components.widgets.cropping_element import cropping_selector_element from ui_components.widgets.frame_clip_generation_elements import current_preview_video_element, update_animation_style_element @@ -247,8 +248,8 @@ def frame_styling_page(mainheader2, project_uuid: str): for log in log_list: if not log.status: continue - - c1, c2, c3 = st.columns([1, 1, 1]) + + c1, c2, c3, c4 = st.columns([1, 1, 1, 1]) with c1: st.write(log.uuid) @@ -256,8 +257,30 @@ def frame_styling_page(mainheader2, project_uuid: str): st.write(log.status) with c3: + output_data = json.loads(log.output_details) + if 'output' in output_data and output_data['output']: + if isinstance(output_data['output'], list): + output_data['output'] = output_data['output'][0] + + if output_data['output'].endswith('png'): + st.image(output_data['output']) + elif output_data['output'].endswith('mp4') or output_data['output'].endswith('gif'): + st.video(output_data['output']) + else: + st.write("No data to display") + else: + st.write("No data to display") + + with c4: if log.status == InferenceStatus.COMPLETED.value: - with st.button("Add to project"): - print("add to project") + output_data = json.loads(log.output_details) + if output_data and ('output' in output_data and output_data['output']): + if st.button("Add to project", 
key=str(log.uuid)): + origin_data = json.loads(log.input_params).get(InferenceParamType.ORIGIN_DATA.value, None) + if origin_data: + process_inference_output(**origin_data) + else: + if st.button("Data expired", key=str(log.uuid)): + pass diff --git a/ui_components/methods/common_methods.py b/ui_components/methods/common_methods.py index 871d834e..49354c08 100644 --- a/ui_components/methods/common_methods.py +++ b/ui_components/methods/common_methods.py @@ -1289,13 +1289,20 @@ def execute_image_edit(type_of_mask_selection, type_of_mask_replacement, return edited_image -# if the output_file is present it adds it to the respective place or else it updates the inference log +# if the output is present it adds it to the respective place or else it updates the inference log def process_inference_output(**kwargs): data_repo = DataRepo() inference_type = kwargs.get('inference_type') if inference_type == InferenceType.FRAME_TIMING_IMAGE_INFERENCE.value: - output_file = kwargs.get('output_file') + output = kwargs.get('output') + filename = str(uuid.uuid4()) + ".png" + output_file = data_repo.create_file( + name=filename, + type=InternalFileType.IMAGE.value, + hosted_url=output[0], + inference_log_id=log.uuid + ) if output_file: timing_uuid = kwargs.get('timing_uuid') diff --git a/ui_components/methods/ml_methods.py b/ui_components/methods/ml_methods.py index 0ff7a137..185871a1 100644 --- a/ui_components/methods/ml_methods.py +++ b/ui_components/methods/ml_methods.py @@ -61,7 +61,7 @@ def trigger_restyling_process(timing_uuid, update_inference_settings, \ inference_data = { "inference_type": InferenceType.FRAME_TIMING_IMAGE_INFERENCE.value, - "output_file": output, + "output": output, "log": log, "timing_uuid": timing_uuid, "promote_new_generation": promote_new_generation, diff --git a/utils/encryption.py b/utils/encryption.py index 812c2f3c..95bd173f 100644 --- a/utils/encryption.py +++ b/utils/encryption.py @@ -1,6 +1,6 @@ from cryptography.fernet import Fernet +from 
shared.constants import ENCRYPTION_KEY -from banodoco_settings import ENCRYPTION_KEY class Encryptor: def __init__(self): From 86b2f3fad1248a62cbefd5b8e99a02987b9796db Mon Sep 17 00:00:00 2001 From: piyushK52 Date: Tue, 10 Oct 2023 18:37:42 +0530 Subject: [PATCH 057/164] queue prediction fixes --- shared/utils.py | 10 +++++++- .../components/frame_styling_page.py | 25 ++++++++++++------- ui_components/methods/common_methods.py | 2 ++ 3 files changed, 27 insertions(+), 10 deletions(-) diff --git a/shared/utils.py b/shared/utils.py index 30d462be..f4d93a5d 100644 --- a/shared/utils.py +++ b/shared/utils.py @@ -1,3 +1,4 @@ +import requests from shared.constants import InternalResponse import urllib.parse @@ -11,4 +12,11 @@ def execute_shell_command(command: str): def is_online_file_path(file_path): parsed = urllib.parse.urlparse(file_path) - return parsed.scheme in ('http', 'https', 'ftp') \ No newline at end of file + return parsed.scheme in ('http', 'https', 'ftp') + +def is_url_valid(url): + try: + response = requests.head(url) + return response.status_code in [200, 201] + except Exception as e: + return False diff --git a/ui_components/components/frame_styling_page.py b/ui_components/components/frame_styling_page.py index 17093cd0..7a84a6f8 100644 --- a/ui_components/components/frame_styling_page.py +++ b/ui_components/components/frame_styling_page.py @@ -1,6 +1,7 @@ import json import streamlit as st from shared.constants import InferenceParamType, InferenceStatus, ViewType +from shared.utils import is_url_valid from ui_components.methods.common_methods import add_key_frame,compare_to_previous_and_next_frame,compare_to_source_frame, process_inference_output,style_cloning_element @@ -245,8 +246,10 @@ def frame_styling_page(mainheader2, project_uuid: str): # ------- change this ---------- elif st.session_state['frame_styling_view_type'] == "Log List": log_list = data_repo.get_all_inference_log_list(project_uuid) + for log in log_list: - if not log.status: + 
origin_data = json.loads(log.input_params).get(InferenceParamType.ORIGIN_DATA.value, None) + if not log.status or not origin_data: continue c1, c2, c3, c4 = st.columns([1, 1, 1, 1]) @@ -258,7 +261,7 @@ def frame_styling_page(mainheader2, project_uuid: str): with c3: output_data = json.loads(log.output_details) - if 'output' in output_data and output_data['output']: + if 'output' in output_data and output_data['output'] and is_url_valid(output_data['output'][0]): if isinstance(output_data['output'], list): output_data['output'] = output_data['output'][0] @@ -274,13 +277,17 @@ def frame_styling_page(mainheader2, project_uuid: str): with c4: if log.status == InferenceStatus.COMPLETED.value: output_data = json.loads(log.output_details) - if output_data and ('output' in output_data and output_data['output']): + if output_data and ('output' in output_data and output_data['output'] and is_url_valid(output_data['output'][0])): if st.button("Add to project", key=str(log.uuid)): - origin_data = json.loads(log.input_params).get(InferenceParamType.ORIGIN_DATA.value, None) - if origin_data: - process_inference_output(**origin_data) + origin_data['output'] = output_data['output'] + origin_data['log'] = log + process_inference_output(**origin_data) + + # delete origin data (doing this will remove the log from the list) + input_params = json.loads(log.input_params) + del input_params[InferenceParamType.ORIGIN_DATA.value] + data_repo.update_inference_log(log.uuid, input_params=json.dumps(input_params)) + st.rerun() else: - if st.button("Data expired", key=str(log.uuid)): - pass - + st.write("Data expired") diff --git a/ui_components/methods/common_methods.py b/ui_components/methods/common_methods.py index 49354c08..a71d150c 100644 --- a/ui_components/methods/common_methods.py +++ b/ui_components/methods/common_methods.py @@ -1290,6 +1290,7 @@ def execute_image_edit(type_of_mask_selection, type_of_mask_replacement, # if the output is present it adds it to the respective place or 
else it updates the inference log +# TODO: handle cases when the origin frame has been moved or deleted def process_inference_output(**kwargs): data_repo = DataRepo() @@ -1297,6 +1298,7 @@ def process_inference_output(**kwargs): if inference_type == InferenceType.FRAME_TIMING_IMAGE_INFERENCE.value: output = kwargs.get('output') filename = str(uuid.uuid4()) + ".png" + log = kwargs.get('log') output_file = data_repo.create_file( name=filename, type=InternalFileType.IMAGE.value, From 444f4a2100468db87e88ef67daac21434964d7e8 Mon Sep 17 00:00:00 2001 From: piyushK52 Date: Tue, 10 Oct 2023 20:42:47 +0530 Subject: [PATCH 058/164] inference fixes --- banodoco_runner.py | 3 ++- shared/constants.py | 4 +++- ui_components/components/frame_styling_page.py | 3 ++- ui_components/methods/ml_methods.py | 9 +++++---- ui_components/widgets/styling_element.py | 4 ++++ utils/ml_processor/replicate/replicate.py | 10 ++++++++++ 6 files changed, 26 insertions(+), 7 deletions(-) diff --git a/banodoco_runner.py b/banodoco_runner.py index c11c4c1e..75056a9c 100644 --- a/banodoco_runner.py +++ b/banodoco_runner.py @@ -94,7 +94,8 @@ def check_and_update_db(): InferenceLog.objects.filter(id=log.id).update(status="") if not len(log_list): - app_logger.log(LoggingType.DEBUG, f"No logs found") + # app_logger.log(LoggingType.DEBUG, f"No logs found") + pass return diff --git a/shared/constants.py b/shared/constants.py index a3293803..1417be78 100644 --- a/shared/constants.py +++ b/shared/constants.py @@ -98,4 +98,6 @@ class InferenceParamType(ExtendedEnum): OFFLINE_MODE = os.getenv('OFFLINE_MODE', False) # for picking up secrets and file storage LOCAL_DATABASE_NAME = 'banodoco_local.db' -ENCRYPTION_KEY = 'J2684nBgNUYa_K0a6oBr5H8MpSRW0EJ52Qmq7jExE-w=' \ No newline at end of file +ENCRYPTION_KEY = 'J2684nBgNUYa_K0a6oBr5H8MpSRW0EJ52Qmq7jExE-w=' + +QUEUE_INFERENCE_QUERIES = False \ No newline at end of file diff --git a/ui_components/components/frame_styling_page.py 
b/ui_components/components/frame_styling_page.py index 7a84a6f8..f822e0b7 100644 --- a/ui_components/components/frame_styling_page.py +++ b/ui_components/components/frame_styling_page.py @@ -24,7 +24,6 @@ import math from ui_components.constants import CreativeProcessType, WorkflowStageType -from utils.constants import ImageStage from utils.data_repo.data_repo import DataRepo @@ -152,6 +151,7 @@ def frame_styling_page(mainheader2, project_uuid: str): custom_models=st.session_state['custom_models'], adapter_type=st.session_state['adapter_type'], update_inference_settings=True, + add_image_in_params=st.session_state['add_image_in_params'], low_threshold=st.session_state['low_threshold'], high_threshold=st.session_state['high_threshold'], canny_image=st.session_state['canny_image'] if 'canny_image' in st.session_state else None, @@ -245,6 +245,7 @@ def frame_styling_page(mainheader2, project_uuid: str): # ------- change this ---------- elif st.session_state['frame_styling_view_type'] == "Log List": + # TODO: add filtering/pagination when fetching log list log_list = data_repo.get_all_inference_log_list(project_uuid) for log in log_list: diff --git a/ui_components/methods/ml_methods.py b/ui_components/methods/ml_methods.py index 185871a1..4eee2463 100644 --- a/ui_components/methods/ml_methods.py +++ b/ui_components/methods/ml_methods.py @@ -8,7 +8,7 @@ import uuid import urllib from backend.models import InternalFileObject -from shared.constants import SERVER, AIModelCategory, InferenceType, InternalFileType, ServerType +from shared.constants import QUEUE_INFERENCE_QUERIES, SERVER, AIModelCategory, InferenceType, InternalFileType, ServerType from ui_components.constants import MASK_IMG_LOCAL_PATH, TEMP_MASK_FILE from ui_components.methods.common_methods import process_inference_output from ui_components.models import InternalAIModelObject, InternalFrameTimingObject, InternalSettingObject @@ -30,7 +30,7 @@ def trigger_restyling_process(timing_uuid, 
update_inference_settings, \ query_obj = MLQueryObject( timing_uuid, - image_uuid=source_image.uuid, + image_uuid=source_image.uuid if 'add_image_in_params' in kwargs and kwargs['add_image_in_params'] else None, width=project_settings.width, height=project_settings.height, **kwargs @@ -53,11 +53,12 @@ def trigger_restyling_process(timing_uuid, update_inference_settings, \ default_custom_models=query_obj.data.get('custom_models', []), default_adapter_type=query_obj.adapter_type, default_low_threshold=query_obj.low_threshold, - default_high_threshold=query_obj.high_threshold + default_high_threshold=query_obj.high_threshold, + add_image_in_params=st.session_state['add_image_in_params'], ) query_obj.prompt = dynamic_prompting(prompt, source_image) - output, log = restyle_images(query_obj, True) + output, log = restyle_images(query_obj, QUEUE_INFERENCE_QUERIES) inference_data = { "inference_type": InferenceType.FRAME_TIMING_IMAGE_INFERENCE.value, diff --git a/ui_components/widgets/styling_element.py b/ui_components/widgets/styling_element.py index e0b8d5cb..287c3b5c 100644 --- a/ui_components/widgets/styling_element.py +++ b/ui_components/widgets/styling_element.py @@ -66,8 +66,11 @@ def styling_element(timing_uuid, view_type=ViewType.SINGLE.value): # -------------------- Model Selection -------------------- # if st.session_state["transformation_stage"] != ImageStage.NONE.value: + st.session_state['add_image_in_params'] = True model_list = data_repo.get_all_ai_model_list(model_type_list=[AIModelType.IMG2IMG.value], custom_trained=False) else: + # when NONE is selected then removing input image from the session state + st.session_state['add_image_in_params'] = False model_list = data_repo.get_all_ai_model_list(model_type_list=[AIModelType.TXT2IMG.value], custom_trained=False) model_name_list = [m.name for m in model_list] @@ -429,6 +432,7 @@ def styling_element(timing_uuid, view_type=ViewType.SINGLE.value): low_threshold=st.session_state['low_threshold'], 
high_threshold=st.session_state['high_threshold'], canny_image=st.session_state['canny_image'] if 'canny_image' in st.session_state else None, + add_image_in_params=st.session_state['add_image_in_params'], ) st.rerun() diff --git a/utils/ml_processor/replicate/replicate.py b/utils/ml_processor/replicate/replicate.py index a893df3f..e65dd3dc 100644 --- a/utils/ml_processor/replicate/replicate.py +++ b/utils/ml_processor/replicate/replicate.py @@ -64,6 +64,16 @@ def predict_model_output_standardized(self, model: ReplicateModel, query_obj: ML @check_user_credits def predict_model_output(self, replicate_model: ReplicateModel, **kwargs): model_version = self.get_model(replicate_model) + + del kwargs['query_dict'] + keys_to_delete = [] + for k, _ in kwargs.items(): + if kwargs[k] == None: + keys_to_delete.append(k) + + for k in keys_to_delete: + del kwargs[k] + start_time = time.time() output = model_version.predict(**kwargs) end_time = time.time() From 91a8f4b5ca9af1c37187bef3124e7ac0ed4f0dea Mon Sep 17 00:00:00 2001 From: piyushK52 Date: Tue, 10 Oct 2023 22:45:59 +0530 Subject: [PATCH 059/164] timing delete fixed --- .../components/frame_styling_page.py | 17 +++++++---- ui_components/methods/common_methods.py | 28 +++++++++++-------- 2 files changed, 27 insertions(+), 18 deletions(-) diff --git a/ui_components/components/frame_styling_page.py b/ui_components/components/frame_styling_page.py index f822e0b7..1a8d01bc 100644 --- a/ui_components/components/frame_styling_page.py +++ b/ui_components/components/frame_styling_page.py @@ -1,4 +1,5 @@ import json +import time import streamlit as st from shared.constants import InferenceParamType, InferenceStatus, ViewType from shared.utils import is_url_valid @@ -282,12 +283,16 @@ def frame_styling_page(mainheader2, project_uuid: str): if st.button("Add to project", key=str(log.uuid)): origin_data['output'] = output_data['output'] origin_data['log'] = log - process_inference_output(**origin_data) - - # delete origin data (doing 
this will remove the log from the list) - input_params = json.loads(log.input_params) - del input_params[InferenceParamType.ORIGIN_DATA.value] - data_repo.update_inference_log(log.uuid, input_params=json.dumps(input_params)) + status = process_inference_output(**origin_data) + + if status: + # delete origin data (doing this will remove the log from the list) + input_params = json.loads(log.input_params) + del input_params[InferenceParamType.ORIGIN_DATA.value] + data_repo.update_inference_log(log.uuid, input_params=json.dumps(input_params)) + else: + st.write("Failed to add to project, timing deleted") + time.sleep(1) st.rerun() else: st.write("Data expired") diff --git a/ui_components/methods/common_methods.py b/ui_components/methods/common_methods.py index a71d150c..fb172d8d 100644 --- a/ui_components/methods/common_methods.py +++ b/ui_components/methods/common_methods.py @@ -1310,20 +1310,24 @@ def process_inference_output(**kwargs): timing_uuid = kwargs.get('timing_uuid') promote_new_generation = kwargs.get('promote_new_generation') - if output_file != None: - add_image_variant(output_file.uuid, timing_uuid) - - if promote_new_generation == True: - timing = data_repo.get_timing_from_uuid(timing_uuid) - variants = timing.alternative_images_list - number_of_variants = len(variants) - if number_of_variants == 1: - print("No new generation to promote") - else: - promote_image_variant(timing_uuid, number_of_variants - 1) + timing = data_repo.get_timing_from_uuid(timing_uuid) + if not timing: + return False + + add_image_variant(output_file.uuid, timing_uuid) + if promote_new_generation == True: + timing = data_repo.get_timing_from_uuid(timing_uuid) + variants = timing.alternative_images_list + number_of_variants = len(variants) + if number_of_variants == 1: + print("No new generation to promote") + else: + promote_image_variant(timing_uuid, number_of_variants - 1) else: print("No new generation to promote") else: log = kwargs.get('log') del kwargs['log'] - 
data_repo.update_inference_log_origin_data(log.uuid, **kwargs) \ No newline at end of file + data_repo.update_inference_log_origin_data(log.uuid, **kwargs) + + return True \ No newline at end of file From dd5273be040eff1eab302921bc409f3cd0c5b31e Mon Sep 17 00:00:00 2001 From: piyushK52 Date: Wed, 11 Oct 2023 16:09:53 +0530 Subject: [PATCH 060/164] wip: queuing inference for multi video --- shared/constants.py | 2 + .../components/frame_styling_page.py | 2 +- ui_components/methods/common_methods.py | 91 +++++++++++++++++-- ui_components/methods/video_methods.py | 56 +++++------- utils/media_processor/interpolator.py | 18 ++-- 5 files changed, 120 insertions(+), 49 deletions(-) diff --git a/shared/constants.py b/shared/constants.py index 1417be78..a38d6961 100644 --- a/shared/constants.py +++ b/shared/constants.py @@ -76,6 +76,8 @@ class ViewType(ExtendedEnum): class InferenceType(ExtendedEnum): FRAME_TIMING_IMAGE_INFERENCE = "frame_timing_inference" # for generating variants of a frame FRAME_TIMING_VIDEO_INFERENCE = "frame_timing_video_inference" # for generating variants of a video + SINGLE_PREVIEW_VIDEO = "single_preview_video" # for generating a single preview video + FRAME_INTERPOLATION = "frame_interpolation" # for generating single/multiple interpolated videos class InferenceStatus(ExtendedEnum): QUEUED = "queued" diff --git a/ui_components/components/frame_styling_page.py b/ui_components/components/frame_styling_page.py index 1a8d01bc..8a4cb9b2 100644 --- a/ui_components/components/frame_styling_page.py +++ b/ui_components/components/frame_styling_page.py @@ -282,7 +282,7 @@ def frame_styling_page(mainheader2, project_uuid: str): if output_data and ('output' in output_data and output_data['output'] and is_url_valid(output_data['output'][0])): if st.button("Add to project", key=str(log.uuid)): origin_data['output'] = output_data['output'] - origin_data['log'] = log + origin_data['log_uuid'] = log.uuid status = process_inference_output(**origin_data) if status: 
diff --git a/ui_components/methods/common_methods.py b/ui_components/methods/common_methods.py index fb172d8d..511a6c40 100644 --- a/ui_components/methods/common_methods.py +++ b/ui_components/methods/common_methods.py @@ -18,8 +18,8 @@ from pydub import AudioSegment from backend.models import InternalFileObject from ui_components.constants import CROPPED_IMG_LOCAL_PATH, MASK_IMG_LOCAL_PATH, SECOND_MASK_FILE, SECOND_MASK_FILE_PATH, TEMP_MASK_FILE, CreativeProcessType, WorkflowStageType -from ui_components.methods.file_methods import add_temp_file_to_project, generate_pil_image, save_or_host_file, save_or_host_file_bytes -from ui_components.methods.video_methods import calculate_desired_duration_of_individual_clip, create_or_get_single_preview_video +from ui_components.methods.file_methods import add_temp_file_to_project, convert_bytes_to_file, generate_pil_image, generate_temp_file, save_or_host_file, save_or_host_file_bytes +from ui_components.methods.video_methods import calculate_desired_duration_of_individual_clip, create_or_get_single_preview_video, update_speed_of_video_clip from ui_components.models import InternalAIModelObject, InternalFrameTimingObject, InternalSettingObject from utils.common_utils import reset_styling_settings from utils.constants import ImageStage @@ -1290,15 +1290,16 @@ def execute_image_edit(type_of_mask_selection, type_of_mask_replacement, # if the output is present it adds it to the respective place or else it updates the inference log -# TODO: handle cases when the origin frame has been moved or deleted def process_inference_output(**kwargs): data_repo = DataRepo() inference_type = kwargs.get('inference_type') + # ------------------- FRAME TIMING IMAGE INFERENCE ------------------- if inference_type == InferenceType.FRAME_TIMING_IMAGE_INFERENCE.value: output = kwargs.get('output') filename = str(uuid.uuid4()) + ".png" - log = kwargs.get('log') + log_uuid = kwargs.get('log_uuid') + log = 
data_repo.get_inference_log_from_uuid(log_uuid) output_file = data_repo.create_file( name=filename, type=InternalFileType.IMAGE.value, @@ -1326,8 +1327,84 @@ def process_inference_output(**kwargs): else: print("No new generation to promote") else: - log = kwargs.get('log') - del kwargs['log'] - data_repo.update_inference_log_origin_data(log.uuid, **kwargs) + log_uuid = kwargs.get('log_uuid') + del kwargs['log_uuid'] + data_repo.update_inference_log_origin_data(log_uuid, **kwargs) + + # --------------------- SINGLE PREVIEW VIDEO INFERENCE ------------------- + elif inference_type == InferenceType.SINGLE_PREVIEW_VIDEO.value: + output = kwargs.get('output') + file_bytes = None + if isinstance(output, str) and output.startswith('http'): + temp_output_file = generate_temp_file(output, '.mp4') + file_bytes = None + with open(temp_output_file.name, 'rb') as f: + file_bytes = f.read() + + os.remove(temp_output_file.name) + + if file_bytes: + file_data = { + "file_location_to_save": kwargs.get('file_location_to_save'), + "mime_type": kwargs.get('mime_type'), + "file_bytes": file_bytes, + "project_uuid": kwargs.get('project_uuid'), + "inference_log_id": kwargs.get('log_uuid') + } + + timing_uuid = kwargs.get('timing_uuid') + timing = data_repo.get_timing_from_uuid(timing_uuid) + if not timing: + return False + + video_fie = convert_bytes_to_file(**file_data) + data_repo.add_interpolated_clip(timing_uuid, interpolated_clip_id=video_fie.uuid) + + else: + log_uuid = kwargs.get('log_uuid') + del kwargs['log_uuid'] + data_repo.update_inference_log_origin_data(log_uuid, **kwargs) + + # --------------------- MULTI VIDEO INFERENCE (INTERPOLATION + MORPHING) ------------------- + elif inference_type == InferenceType.FRAME_INTERPOLATION.value: + output = kwargs.get('output') + + if output: + settings = kwargs.get('settings') + timing_uuid = kwargs.get('timing_uuid') + timing = data_repo.get_timing_from_uuid(timing_uuid) + if not timing: + return False + + # output can also be an url + 
if isinstance(output, str) and output.startswith("http"): + temp_output_file = generate_temp_file(output, '.mp4') + output = None + with open(temp_output_file.name, 'rb') as f: + output = f.read() + + os.remove(temp_output_file.name) + + if 'normalise_speed' in settings and settings['normalise_speed']: + output = VideoProcessor.update_video_bytes_speed(output, timing.animation_style, timing.clip_duration) + + video_location = "videos/" + str(timing.project.uuid) + "/assets/videos/0_raw/" + str(uuid.uuid4()) + ".mp4" + video = convert_bytes_to_file( + file_location_to_save=video_location, + mime_type="video/mp4", + file_bytes=output, + project_uuid=timing.project.uuid, + inference_log_id=log.uuid + ) + + data_repo.add_interpolated_clip(timing_uuid, interpolated_clip_id=video.uuid) + if not timing.timed_clip: + output_video = update_speed_of_video_clip(video, timing_uuid) + data_repo.update_specific_timing(timing_uuid, timed_clip_id=output_video.uuid) + + else: + log_uuid = kwargs.get('log_uuid') + del kwargs['log_uuid'] + data_repo.update_inference_log_origin_data(log_uuid, **kwargs) return True \ No newline at end of file diff --git a/ui_components/methods/video_methods.py b/ui_components/methods/video_methods.py index a89bb382..d9bc7643 100644 --- a/ui_components/methods/video_methods.py +++ b/ui_components/methods/video_methods.py @@ -11,10 +11,11 @@ from moviepy.video.compositing.CompositeVideoClip import CompositeVideoClip from backend.models import InternalFileObject -from shared.constants import AnimationToolType, InternalFileTag +from shared.constants import AnimationToolType, InferenceType, InternalFileTag from shared.file_upload.s3 import is_s3_image_url from ui_components.constants import VideoQuality -from ui_components.methods.file_methods import convert_bytes_to_file +from ui_components.methods.common_methods import process_inference_output +from ui_components.methods.file_methods import convert_bytes_to_file, generate_temp_file from 
ui_components.models import InternalFrameTimingObject, InternalSettingObject from utils.data_repo.data_repo import DataRepo from utils.media_processor.interpolator import VideoInterpolator @@ -24,7 +25,7 @@ # NOTE: interpolated_clip_uuid signals which clip to promote to timed clip (this is the main variant) # this function returns the 'single' preview_clip, which is basically timed_clip with the frame number def create_or_get_single_preview_video(timing_uuid, interpolated_clip_uuid=None): - from ui_components.methods.file_methods import generate_temp_file, save_or_host_file_bytes + from ui_components.methods.file_methods import generate_temp_file from ui_components.methods.common_methods import get_audio_bytes_for_slice data_repo = DataRepo() @@ -38,17 +39,22 @@ def create_or_get_single_preview_video(timing_uuid, interpolated_clip_uuid=None) timing.interpolation_steps = 3 next_timing = data_repo.get_next_timing(timing.uuid) img_list = [timing.source_image.location, next_timing.source_image.location] - video_bytes, log = VideoInterpolator.video_through_frame_interpolation(img_list, {"interpolation_steps": timing.interpolation_steps}) - file_data = { + res = VideoInterpolator.video_through_frame_interpolation(img_list, {"interpolation_steps": timing.interpolation_steps}) + + output_url, log = res[0] + + inference_data = { + "inference_type": InferenceType.SINGLE_PREVIEW_VIDEO.value, "file_location_to_save": "videos/" + timing.project.uuid + "/assets/videos" + (str(uuid.uuid4())) + ".mp4", "mime_type": "video/mp4", - "file_bytes": video_bytes, + "output": output_url, "project_uuid": timing.project.uuid, - "inference_log_id": log.uuid + "log_uuid": log.uuid, + "timing_uuid": timing_uuid } + + process_inference_output(**inference_data) - video_fie = convert_bytes_to_file(**file_data) - data_repo.add_interpolated_clip(timing_uuid, interpolated_clip_id=video_fie.uuid) if not timing.timed_clip: interpolated_clip = data_repo.get_file_from_uuid(interpolated_clip_uuid) if 
interpolated_clip_uuid \ @@ -141,28 +147,16 @@ def create_single_interpolated_clip(timing_uuid, quality, settings={}, variant_c variant_count ) - output_video_list = [] - for (video_bytes, log) in res: - if 'normalise_speed' in settings and settings['normalise_speed']: - video_bytes = VideoProcessor.update_video_bytes_speed(video_bytes, timing.animation_style, timing.clip_duration) - - video_location = "videos/" + str(timing.project.uuid) + "/assets/videos/0_raw/" + str(uuid.uuid4()) + ".mp4" - video = convert_bytes_to_file( - file_location_to_save=video_location, - mime_type="video/mp4", - file_bytes=video_bytes, - project_uuid=timing.project.uuid, - inference_log_id=log.uuid - ) - - data_repo.add_interpolated_clip(timing_uuid, interpolated_clip_id=video.uuid) - if not timing.timed_clip: - output_video = update_speed_of_video_clip(video, timing_uuid) - data_repo.update_specific_timing(timing_uuid, timed_clip_id=output_video.uuid) - - output_video_list.append(video) + for (output, log) in res: + inference_data = { + "inference_type": InferenceType.FRAME_INTERPOLATION.value, + "output": output, + "log_uuid": log.uuid, + "settings": settings, + "timing_uuid": timing_uuid + } - return output_video_list + process_inference_output(**inference_data) # preview_clips have frame numbers on them. 
Preview clip is generated from index-2 to index+2 frames @@ -385,8 +379,6 @@ def render_video(final_video_name, project_uuid, quality, file_tag=InternalFileT # creating timed clips if not already present if not timing.timed_clip: - video_clip = None - # creating an interpolated clip if not already present if not len(timing.interpolated_clip_list): next_timing = data_repo.get_next_timing(current_timing.uuid) diff --git a/utils/media_processor/interpolator.py b/utils/media_processor/interpolator.py index e5b331b7..b30fdab0 100644 --- a/utils/media_processor/interpolator.py +++ b/utils/media_processor/interpolator.py @@ -88,17 +88,17 @@ def video_through_frame_interpolation(img_location_list, settings, variant_count res = ml_client.predict_model_output_async(REPLICATE_MODEL.ad_interpolation, **data) - final_res = [] - for (output, log) in res: - temp_output_file = generate_temp_file(output, '.mp4') - video_bytes = None - with open(temp_output_file.name, 'rb') as f: - video_bytes = f.read() + # final_res = [] + # for (output, log) in res: + # temp_output_file = generate_temp_file(output, '.mp4') + # video_bytes = None + # with open(temp_output_file.name, 'rb') as f: + # video_bytes = f.read() - os.remove(temp_output_file.name) - final_res.append((video_bytes, log)) + # os.remove(temp_output_file.name) + # final_res.append((video_bytes, log)) - return final_res + return res @staticmethod From 4a0ed40480a8e706ab7066d1829619b598364e7c Mon Sep 17 00:00:00 2001 From: piyushK52 Date: Wed, 11 Oct 2023 20:00:33 +0530 Subject: [PATCH 061/164] video inference queue fixed --- banodoco_runner.py | 2 +- shared/constants.py | 2 +- shared/utils.py | 6 +- .../components/frame_styling_page.py | 17 ++- ui_components/methods/common_methods.py | 6 +- ui_components/methods/video_methods.py | 18 ++- .../widgets/animation_style_element.py | 2 +- utils/media_processor/interpolator.py | 55 +++++---- utils/ml_processor/replicate/replicate.py | 107 ++++++++++-------- 9 files changed, 126 
insertions(+), 89 deletions(-) diff --git a/banodoco_runner.py b/banodoco_runner.py index 75056a9c..f6c48a4f 100644 --- a/banodoco_runner.py +++ b/banodoco_runner.py @@ -75,7 +75,7 @@ def check_and_update_db(): headers = { "Authorization": f"Token {replicate_key}" } - print("replicate key: ", replicate_key) + response = requests.get(url, headers=headers) if response.status_code in [200, 201]: result = response.json() diff --git a/shared/constants.py b/shared/constants.py index a38d6961..ec81cac9 100644 --- a/shared/constants.py +++ b/shared/constants.py @@ -102,4 +102,4 @@ class InferenceParamType(ExtendedEnum): LOCAL_DATABASE_NAME = 'banodoco_local.db' ENCRYPTION_KEY = 'J2684nBgNUYa_K0a6oBr5H8MpSRW0EJ52Qmq7jExE-w=' -QUEUE_INFERENCE_QUERIES = False \ No newline at end of file +QUEUE_INFERENCE_QUERIES = True \ No newline at end of file diff --git a/shared/utils.py b/shared/utils.py index f4d93a5d..9c20d84f 100644 --- a/shared/utils.py +++ b/shared/utils.py @@ -16,7 +16,9 @@ def is_online_file_path(file_path): def is_url_valid(url): try: - response = requests.head(url) - return response.status_code in [200, 201] + response = requests.head(url, allow_redirects=True) + final_response = response.history[-1] if response.history else response + + return final_response.status_code in [200, 201, 307] # TODO: handle all possible status codes except Exception as e: return False diff --git a/ui_components/components/frame_styling_page.py b/ui_components/components/frame_styling_page.py index 8a4cb9b2..403ed4f3 100644 --- a/ui_components/components/frame_styling_page.py +++ b/ui_components/components/frame_styling_page.py @@ -263,14 +263,12 @@ def frame_styling_page(mainheader2, project_uuid: str): with c3: output_data = json.loads(log.output_details) - if 'output' in output_data and output_data['output'] and is_url_valid(output_data['output'][0]): - if isinstance(output_data['output'], list): - output_data['output'] = output_data['output'][0] - - if 
output_data['output'].endswith('png'): - st.image(output_data['output']) - elif output_data['output'].endswith('mp4') or output_data['output'].endswith('gif'): - st.video(output_data['output']) + output_url = (output_data['output'][0] if isinstance(output_data['output'], list) else output_data['output']) if 'output' in output_data else None + if 'output' in output_data and output_data['output'] and is_url_valid(output_url): + if output_url.endswith('png'): + st.image(output_url) + elif output_url.endswith('mp4') or output_url.endswith('gif'): + st.video(output_url) else: st.write("No data to display") else: @@ -279,7 +277,8 @@ def frame_styling_page(mainheader2, project_uuid: str): with c4: if log.status == InferenceStatus.COMPLETED.value: output_data = json.loads(log.output_details) - if output_data and ('output' in output_data and output_data['output'] and is_url_valid(output_data['output'][0])): + output_url = output_data['output'][0] if isinstance(output_data['output'], list) else output_data['output'] + if output_data and ('output' in output_data and output_data['output'] and is_url_valid(output_url)): if st.button("Add to project", key=str(log.uuid)): origin_data['output'] = output_data['output'] origin_data['log_uuid'] = log.uuid diff --git a/ui_components/methods/common_methods.py b/ui_components/methods/common_methods.py index 511a6c40..810cd9a8 100644 --- a/ui_components/methods/common_methods.py +++ b/ui_components/methods/common_methods.py @@ -1368,7 +1368,8 @@ def process_inference_output(**kwargs): # --------------------- MULTI VIDEO INFERENCE (INTERPOLATION + MORPHING) ------------------- elif inference_type == InferenceType.FRAME_INTERPOLATION.value: output = kwargs.get('output') - + log_uuid = kwargs.get('log_uuid') + if output: settings = kwargs.get('settings') timing_uuid = kwargs.get('timing_uuid') @@ -1394,7 +1395,7 @@ def process_inference_output(**kwargs): mime_type="video/mp4", file_bytes=output, project_uuid=timing.project.uuid, - 
inference_log_id=log.uuid + inference_log_id=log_uuid ) data_repo.add_interpolated_clip(timing_uuid, interpolated_clip_id=video.uuid) @@ -1403,7 +1404,6 @@ def process_inference_output(**kwargs): data_repo.update_specific_timing(timing_uuid, timed_clip_id=output_video.uuid) else: - log_uuid = kwargs.get('log_uuid') del kwargs['log_uuid'] data_repo.update_inference_log_origin_data(log_uuid, **kwargs) diff --git a/ui_components/methods/video_methods.py b/ui_components/methods/video_methods.py index d9bc7643..a917e7b7 100644 --- a/ui_components/methods/video_methods.py +++ b/ui_components/methods/video_methods.py @@ -14,7 +14,6 @@ from shared.constants import AnimationToolType, InferenceType, InternalFileTag from shared.file_upload.s3 import is_s3_image_url from ui_components.constants import VideoQuality -from ui_components.methods.common_methods import process_inference_output from ui_components.methods.file_methods import convert_bytes_to_file, generate_temp_file from ui_components.models import InternalFrameTimingObject, InternalSettingObject from utils.data_repo.data_repo import DataRepo @@ -27,6 +26,8 @@ def create_or_get_single_preview_video(timing_uuid, interpolated_clip_uuid=None): from ui_components.methods.file_methods import generate_temp_file from ui_components.methods.common_methods import get_audio_bytes_for_slice + from ui_components.methods.common_methods import process_inference_output + from shared.constants import QUEUE_INFERENCE_QUERIES data_repo = DataRepo() @@ -39,7 +40,9 @@ def create_or_get_single_preview_video(timing_uuid, interpolated_clip_uuid=None) timing.interpolation_steps = 3 next_timing = data_repo.get_next_timing(timing.uuid) img_list = [timing.source_image.location, next_timing.source_image.location] - res = VideoInterpolator.video_through_frame_interpolation(img_list, {"interpolation_steps": timing.interpolation_steps}) + res = VideoInterpolator.video_through_frame_interpolation(img_list, \ + {"interpolation_steps": 
timing.interpolation_steps}, 1, \ + QUEUE_INFERENCE_QUERIES) output_url, log = res[0] @@ -122,6 +125,9 @@ def create_or_get_single_preview_video(timing_uuid, interpolated_clip_uuid=None) # this includes all the animation styles [direct morphing, interpolation, image to video] def create_single_interpolated_clip(timing_uuid, quality, settings={}, variant_count=1): + from ui_components.methods.common_methods import process_inference_output + from shared.constants import QUEUE_INFERENCE_QUERIES + data_repo = DataRepo() timing: InternalFrameTimingObject = data_repo.get_timing_from_uuid(timing_uuid) next_timing: InternalFrameTimingObject = data_repo.get_next_timing(timing_uuid) @@ -144,7 +150,8 @@ def create_single_interpolated_clip(timing_uuid, quality, settings={}, variant_c img_list, timing.animation_style, settings, - variant_count + variant_count, + QUEUE_INFERENCE_QUERIES ) for (output, log) in res: @@ -344,6 +351,7 @@ def add_audio_to_video_slice(video_file, audio_bytes): def render_video(final_video_name, project_uuid, quality, file_tag=InternalFileTag.GENERATED_VIDEO.value): from ui_components.methods.common_methods import update_clip_duration_of_all_timing_frames from ui_components.methods.file_methods import convert_bytes_to_file, generate_temp_file + from shared.constants import QUEUE_INFERENCE_QUERIES data_repo = DataRepo() @@ -377,6 +385,7 @@ def render_video(final_video_name, project_uuid, quality, file_tag=InternalFileT data_repo.update_specific_timing( current_timing.uuid, interpolation_steps=3) + # TODO: add this flow in the async inference as well # creating timed clips if not already present if not timing.timed_clip: # creating an interpolated clip if not already present @@ -391,7 +400,8 @@ def render_video(final_video_name, project_uuid, quality, file_tag=InternalFileT img_location_list=[current_timing.source_image.location, next_timing.source_image.location], animation_style=current_timing.animation_style, settings=settings, - 
interpolation_steps=current_timing.interpolation_steps + variant_count=1, + queue_inference=QUEUE_INFERENCE_QUERIES ) video_bytes, log = res[0] diff --git a/ui_components/widgets/animation_style_element.py b/ui_components/widgets/animation_style_element.py index adb1b863..a2ca859c 100644 --- a/ui_components/widgets/animation_style_element.py +++ b/ui_components/widgets/animation_style_element.py @@ -42,7 +42,7 @@ def animation_style_element(timing_uuid, project_uuid): positive_prompt = st.text_area("Positive Prompt:", value=project_settings.default_prompt, key="positive_prompt") with prompt_column_2: - negative_prompt = st.text_area("Negative Prompt:", value=project_settings.default_prompt, key="negative_prompt") + negative_prompt = st.text_area("Negative Prompt:", value=project_settings.default_negative_prompt, key="negative_prompt") animate_col_1, animate_col_2, _ = st.columns([1, 1, 2]) diff --git a/utils/media_processor/interpolator.py b/utils/media_processor/interpolator.py index b30fdab0..2872e9cd 100644 --- a/utils/media_processor/interpolator.py +++ b/utils/media_processor/interpolator.py @@ -33,7 +33,7 @@ def calculate_dynamic_interpolations_steps(clip_duration): return interpolation_steps @staticmethod - def create_interpolated_clip(img_location_list, animation_style, settings, variant_count=1): + def create_interpolated_clip(img_location_list, animation_style, settings, variant_count=1, queue_inference=False): data_repo = DataRepo() if not animation_style: project_setting = data_repo.get_project_setting(st.session_state["project_uuid"]) @@ -43,7 +43,8 @@ def create_interpolated_clip(img_location_list, animation_style, settings, varia return VideoInterpolator.video_through_frame_interpolation( img_location_list, settings, - variant_count + variant_count, + queue_inference ) elif animation_style == AnimationStyleType.DIRECT_MORPHING.value: @@ -55,7 +56,7 @@ def create_interpolated_clip(img_location_list, animation_style, settings, varia # returns a video 
bytes generated through interpolating frames between the given list of frames @staticmethod - def video_through_frame_interpolation(img_location_list, settings, variant_count): + def video_through_frame_interpolation(img_location_list, settings, variant_count, queue_inference=False): # TODO: extend this for more than two images img1 = img_location_list[0] img2 = img_location_list[1] @@ -69,24 +70,34 @@ def video_through_frame_interpolation(img_location_list, settings, variant_count ml_client = get_ml_client() animation_tool = settings['animation_tool'] if 'animation_tool' in settings else AnimationToolType.G_FILM.value - if animation_tool == AnimationToolType.G_FILM.value: - res = ml_client.predict_model_output_async(REPLICATE_MODEL.google_frame_interpolation, frame1=img1, frame2=img2, - times_to_interpolate=settings['interpolation_steps'], variant_count=variant_count) - - # since workflows can have multiple input params it's not standardized yet - elif animation_tool == AnimationToolType.ANIMATEDIFF.value: - data = { - "positive_prompt": settings['positive_prompt'], - "negative_prompt": settings['negative_prompt'], - "image_dimension": settings['image_dimension'], - "starting_image_path": img1, - "ending_image_path": img2, - "sampling_steps": settings['sampling_steps'], - "motion_module": settings['motion_module'], - "model": settings['model'], - } - - res = ml_client.predict_model_output_async(REPLICATE_MODEL.ad_interpolation, **data) + final_res = [] + for _ in range(variant_count): + if animation_tool == AnimationToolType.G_FILM.value: + res = ml_client.predict_model_output( + REPLICATE_MODEL.google_frame_interpolation, + frame1=img1, + frame2=img2, + times_to_interpolate=settings['interpolation_steps'], + queue_inference=queue_inference + ) + + # since workflows can have multiple input params it's not standardized yet + elif animation_tool == AnimationToolType.ANIMATEDIFF.value: + data = { + "positive_prompt": settings['positive_prompt'], + "negative_prompt": 
settings['negative_prompt'], + "image_dimension": settings['image_dimension'], + "starting_image_path": img1, + "ending_image_path": img2, + "sampling_steps": settings['sampling_steps'], + "motion_module": settings['motion_module'], + "model": settings['model'], + "queue_inference": queue_inference + } + + res = ml_client.predict_model_output(REPLICATE_MODEL.ad_interpolation, **data) + + final_res.append(res) # final_res = [] # for (output, log) in res: @@ -98,7 +109,7 @@ def video_through_frame_interpolation(img_location_list, settings, variant_count # os.remove(temp_output_file.name) # final_res.append((video_bytes, log)) - return res + return final_res @staticmethod diff --git a/utils/ml_processor/replicate/replicate.py b/utils/ml_processor/replicate/replicate.py index e65dd3dc..87fb1c69 100644 --- a/utils/ml_processor/replicate/replicate.py +++ b/utils/ml_processor/replicate/replicate.py @@ -63,9 +63,15 @@ def predict_model_output_standardized(self, model: ReplicateModel, query_obj: ML @check_user_credits def predict_model_output(self, replicate_model: ReplicateModel, **kwargs): + # TODO: make unified interface for directing to queue_prediction + queue_inference = kwargs.get('queue_inference', False) + if queue_inference: + return self.queue_prediction(replicate_model, **kwargs) + model_version = self.get_model(replicate_model) - del kwargs['query_dict'] + if 'query_dict' in kwargs: + del kwargs['query_dict'] keys_to_delete = [] for k, _ in kwargs.items(): if kwargs[k] == None: @@ -93,6 +99,59 @@ def predict_model_output(self, replicate_model: ReplicateModel, **kwargs): return output, log + @check_user_credits + def queue_prediction(self, replicate_model: ReplicateModel, **kwargs): + url = "https://api.replicate.com/v1/predictions" + headers = { + "Authorization": "Token " + os.environ.get("REPLICATE_API_TOKEN"), + "Content-Type": "application/json" + } + + if 'query_dict' in kwargs: + del kwargs['query_dict'] + + keys_to_delete = [] + for k, _ in 
kwargs.items(): + if kwargs[k] == None: + keys_to_delete.append(k) + + for k in keys_to_delete: + del kwargs[k] + + data = { + "version": replicate_model.version, + "input": dict(kwargs) + } + + # converting io buffers to base64 format + for k, v in data['input'].items(): + if not isinstance(v, (int, str, list, dict)): + data['input'][k] = convert_file_to_base64(v) + + response = r.post(url, headers=headers, json=data) + + if response.status_code in [200, 201]: + result = response.json() + data = { + "prediction_id": result['id'], + "error": result['error'], + "status": result['status'], + "created_at": result['created_at'], + "urls": result['urls'], # these contain "cancel" and "get" urls + } + + kwargs[InferenceParamType.REPLICATE_INFERENCE.value] = data + + # hackish fix for now, will update replicate model later + if 'model' in kwargs: + kwargs['inf_model'] = kwargs['model'] + del kwargs['model'] + + log = log_model_inference(replicate_model, None, **kwargs) + return None, log + else: + self.logger.log(LoggingType.ERROR, f"Error in creating prediction: {response.content}") + @check_user_credits def predict_model_output_async(self, replicate_model: ReplicateModel, **kwargs): res = asyncio.run(self._multi_async_prediction(replicate_model, **kwargs)) @@ -251,48 +310,4 @@ def get_model_version_from_id(self, model_id): # version = (response.json()["version"]) version = (response.json())['results'][0]['id'] - return version - - @check_user_credits - def queue_prediction(self, replicate_model: ReplicateModel, **kwargs): - url = "https://api.replicate.com/v1/predictions" - headers = { - "Authorization": "Token " + os.environ.get("REPLICATE_API_TOKEN"), - "Content-Type": "application/json" - } - - del kwargs['query_dict'] - keys_to_delete = [] - for k, _ in kwargs.items(): - if kwargs[k] == None: - keys_to_delete.append(k) - - for k in keys_to_delete: - del kwargs[k] - - data = { - "version": replicate_model.version, - "input": dict(kwargs) - } - - if 'image' in 
data['input'] and not isinstance(data['input']['image'], str): - data['input']['image'] = convert_file_to_base64(data['input']['image']) - - response = r.post(url, headers=headers, json=data) - - if response.status_code in [200, 201]: - result = response.json() - data = { - "prediction_id": result['id'], - "error": result['error'], - "status": result['status'], - "created_at": result['created_at'], - "urls": result['urls'], # these contain "cancel" and "get" urls - } - - kwargs[InferenceParamType.REPLICATE_INFERENCE.value] = data - - log = log_model_inference(replicate_model, None, **kwargs) - return None, log - else: - self.logger.log(LoggingType.ERROR, f"Error in creating prediction: {response.content}") \ No newline at end of file + return version \ No newline at end of file From 602a7b67eb047e63bd8797a7c256e0b4ee1105f5 Mon Sep 17 00:00:00 2001 From: piyushK52 Date: Wed, 11 Oct 2023 20:37:48 +0530 Subject: [PATCH 062/164] log list fixes --- shared/utils.py | 2 +- .../components/frame_styling_page.py | 26 +++++++++++-------- 2 files changed, 16 insertions(+), 12 deletions(-) diff --git a/shared/utils.py b/shared/utils.py index 9c20d84f..8b2e494d 100644 --- a/shared/utils.py +++ b/shared/utils.py @@ -19,6 +19,6 @@ def is_url_valid(url): response = requests.head(url, allow_redirects=True) final_response = response.history[-1] if response.history else response - return final_response.status_code in [200, 201, 307] # TODO: handle all possible status codes + return final_response.status_code in [200, 201, 307] # TODO: handle all possible status codes except Exception as e: return False diff --git a/ui_components/components/frame_styling_page.py b/ui_components/components/frame_styling_page.py index 403ed4f3..ee181ca0 100644 --- a/ui_components/components/frame_styling_page.py +++ b/ui_components/components/frame_styling_page.py @@ -248,11 +248,17 @@ def frame_styling_page(mainheader2, project_uuid: str): elif st.session_state['frame_styling_view_type'] == "Log List": 
# TODO: add filtering/pagination when fetching log list log_list = data_repo.get_all_inference_log_list(project_uuid) - - for log in log_list: + valid_url = [False] * len(log_list) + for idx, log in enumerate(log_list): origin_data = json.loads(log.input_params).get(InferenceParamType.ORIGIN_DATA.value, None) if not log.status or not origin_data: continue + + output_url = None + output_data = json.loads(log.output_details) + if 'output' in output_data and output_data['output']: + output_url = output_data['output'][0] if isinstance(output_data['output'], list) else output_data['output'] + valid_url[idx] = is_url_valid(output_url) c1, c2, c3, c4 = st.columns([1, 1, 1, 1]) with c1: @@ -262,13 +268,13 @@ def frame_styling_page(mainheader2, project_uuid: str): st.write(log.status) with c3: - output_data = json.loads(log.output_details) - output_url = (output_data['output'][0] if isinstance(output_data['output'], list) else output_data['output']) if 'output' in output_data else None - if 'output' in output_data and output_data['output'] and is_url_valid(output_url): - if output_url.endswith('png'): + + + if valid_url[idx]: + if output_url.endswith('png') or output_url.endswith('jpg') or output_url.endswith('jpeg') or output_url.endswith('gif'): st.image(output_url) - elif output_url.endswith('mp4') or output_url.endswith('gif'): - st.video(output_url) + elif output_url.endswith('mp4'): + st.video(output_url, format='mp4', start_time=0) else: st.write("No data to display") else: @@ -276,9 +282,7 @@ def frame_styling_page(mainheader2, project_uuid: str): with c4: if log.status == InferenceStatus.COMPLETED.value: - output_data = json.loads(log.output_details) - output_url = output_data['output'][0] if isinstance(output_data['output'], list) else output_data['output'] - if output_data and ('output' in output_data and output_data['output'] and is_url_valid(output_url)): + if valid_url[idx]: if st.button("Add to project", key=str(log.uuid)): origin_data['output'] = 
output_data['output'] origin_data['log_uuid'] = log.uuid From d57f1892edec708f4b2ca3904cc652f6c6c523e5 Mon Sep 17 00:00:00 2001 From: piyushK52 Date: Wed, 11 Oct 2023 22:51:32 +0530 Subject: [PATCH 063/164] inference fixes --- .../components/frame_styling_page.py | 5 +++++ ui_components/constants.py | 2 +- ui_components/methods/common_methods.py | 22 +++++++++---------- ui_components/methods/ml_methods.py | 2 +- utils/data_repo/data_repo.py | 3 ++- utils/ml_processor/replicate/replicate.py | 2 +- utils/ml_processor/replicate/utils.py | 2 +- 7 files changed, 22 insertions(+), 16 deletions(-) diff --git a/ui_components/components/frame_styling_page.py b/ui_components/components/frame_styling_page.py index ee181ca0..9e504f59 100644 --- a/ui_components/components/frame_styling_page.py +++ b/ui_components/components/frame_styling_page.py @@ -300,3 +300,8 @@ def frame_styling_page(mainheader2, project_uuid: str): else: st.write("Data expired") + + if st.button("Delete", key=f"delete_{log.uuid}"): + data_repo.update_inference_log(log.uuid, status="") + st.rerun() + diff --git a/ui_components/constants.py b/ui_components/constants.py index cd946500..5ec8c3b2 100644 --- a/ui_components/constants.py +++ b/ui_components/constants.py @@ -21,7 +21,7 @@ class DefaultTimingStyleParams: prompt = "" negative_prompt = "bad image, worst quality" strength = 1 - guidance_scale = 0.5 + guidance_scale = 7.5 seed = 0 num_inference_steps = 25 low_threshold = 100 diff --git a/ui_components/methods/common_methods.py b/ui_components/methods/common_methods.py index 810cd9a8..4dd8f660 100644 --- a/ui_components/methods/common_methods.py +++ b/ui_components/methods/common_methods.py @@ -1297,17 +1297,7 @@ def process_inference_output(**kwargs): # ------------------- FRAME TIMING IMAGE INFERENCE ------------------- if inference_type == InferenceType.FRAME_TIMING_IMAGE_INFERENCE.value: output = kwargs.get('output') - filename = str(uuid.uuid4()) + ".png" - log_uuid = kwargs.get('log_uuid') - log = 
data_repo.get_inference_log_from_uuid(log_uuid) - output_file = data_repo.create_file( - name=filename, - type=InternalFileType.IMAGE.value, - hosted_url=output[0], - inference_log_id=log.uuid - ) - - if output_file: + if output: timing_uuid = kwargs.get('timing_uuid') promote_new_generation = kwargs.get('promote_new_generation') @@ -1315,6 +1305,16 @@ def process_inference_output(**kwargs): if not timing: return False + filename = str(uuid.uuid4()) + ".png" + log_uuid = kwargs.get('log_uuid') + log = data_repo.get_inference_log_from_uuid(log_uuid) + output_file = data_repo.create_file( + name=filename, + type=InternalFileType.IMAGE.value, + hosted_url=output[0], + inference_log_id=log.uuid + ) + add_image_variant(output_file.uuid, timing_uuid) if promote_new_generation == True: timing = data_repo.get_timing_from_uuid(timing_uuid) diff --git a/ui_components/methods/ml_methods.py b/ui_components/methods/ml_methods.py index 4eee2463..b2214765 100644 --- a/ui_components/methods/ml_methods.py +++ b/ui_components/methods/ml_methods.py @@ -63,7 +63,7 @@ def trigger_restyling_process(timing_uuid, update_inference_settings, \ inference_data = { "inference_type": InferenceType.FRAME_TIMING_IMAGE_INFERENCE.value, "output": output, - "log": log, + "log_uuid": log.uuid, "timing_uuid": timing_uuid, "promote_new_generation": promote_new_generation, } diff --git a/utils/data_repo/data_repo.py b/utils/data_repo/data_repo.py index 64174157..9eb02703 100644 --- a/utils/data_repo/data_repo.py +++ b/utils/data_repo/data_repo.py @@ -184,7 +184,8 @@ def delete_ai_model_from_uuid(self, uuid): # inference log def get_inference_log_from_uuid(self, uuid): - log = self.db_repo.get_inference_log_from_uuid(uuid).data['data'] + res = self.db_repo.get_inference_log_from_uuid(uuid) + log = res.data['data'] if res else None return InferenceLogObject(**log) if log else None def get_all_inference_log_list(self, project_id=None): diff --git a/utils/ml_processor/replicate/replicate.py 
b/utils/ml_processor/replicate/replicate.py index 87fb1c69..2c03ae8e 100644 --- a/utils/ml_processor/replicate/replicate.py +++ b/utils/ml_processor/replicate/replicate.py @@ -125,7 +125,7 @@ def queue_prediction(self, replicate_model: ReplicateModel, **kwargs): # converting io buffers to base64 format for k, v in data['input'].items(): - if not isinstance(v, (int, str, list, dict)): + if not isinstance(v, (int, str, list, dict, float)): data['input'][k] = convert_file_to_base64(v) response = r.post(url, headers=headers, json=data) diff --git a/utils/ml_processor/replicate/utils.py b/utils/ml_processor/replicate/utils.py index 2613f3dc..a724f573 100644 --- a/utils/ml_processor/replicate/utils.py +++ b/utils/ml_processor/replicate/utils.py @@ -97,7 +97,7 @@ def get_model_params_from_query_obj(model, query_obj: MLQueryObject): 'prompt': query_obj.prompt, 'negative_prompt': query_obj.negative_prompt, 'strength': query_obj.strength, - 'guidance_scale': query_obj.guidance_scale, + 'guidance_scale': min( query_obj.guidance_scale, 1), 'num_inference_steps': query_obj.num_inference_steps, 'upscale': 1, 'seed': query_obj.seed, From 654b9553d241430032de64ab1797ea3610b82b19 Mon Sep 17 00:00:00 2001 From: piyushK52 Date: Wed, 11 Oct 2023 23:16:04 +0530 Subject: [PATCH 064/164] preview generation fixed --- banodoco_runner.py | 5 +++-- ui_components/methods/video_methods.py | 4 ++-- utils/ml_processor/replicate/replicate.py | 2 +- 3 files changed, 6 insertions(+), 5 deletions(-) diff --git a/banodoco_runner.py b/banodoco_runner.py index f6c48a4f..45a3a49b 100644 --- a/banodoco_runner.py +++ b/banodoco_runner.py @@ -83,8 +83,9 @@ def check_and_update_db(): output_details = json.loads(log.output_details) if log_status == InferenceStatus.COMPLETED.value: - output_details['output'] = [result['output'][-1]] if output_details['version'] == \ - "a4a8bafd6089e1716b06057c42b19378250d008b80fe87caa5cd36d40c1eda90" else result['output'] + output_details['output'] = result['output'] if 
(output_details['version'] == \ + "a4a8bafd6089e1716b06057c42b19378250d008b80fe87caa5cd36d40c1eda90" or \ + isinstance(output_details['version'], str)) else [result['output'][-1]] InferenceLog.objects.filter(id=log.id).update(status=log_status, output_details=json.dumps(output_details)) else: diff --git a/ui_components/methods/video_methods.py b/ui_components/methods/video_methods.py index a917e7b7..8cc74c02 100644 --- a/ui_components/methods/video_methods.py +++ b/ui_components/methods/video_methods.py @@ -42,7 +42,7 @@ def create_or_get_single_preview_video(timing_uuid, interpolated_clip_uuid=None) img_list = [timing.source_image.location, next_timing.source_image.location] res = VideoInterpolator.video_through_frame_interpolation(img_list, \ {"interpolation_steps": timing.interpolation_steps}, 1, \ - QUEUE_INFERENCE_QUERIES) + False) # TODO: queuing is not enabled here output_url, log = res[0] @@ -58,7 +58,7 @@ def create_or_get_single_preview_video(timing_uuid, interpolated_clip_uuid=None) process_inference_output(**inference_data) - + timing = data_repo.get_timing_from_uuid(timing_uuid) if not timing.timed_clip: interpolated_clip = data_repo.get_file_from_uuid(interpolated_clip_uuid) if interpolated_clip_uuid \ else timing.interpolated_clip_list[0] diff --git a/utils/ml_processor/replicate/replicate.py b/utils/ml_processor/replicate/replicate.py index 2c03ae8e..353a2bab 100644 --- a/utils/ml_processor/replicate/replicate.py +++ b/utils/ml_processor/replicate/replicate.py @@ -95,7 +95,7 @@ def predict_model_output(self, replicate_model: ReplicateModel, **kwargs): if replicate_model == REPLICATE_MODEL.clip_interrogator: output = output # adding this for organisation purpose else: - output = [output[-1]] + output = [output[-1]] if isinstance(output, list) else output return output, log From ad8a963db32a15617df9f2106cbcebcf9a67a70b Mon Sep 17 00:00:00 2001 From: piyushK52 Date: Wed, 11 Oct 2023 23:57:37 +0530 Subject: [PATCH 065/164] inference fixes + log page 
speed up --- backend/serializers/dto.py | 1 + banodoco_runner.py | 2 +- ui_components/components/frame_styling_page.py | 7 ++++++- ui_components/models.py | 1 + utils/ml_processor/replicate/constants.py | 16 ++++++++-------- utils/ml_processor/replicate/utils.py | 1 + 6 files changed, 18 insertions(+), 10 deletions(-) diff --git a/backend/serializers/dto.py b/backend/serializers/dto.py index 5513739e..f9f78630 100644 --- a/backend/serializers/dto.py +++ b/backend/serializers/dto.py @@ -61,6 +61,7 @@ class Meta: "output_details", "total_inference_time", "created_on", + "updated_on", "status" ) diff --git a/banodoco_runner.py b/banodoco_runner.py index 45a3a49b..ed727a3c 100644 --- a/banodoco_runner.py +++ b/banodoco_runner.py @@ -85,7 +85,7 @@ def check_and_update_db(): if log_status == InferenceStatus.COMPLETED.value: output_details['output'] = result['output'] if (output_details['version'] == \ "a4a8bafd6089e1716b06057c42b19378250d008b80fe87caa5cd36d40c1eda90" or \ - isinstance(output_details['version'], str)) else [result['output'][-1]] + isinstance(result['output'], str)) else [result['output'][-1]] InferenceLog.objects.filter(id=log.id).update(status=log_status, output_details=json.dumps(output_details)) else: diff --git a/ui_components/components/frame_styling_page.py b/ui_components/components/frame_styling_page.py index 9e504f59..4fb7eec4 100644 --- a/ui_components/components/frame_styling_page.py +++ b/ui_components/components/frame_styling_page.py @@ -1,3 +1,4 @@ +from datetime import timedelta import json import time import streamlit as st @@ -246,8 +247,12 @@ def frame_styling_page(mainheader2, project_uuid: str): # ------- change this ---------- elif st.session_state['frame_styling_view_type'] == "Log List": + if st.button("Refresh log list"): + st.rerun() + # TODO: add filtering/pagination when fetching log list log_list = data_repo.get_all_inference_log_list(project_uuid) + log_list = log_list[::-1] valid_url = [False] * len(log_list) for idx, log in 
enumerate(log_list): origin_data = json.loads(log.input_params).get(InferenceParamType.ORIGIN_DATA.value, None) @@ -258,7 +263,7 @@ def frame_styling_page(mainheader2, project_uuid: str): output_data = json.loads(log.output_details) if 'output' in output_data and output_data['output']: output_url = output_data['output'][0] if isinstance(output_data['output'], list) else output_data['output'] - valid_url[idx] = is_url_valid(output_url) + valid_url[idx] = log.updated_on.timestamp() + 60*45 > time.time() c1, c2, c3, c4 = st.columns([1, 1, 1, 1]) with c1: diff --git a/ui_components/models.py b/ui_components/models.py index 7216b789..29e92aa5 100644 --- a/ui_components/models.py +++ b/ui_components/models.py @@ -279,6 +279,7 @@ def __init__(self, **kwargs): self.output_details = kwargs['output_details'] if key_present('output_details', kwargs) else None self.total_inference_time = kwargs['total_inference_time'] if key_present('total_inference_time', kwargs) else None self.status = kwargs['status'] if key_present('status', kwargs) else None + self.updated_on = datetime.datetime.fromisoformat(kwargs['updated_on']) if key_present('updated_on', kwargs) else None def key_present(key, dict): diff --git a/utils/ml_processor/replicate/constants.py b/utils/ml_processor/replicate/constants.py index e2751067..8849aa69 100644 --- a/utils/ml_processor/replicate/constants.py +++ b/utils/ml_processor/replicate/constants.py @@ -25,14 +25,14 @@ class REPLICATE_MODEL: phamquiluan_face_recognition = ReplicateModel("phamquiluan/facial-expression-recognition", "b16694d5bfed43612f1bfad7015cf2b7883b732651c383fe174d4b7783775ff5") arielreplicate = ReplicateModel("arielreplicate/instruct-pix2pix", "10e63b0e6361eb23a0374f4d9ee145824d9d09f7a31dcd70803193ebc7121430") cjwbw_midas = ReplicateModel("cjwbw/midas", "a6ba5798f04f80d3b314de0f0a62277f21ab3503c60c84d4817de83c5edfdae0") - jagilley_controlnet_normal = ReplicateModel("jagilley/controlnet-normal", None) - jagilley_controlnet_canny = 
ReplicateModel("jagilley/controlnet-canny", None) - jagilley_controlnet_hed = ReplicateModel("jagilley/controlnet-hed", None) - jagilley_controlnet_scribble = ReplicateModel("jagilley/controlnet-scribble", None) - jagilley_controlnet_seg = ReplicateModel("jagilley/controlnet-seg", None) - jagilley_controlnet_hough = ReplicateModel("jagilley/controlnet-hough", None) - jagilley_controlnet_depth2img = ReplicateModel("jagilley/controlnet-depth2img", None) - jagilley_controlnet_pose = ReplicateModel("jagilley/controlnet-pose", None) + jagilley_controlnet_normal = ReplicateModel("jagilley/controlnet-normal", "cc8066f617b6c99fdb134bc1195c5291cf2610875da4985a39de50ee1f46d81c") + jagilley_controlnet_canny = ReplicateModel("jagilley/controlnet-canny", "aff48af9c68d162388d230a2ab003f68d2638d88307bdaf1c2f1ac95079c9613") + jagilley_controlnet_hed = ReplicateModel("jagilley/controlnet-hed", "cde353130c86f37d0af4060cd757ab3009cac68eb58df216768f907f0d0a0653") + jagilley_controlnet_scribble = ReplicateModel("jagilley/controlnet-scribble", "435061a1b5a4c1e26740464bf786efdfa9cb3a3ac488595a2de23e143fdb0117") + jagilley_controlnet_seg = ReplicateModel("jagilley/controlnet-seg", "f967b165f4cd2e151d11e7450a8214e5d22ad2007f042f2f891ca3981dbfba0d") + jagilley_controlnet_hough = ReplicateModel("jagilley/controlnet-hough", "854e8727697a057c525cdb45ab037f64ecca770a1769cc52287c2e56472a247b") + jagilley_controlnet_depth2img = ReplicateModel("jagilley/controlnet-depth2img", "922c7bb67b87ec32cbc2fd11b1d5f94f0ba4f5519c4dbd02856376444127cc60") + jagilley_controlnet_pose = ReplicateModel("jagilley/controlnet-pose", "0304f7f774ba7341ef754231f794b1ba3d129e3c46af3022241325ae0c50fb99") real_esrgan_upscale = ReplicateModel("cjwbw/real-esrgan", "d0ee3d708c9b911f122a4ad90046c5d26a0293b99476d697f6bb7f2e251ce2d4") controlnet_1_1_x_realistic_vision_v2_0 = ReplicateModel("usamaehsan/controlnet-1.1-x-realistic-vision-v2.0", "7fbf4c86671738f97896c9cb4922705adfcdcf54a6edab193bb8c176c6b34a69") urpm = 
ReplicateModel("mcai/urpm-v1.3-img2img", "4df956e8dbfebf1afaf0c3ee98ad426ec58c4262d24360d054582e5eab2cb5f6") diff --git a/utils/ml_processor/replicate/utils.py b/utils/ml_processor/replicate/utils.py index a724f573..5868fe46 100644 --- a/utils/ml_processor/replicate/utils.py +++ b/utils/ml_processor/replicate/utils.py @@ -179,6 +179,7 @@ def get_model_params_from_query_obj(model, query_obj: MLQueryObject): data = { 'image': input_image, + 'input_image': input_image, 'prompt': query_obj.prompt, 'num_samples': "1", 'image_resolution': str(query_obj.width), From 375a11f7990760ca2526f0925e79065c75c4fffb Mon Sep 17 00:00:00 2001 From: peter942 Date: Fri, 13 Oct 2023 01:41:55 +0200 Subject: [PATCH 066/164] Logging view and more --- .../components/frame_styling_page.py | 130 ++++++++++-------- ui_components/methods/common_methods.py | 40 +++--- ui_components/setup.py | 7 +- .../widgets/add_key_frame_element.py | 4 +- ui_components/widgets/frame_selector.py | 31 ++--- ui_components/widgets/inpainting_element.py | 4 +- 6 files changed, 114 insertions(+), 102 deletions(-) diff --git a/ui_components/components/frame_styling_page.py b/ui_components/components/frame_styling_page.py index 4fb7eec4..9734150d 100644 --- a/ui_components/components/frame_styling_page.py +++ b/ui_components/components/frame_styling_page.py @@ -236,9 +236,9 @@ def frame_styling_page(mainheader2, project_uuid: str): elif st.session_state['list_view_type'] == "Timeline View": - with st.sidebar: + with st.sidebar: + with st.expander("🌀 Batch Styling", expanded=True): styling_element(st.session_state['current_frame_uuid'], view_type=ViewType.LIST.value) - if st.session_state['page'] == "Styling": timeline_view(shift_frames_setting, project_uuid, "Styling", header_col_3, header_col_4) @@ -246,67 +246,77 @@ def frame_styling_page(mainheader2, project_uuid: str): timeline_view(shift_frames_setting, project_uuid, "Motion", header_col_3, header_col_4) # ------- change this ---------- - elif 
st.session_state['frame_styling_view_type'] == "Log List": - if st.button("Refresh log list"): - st.rerun() - - # TODO: add filtering/pagination when fetching log list - log_list = data_repo.get_all_inference_log_list(project_uuid) - log_list = log_list[::-1] - valid_url = [False] * len(log_list) - for idx, log in enumerate(log_list): - origin_data = json.loads(log.input_params).get(InferenceParamType.ORIGIN_DATA.value, None) - if not log.status or not origin_data: - continue - - output_url = None - output_data = json.loads(log.output_details) - if 'output' in output_data and output_data['output']: - output_url = output_data['output'][0] if isinstance(output_data['output'], list) else output_data['output'] - valid_url[idx] = log.updated_on.timestamp() + 60*45 > time.time() - - c1, c2, c3, c4 = st.columns([1, 1, 1, 1]) - with c1: - st.write(log.uuid) + + with st.sidebar: + with st.expander("🔍 Inference Logging", expanded=True): - with c2: - st.write(log.status) - - with c3: + def display_sidebar_log_list(data_repo, project_uuid): + a1, _, a3 = st.columns([1, 0.2, 1]) + log_list = data_repo.get_all_inference_log_list(project_uuid) + refresh_disabled = not any(log.status in [InferenceStatus.QUEUED.value, InferenceStatus.IN_PROGRESS.value] for log in log_list) + + if a1.button("Refresh log", disabled=refresh_disabled): st.rerun() + a3.button("Jump to full log view") + + b1, b2 = st.columns([1, 1]) + items_per_page = b2.slider("Items per page", min_value=1, max_value=20, value=5, step=1) + page_number = b1.number_input('Page number', min_value=1, max_value=math.ceil(len(log_list) / items_per_page), value=1, step=1) - if valid_url[idx]: - if output_url.endswith('png') or output_url.endswith('jpg') or output_url.endswith('jpeg') or output_url.endswith('gif'): - st.image(output_url) - elif output_url.endswith('mp4'): - st.video(output_url, format='mp4', start_time=0) - else: - st.write("No data to display") - else: - st.write("No data to display") - - with c4: - if 
log.status == InferenceStatus.COMPLETED.value: - if valid_url[idx]: - if st.button("Add to project", key=str(log.uuid)): - origin_data['output'] = output_data['output'] - origin_data['log_uuid'] = log.uuid - status = process_inference_output(**origin_data) - - if status: - # delete origin data (doing this will remove the log from the list) - input_params = json.loads(log.input_params) - del input_params[InferenceParamType.ORIGIN_DATA.value] - data_repo.update_inference_log(log.uuid, input_params=json.dumps(input_params)) - else: - st.write("Failed to add to project, timing deleted") - time.sleep(1) - st.rerun() - else: - st.write("Data expired") + log_list = log_list[::-1][(page_number - 1) * items_per_page : page_number * items_per_page] + + st.markdown("---") + for idx, log in enumerate(log_list): + + origin_data = json.loads(log.input_params).get(InferenceParamType.ORIGIN_DATA.value, None) + if not log.status or not origin_data: + continue + + output_url = None + output_data = json.loads(log.output_details) + if 'output' in output_data and output_data['output']: + output_url = output_data['output'][0] if isinstance(output_data['output'], list) else output_data['output'] + + c1, c2, c3 = st.columns([1, 1 if output_url else 0.01, 1]) + + with c1: + input_params = json.loads(log.input_params) + st.caption(f"Prompt:") + prompt = input_params.get('prompt', 'No prompt found') + st.write(f'"{prompt[:30]}..."' if len(prompt) > 30 else f'"{prompt}"') + st.caption(f"Model:") + st.write(json.loads(log.output_details)['model_name'].split('/')[-1]) + + with c2: + if output_url: + if output_url.endswith('png') or output_url.endswith('jpg') or output_url.endswith('jpeg') or output_url.endswith('gif'): + st.image(output_url) + elif output_url.endswith('mp4'): + st.video(output_url, format='mp4', start_time=0) + else: + st.info("No data to display") + + with c3: + if log.status == InferenceStatus.COMPLETED.value: + st.success("Completed") + elif log.status == 
InferenceStatus.FAILED.value: + st.warning("Failed") + elif log.status == InferenceStatus.QUEUED.value: + st.info("Queued") + elif log.status == InferenceStatus.IN_PROGRESS.value: + st.info("In progress") + elif log.status == InferenceStatus.CANCELED.value: + st.warning("Canceled") + + if output_url: + if st.button(f"Jump to frame {idx}"): + st.info("Fix this.") + + # if st.button("Delete", key=f"delete_{log.uuid}"): + # data_repo.update_inference_log(log.uuid, status="") + # st.rerun() - if st.button("Delete", key=f"delete_{log.uuid}"): - data_repo.update_inference_log(log.uuid, status="") - st.rerun() + st.markdown("---") + display_sidebar_log_list(data_repo, project_uuid) diff --git a/ui_components/methods/common_methods.py b/ui_components/methods/common_methods.py index 4dd8f660..b97d88fd 100644 --- a/ui_components/methods/common_methods.py +++ b/ui_components/methods/common_methods.py @@ -746,27 +746,29 @@ def replace_image_widget(timing_uuid, stage): if stage == "source": uploaded_file = st.file_uploader("Upload Source Image", type=[ "png", "jpeg"], accept_multiple_files=False) - if st.button("Upload Source Image"): - if uploaded_file: - timing = data_repo.get_timing_from_uuid(timing.uuid) - if save_uploaded_image(uploaded_file, timing.project.uuid, timing.uuid, "source"): - time.sleep(1.5) - st.rerun() + if uploaded_file != None: + if st.button("Upload Source Image"): + if uploaded_file: + timing = data_repo.get_timing_from_uuid(timing.uuid) + if save_uploaded_image(uploaded_file, timing.project.uuid, timing.uuid, "source"): + time.sleep(1.5) + st.rerun() else: - replacement_frame = st.file_uploader("Upload a replacement frame here", type=[ + replacement_frame = st.file_uploader("Upload Styled Image", type=[ "png", "jpeg"], accept_multiple_files=False, key=f"replacement_frame_upload_{stage}") - if st.button("Replace frame", disabled=False): - images_for_model = [] - timing = data_repo.get_timing_from_uuid(timing.uuid) - if replacement_frame: - saved_file = 
save_uploaded_image(replacement_frame, timing.project.uuid, timing.uuid, "styled") - if saved_file: - number_of_image_variants = add_image_variant(saved_file.uuid, timing.uuid) - promote_image_variant( - timing.uuid, number_of_image_variants - 1) - st.success("Replaced") - time.sleep(1) - st.rerun() + if replacement_frame != None: + if st.button("Replace frame", disabled=False): + images_for_model = [] + timing = data_repo.get_timing_from_uuid(timing.uuid) + if replacement_frame: + saved_file = save_uploaded_image(replacement_frame, timing.project.uuid, timing.uuid, "styled") + if saved_file: + number_of_image_variants = add_image_variant(saved_file.uuid, timing.uuid) + promote_image_variant( + timing.uuid, number_of_image_variants - 1) + st.success("Replaced") + time.sleep(1) + st.rerun() def promote_image_variant(timing_uuid, variant_to_promote_frame_number: str): data_repo = DataRepo() diff --git a/ui_components/setup.py b/ui_components/setup.py index b726b0cd..6d9e99a9 100644 --- a/ui_components/setup.py +++ b/ui_components/setup.py @@ -139,7 +139,7 @@ def setup_app_ui(): "nav-link": {"font-size": "15px", "margin": "0px", "--hover-color": "#eee"}, "nav-link-selected": {"background-color": "green"}}, manual_select=st.session_state["manual_select"]) # TODO: CORRECT-CODE - view_types = ["Individual View", "List View", "Log List"] + view_types = ["Individual View", "List View"] if 'frame_styling_view_type_index' not in st.session_state: st.session_state['frame_styling_view_type_index'] = 0 @@ -184,7 +184,7 @@ def on_change_view_type(key): elif st.session_state["main_view_type"] == "Tools & Settings": with st.sidebar: - tool_pages = ["Custom Models", "Project Settings"] + tool_pages = ["Query Logger", "Custom Models", "Project Settings"] if st.session_state["page"] not in tool_pages: st.session_state["page"] = tool_pages[0] @@ -192,7 +192,8 @@ def on_change_view_type(key): st.session_state['page'] = option_menu(None, tool_pages, icons=['pencil', 'palette', 
"hourglass", 'stopwatch'], menu_icon="cast", orientation="horizontal", key="secti2on_selector", styles={ "nav-link": {"font-size": "15px", "margin": "0px", "--hover-color": "#eee"}, "nav-link-selected": {"background-color": "green"}}, manual_select=st.session_state["manual_select"]) - + if st.session_state["page"] == "Query Logger": + st.info("Query Logger will appear here.") if st.session_state["page"] == "Custom Models": custom_models_page(st.session_state["project_uuid"]) elif st.session_state["page"] == "Project Settings": diff --git a/ui_components/widgets/add_key_frame_element.py b/ui_components/widgets/add_key_frame_element.py index 93a7cb36..9eba6261 100644 --- a/ui_components/widgets/add_key_frame_element.py +++ b/ui_components/widgets/add_key_frame_element.py @@ -31,7 +31,7 @@ def add_key_frame_element(timing_details, project_uuid): transformation_stage = st.radio( label="Which stage would you like to use?", options=ImageStage.value_list(), - key="transformation_stage", + key="transformation_stage-bottom", horizontal=True ) image_idx = st.number_input( @@ -43,7 +43,7 @@ def add_key_frame_element(timing_details, project_uuid): key="image_idx" ) if transformation_stage == ImageStage.SOURCE_IMAGE.value: - if timing_details[image_idx - 1].source_image != "": + if timing_details[image_idx - 1].source_image is not None and timing_details[image_idx - 1].source_image != "": selected_image_location = timing_details[image_idx - 1].source_image.location else: selected_image_location = "" diff --git a/ui_components/widgets/frame_selector.py b/ui_components/widgets/frame_selector.py index cb42294f..375fb9bc 100644 --- a/ui_components/widgets/frame_selector.py +++ b/ui_components/widgets/frame_selector.py @@ -40,21 +40,20 @@ def frame_selector_widget(): - - image_1, image_2 = st.columns([1,1]) - with image_1: - st.warning(f"Guidance Image:") - display_image(st.session_state['current_frame_uuid'], stage=WorkflowStageType.SOURCE.value, clickable=False) - with 
st.expander("Replace guidance image"): + with st.expander("🖼️ Frame Details"): + image_1, image_2 = st.columns([1,1]) + with image_1: + st.warning(f"Guidance Image:") + display_image(st.session_state['current_frame_uuid'], stage=WorkflowStageType.SOURCE.value, clickable=False) + st.caption("Replace guidance image") replace_image_widget(st.session_state['current_frame_uuid'], stage=WorkflowStageType.SOURCE.value) - with image_2: - st.success(f"Main Styled Image:") - display_image(st.session_state['current_frame_uuid'], stage=WorkflowStageType.STYLED.value, clickable=False) - with st.expander("Replace styled image"): + with image_2: + st.success(f"Main Styled Image:") + display_image(st.session_state['current_frame_uuid'], stage=WorkflowStageType.STYLED.value, clickable=False) + st.caption("Replace styled image") replace_image_widget(st.session_state['current_frame_uuid'], stage=WorkflowStageType.STYLED.value) - - st.markdown("***") - - if st.button("Delete key frame"): - delete_frame(st.session_state['current_frame_uuid']) - st.rerun() \ No newline at end of file + + + if st.button("Delete key frame"): + delete_frame(st.session_state['current_frame_uuid']) + st.rerun() \ No newline at end of file diff --git a/ui_components/widgets/inpainting_element.py b/ui_components/widgets/inpainting_element.py index d3cf65a2..cfa56c7b 100644 --- a/ui_components/widgets/inpainting_element.py +++ b/ui_components/widgets/inpainting_element.py @@ -53,10 +53,10 @@ def inpainting_element(timing_uuid): st.info("You need to add a style first in the Style Selection section.") else: if stage == WorkflowStageType.SOURCE.value: - editing_image = timing.source_image.location + editing_image = timing.source_image.location if timing.source_image is not None else "" elif stage == WorkflowStageType.STYLED.value: variants = timing.alternative_images_list - editing_image = timing.primary_image_location + editing_image = timing.primary_image_location if timing.primary_image_location is not None 
else "" width = int(project_settings.width) height = int(project_settings.height) From ae8a68575ca4a36f7559dfd6ca67f229dc24f174 Mon Sep 17 00:00:00 2001 From: peter942 Date: Fri, 13 Oct 2023 02:57:36 +0200 Subject: [PATCH 067/164] Logging view and more --- .../components/frame_styling_page.py | 93 ++----------------- ui_components/widgets/frame_selector.py | 21 +++-- ui_components/widgets/sidebar_logger.py | 86 +++++++++++++++++ 3 files changed, 110 insertions(+), 90 deletions(-) create mode 100644 ui_components/widgets/sidebar_logger.py diff --git a/ui_components/components/frame_styling_page.py b/ui_components/components/frame_styling_page.py index 9734150d..a2aaef39 100644 --- a/ui_components/components/frame_styling_page.py +++ b/ui_components/components/frame_styling_page.py @@ -1,10 +1,6 @@ -from datetime import timedelta -import json -import time -import streamlit as st -from shared.constants import InferenceParamType, InferenceStatus, ViewType -from shared.utils import is_url_valid +import streamlit as st +from shared.constants import ViewType from ui_components.methods.common_methods import add_key_frame,compare_to_previous_and_next_frame,compare_to_source_frame, process_inference_output,style_cloning_element from ui_components.methods.ml_methods import trigger_restyling_process @@ -20,11 +16,10 @@ from ui_components.widgets.animation_style_element import animation_style_element from ui_components.widgets.inpainting_element import inpainting_element from ui_components.widgets.drawing_element import drawing_element +from ui_components.widgets.sidebar_logger import sidebar_logger from ui_components.widgets.list_view import list_view_set_up, page_toggle, styling_list_view,motion_list_view from utils import st_memory - -import math from ui_components.constants import CreativeProcessType, WorkflowStageType from utils.data_repo.data_repo import DataRepo @@ -236,87 +231,17 @@ def frame_styling_page(mainheader2, project_uuid: str): elif 
st.session_state['list_view_type'] == "Timeline View": - with st.sidebar: - with st.expander("🌀 Batch Styling", expanded=True): - styling_element(st.session_state['current_frame_uuid'], view_type=ViewType.LIST.value) + if st.session_state['page'] == "Styling": + with st.sidebar: + with st.expander("🌀 Batch Styling", expanded=False): + styling_element(st.session_state['current_frame_uuid'], view_type=ViewType.LIST.value) timeline_view(shift_frames_setting, project_uuid, "Styling", header_col_3, header_col_4) elif st.session_state['page'] == "Motion": timeline_view(shift_frames_setting, project_uuid, "Motion", header_col_3, header_col_4) - - # ------- change this ---------- - + with st.sidebar: with st.expander("🔍 Inference Logging", expanded=True): - - def display_sidebar_log_list(data_repo, project_uuid): - a1, _, a3 = st.columns([1, 0.2, 1]) - - log_list = data_repo.get_all_inference_log_list(project_uuid) - refresh_disabled = not any(log.status in [InferenceStatus.QUEUED.value, InferenceStatus.IN_PROGRESS.value] for log in log_list) - - if a1.button("Refresh log", disabled=refresh_disabled): st.rerun() - a3.button("Jump to full log view") - - b1, b2 = st.columns([1, 1]) - items_per_page = b2.slider("Items per page", min_value=1, max_value=20, value=5, step=1) - page_number = b1.number_input('Page number', min_value=1, max_value=math.ceil(len(log_list) / items_per_page), value=1, step=1) - - log_list = log_list[::-1][(page_number - 1) * items_per_page : page_number * items_per_page] - - st.markdown("---") - - for idx, log in enumerate(log_list): - - origin_data = json.loads(log.input_params).get(InferenceParamType.ORIGIN_DATA.value, None) - if not log.status or not origin_data: - continue - - output_url = None - output_data = json.loads(log.output_details) - if 'output' in output_data and output_data['output']: - output_url = output_data['output'][0] if isinstance(output_data['output'], list) else output_data['output'] - - c1, c2, c3 = st.columns([1, 1 if 
output_url else 0.01, 1]) - - with c1: - input_params = json.loads(log.input_params) - st.caption(f"Prompt:") - prompt = input_params.get('prompt', 'No prompt found') - st.write(f'"{prompt[:30]}..."' if len(prompt) > 30 else f'"{prompt}"') - st.caption(f"Model:") - st.write(json.loads(log.output_details)['model_name'].split('/')[-1]) - - with c2: - if output_url: - if output_url.endswith('png') or output_url.endswith('jpg') or output_url.endswith('jpeg') or output_url.endswith('gif'): - st.image(output_url) - elif output_url.endswith('mp4'): - st.video(output_url, format='mp4', start_time=0) - else: - st.info("No data to display") - - with c3: - if log.status == InferenceStatus.COMPLETED.value: - st.success("Completed") - elif log.status == InferenceStatus.FAILED.value: - st.warning("Failed") - elif log.status == InferenceStatus.QUEUED.value: - st.info("Queued") - elif log.status == InferenceStatus.IN_PROGRESS.value: - st.info("In progress") - elif log.status == InferenceStatus.CANCELED.value: - st.warning("Canceled") - - if output_url: - if st.button(f"Jump to frame {idx}"): - st.info("Fix this.") - # if st.button("Delete", key=f"delete_{log.uuid}"): - # data_repo.update_inference_log(log.uuid, status="") - # st.rerun() - - st.markdown("---") - - display_sidebar_log_list(data_repo, project_uuid) + sidebar_logger(data_repo, project_uuid) diff --git a/ui_components/widgets/frame_selector.py b/ui_components/widgets/frame_selector.py index 375fb9bc..7849bcbd 100644 --- a/ui_components/widgets/frame_selector.py +++ b/ui_components/widgets/frame_selector.py @@ -40,19 +40,28 @@ def frame_selector_widget(): - with st.expander("🖼️ Frame Details"): - image_1, image_2 = st.columns([1,1]) - with image_1: + with st.expander(f"🖼️ Frame #{st.session_state['current_frame_index']} Details"): + a1, a2 = st.columns([1,1]) + with a1: st.warning(f"Guidance Image:") display_image(st.session_state['current_frame_uuid'], stage=WorkflowStageType.SOURCE.value, clickable=False) - 
st.caption("Replace guidance image") - replace_image_widget(st.session_state['current_frame_uuid'], stage=WorkflowStageType.SOURCE.value) - with image_2: + + with a2: st.success(f"Main Styled Image:") display_image(st.session_state['current_frame_uuid'], stage=WorkflowStageType.STYLED.value, clickable=False) + + st.markdown("---") + + b1, b2 = st.columns([1,1]) + with b1: + st.caption("Replace guidance image") + replace_image_widget(st.session_state['current_frame_uuid'], stage=WorkflowStageType.SOURCE.value) + + with b2: st.caption("Replace styled image") replace_image_widget(st.session_state['current_frame_uuid'], stage=WorkflowStageType.STYLED.value) + st.markdown("---") if st.button("Delete key frame"): delete_frame(st.session_state['current_frame_uuid']) diff --git a/ui_components/widgets/sidebar_logger.py b/ui_components/widgets/sidebar_logger.py new file mode 100644 index 00000000..155a0500 --- /dev/null +++ b/ui_components/widgets/sidebar_logger.py @@ -0,0 +1,86 @@ +import streamlit as st + +from shared.constants import InferenceParamType, InferenceStatus + +import json +import math + +def sidebar_logger(data_repo, project_uuid): + a1, _, a3 = st.columns([1, 0.2, 1]) + + log_list = data_repo.get_all_inference_log_list(project_uuid) + refresh_disabled = not any(log.status in [InferenceStatus.QUEUED.value, InferenceStatus.IN_PROGRESS.value] for log in log_list) + + if a1.button("Refresh log", disabled=refresh_disabled): st.rerun() + a3.button("Jump to full log view") + + # Add radio button for status selection + status_option = st.radio("Statuses to display:", options=["All", "In Progress", "Succeeded", "Failed"], key="status_option", index=0, horizontal=True) + + # Filter log_list based on selected status + if status_option == "In Progress": + log_list = [log for log in log_list if log.status in [InferenceStatus.QUEUED.value, InferenceStatus.IN_PROGRESS.value]] + elif status_option == "Succeeded": + log_list = [log for log in log_list if log.status == 
InferenceStatus.COMPLETED.value] + elif status_option == "Failed": + log_list = [log for log in log_list if log.status == InferenceStatus.FAILED.value] + + b1, b2 = st.columns([1, 1]) + items_per_page = b2.slider("Items per page", min_value=1, max_value=20, value=5, step=1) + page_number = b1.number_input('Page number', min_value=1, max_value=math.ceil(len(log_list) / items_per_page), value=1, step=1) + + log_list = log_list[::-1][(page_number - 1) * items_per_page : page_number * items_per_page] + + st.markdown("---") + + for idx, log in enumerate(log_list): + + origin_data = json.loads(log.input_params).get(InferenceParamType.ORIGIN_DATA.value, None) + if not log.status or not origin_data: + continue + + output_url = None + output_data = json.loads(log.output_details) + if 'output' in output_data and output_data['output']: + output_url = output_data['output'][0] if isinstance(output_data['output'], list) else output_data['output'] + + c1, c2, c3 = st.columns([1, 1 if output_url else 0.01, 1]) + + with c1: + input_params = json.loads(log.input_params) + st.caption(f"Prompt:") + prompt = input_params.get('prompt', 'No prompt found') + st.write(f'"{prompt[:30]}..."' if len(prompt) > 30 else f'"{prompt}"') + st.caption(f"Model:") + st.write(json.loads(log.output_details)['model_name'].split('/')[-1]) + + with c2: + if output_url: + if output_url.endswith('png') or output_url.endswith('jpg') or output_url.endswith('jpeg') or output_url.endswith('gif'): + st.image(output_url) + elif output_url.endswith('mp4'): + st.video(output_url, format='mp4', start_time=0) + else: + st.info("No data to display") + + with c3: + if log.status == InferenceStatus.COMPLETED.value: + st.success("Completed") + elif log.status == InferenceStatus.FAILED.value: + st.warning("Failed") + elif log.status == InferenceStatus.QUEUED.value: + st.info("Queued") + elif log.status == InferenceStatus.IN_PROGRESS.value: + st.info("In progress") + elif log.status == InferenceStatus.CANCELED.value: + 
st.warning("Canceled") + + if output_url: + if st.button(f"Jump to frame {idx}"): + st.info("Fix this.") + + # if st.button("Delete", key=f"delete_{log.uuid}"): + # data_repo.update_inference_log(log.uuid, status="") + # st.rerun() + + st.markdown("---") \ No newline at end of file From 3712b720be2f7b9136f9e10d7addafea869a0d99 Mon Sep 17 00:00:00 2001 From: peter942 Date: Fri, 13 Oct 2023 20:57:40 +0200 Subject: [PATCH 068/164] Mood Board view --- .../components/frame_styling_page.py | 16 ++- ui_components/components/mood_board_page.py | 126 ++++++++++++++++++ ui_components/setup.py | 6 +- .../widgets/variant_comparison_element.py | 2 +- .../widgets/variant_comparison_grid.py | 36 +++++ 5 files changed, 181 insertions(+), 5 deletions(-) create mode 100644 ui_components/components/mood_board_page.py create mode 100644 ui_components/widgets/variant_comparison_grid.py diff --git a/ui_components/components/frame_styling_page.py b/ui_components/components/frame_styling_page.py index a2aaef39..8e6035f8 100644 --- a/ui_components/components/frame_styling_page.py +++ b/ui_components/components/frame_styling_page.py @@ -2,7 +2,7 @@ import streamlit as st from shared.constants import ViewType -from ui_components.methods.common_methods import add_key_frame,compare_to_previous_and_next_frame,compare_to_source_frame, process_inference_output,style_cloning_element +from ui_components.methods.common_methods import add_key_frame,compare_to_previous_and_next_frame,compare_to_source_frame, process_inference_output,style_cloning_element, promote_image_variant from ui_components.methods.ml_methods import trigger_restyling_process from ui_components.widgets.cropping_element import cropping_selector_element from ui_components.widgets.frame_clip_generation_elements import current_preview_video_element, update_animation_style_element @@ -17,9 +17,12 @@ from ui_components.widgets.inpainting_element import inpainting_element from ui_components.widgets.drawing_element import drawing_element 
from ui_components.widgets.sidebar_logger import sidebar_logger +from ui_components.widgets.variant_comparison_grid import variant_comparison_grid from ui_components.widgets.list_view import list_view_set_up, page_toggle, styling_list_view,motion_list_view from utils import st_memory +import time + from ui_components.constants import CreativeProcessType, WorkflowStageType from utils.data_repo.data_repo import DataRepo @@ -95,13 +98,19 @@ def frame_styling_page(mainheader2, project_uuid: str): elif st.session_state['page'] == CreativeProcessType.STYLING.value: # carousal_of_images_element(project_uuid, stage=WorkflowStageType.STYLED.value) comparison_values = [ - "Other Variants", "Source Frame", "Previous & Next Frame", "None"] + "Single Variants", "All Other Variants","Source Frame", "Previous & Next Frame", "None"] st.session_state['show_comparison'] = st_memory.radio("Show comparison to:", options=comparison_values, horizontal=True, key="show_comparison_radio") - if st.session_state['show_comparison'] == "Other Variants": + if st.session_state['show_comparison'] == "Single Variants": variant_comparison_element(st.session_state['current_frame_uuid'], stage=CreativeProcessType.STYLING.value) + + elif st.session_state['show_comparison'] == "All Other Variants": + + + + variant_comparison_grid(st.session_state['current_frame_uuid'], stage=CreativeProcessType.STYLING.value) elif st.session_state['show_comparison'] == "Source Frame": compare_to_source_frame(timing_details) @@ -242,6 +251,7 @@ def frame_styling_page(mainheader2, project_uuid: str): timeline_view(shift_frames_setting, project_uuid, "Motion", header_col_3, header_col_4) with st.sidebar: + with st.expander("🔍 Inference Logging", expanded=True): sidebar_logger(data_repo, project_uuid) diff --git a/ui_components/components/mood_board_page.py b/ui_components/components/mood_board_page.py new file mode 100644 index 00000000..19905967 --- /dev/null +++ b/ui_components/components/mood_board_page.py @@ -0,0 
+1,126 @@ +import streamlit as st +from utils.data_repo.data_repo import DataRepo +from shared.constants import AIModelType + + +def mood_board_page(project_uuid): + + def get_varied_text(styling_instructions="", character_instructions="", action_instructions="", scene_instructions=""): + text_list = [] + + if styling_instructions: + system_instructions = "PLACEHOLDER_STYLING" + # result = query_model(styling_instructions, system_instructions) + result = "Styling instructions" + text_list.append(result) + + if character_instructions: + system_instructions = "PLACEHOLDER_CHARACTER" + # result = query_model(character_instructions, system_instructions) + result = "Character instructions" + text_list.append(result) + + if action_instructions: + system_instructions = "PLACEHOLDER_ACTION" + # result = query_model(action_instructions, system_instructions) + result = "Action instructions" + text_list.append(result) + + if scene_instructions: + system_instructions = "PLACEHOLDER_SCENE" + # result = query_model(scene_instructions, system_instructions) + result = "Scene instructions" + text_list.append(result) + + return ", ".join(text_list) + + data_repo = DataRepo() + st.subheader("Mood Board") + a1, a2, a3 = st.columns([0.5, 1, 0.5]) + with a2: + prompt = st.text_area("What's your prompt?", key="prompt") + + + b1, b2, b3, b4 = st.columns([1, 1, 1, 1]) + with b1: + variate_styling = st.checkbox("Variate styling", key="variate_styling") + if variate_styling: + styling_instructions = st.text_area("How would you like to variate styling?", key="variate_styling_textarea") + else: + styling_instructions = "" + + with b2: + variate_character = st.checkbox("Variate character", key="variate_character") + if variate_character: + character_instructions = st.text_area("How would you like to variate character?", key="variate_character_textarea") + else: + character_instructions = "" + + with b3: + variate_action = st.checkbox("Variate action", key="variate_action") + if variate_action: 
+ action_instructions = st.text_area("How would you like to variate action?", key="variate_action_textarea") + else: + action_instructions = "" + + with b4: + variate_scene = st.checkbox("Variate scene", key="variate_scene") + if variate_scene: + scene_instructions = st.text_area("How would you like to variate the scene?", key="variate_scene_textarea") + else: + scene_instructions = "" + + model_list = data_repo.get_all_ai_model_list(model_type_list=[AIModelType.TXT2IMG.value], custom_trained=False) + model_name_list = list(set([m.name for m in model_list])) + + c1, c2, c3 = st.columns([0.25, 1, 0.25]) + with c2: + models_to_use = st.multiselect("Which models would you like to use?", model_name_list, key="models_to_use", default=model_name_list) + + d1, d2, d3 = st.columns([0.5, 1, 0.5]) + with d2: + number_to_generate = st.slider("How many images would you like to generate?", min_value=1, max_value=100, value=10, step=1, key="number_to_generate") + + if st.button("Generate images", key="generate_images", use_container_width=True, type="primary"): + st.info("Generating images...") + counter = 0 + varied_text = "" + for _ in range(number_to_generate): + for model_name in models_to_use: + if counter % 4 == 0 and (styling_instructions or character_instructions or action_instructions or scene_instructions): + varied_text = get_varied_text(styling_instructions, character_instructions, action_instructions, scene_instructions) + prompt_with_variations = f"{prompt}, {varied_text}" if prompt else varied_text + st.write(f"Prompt: '{prompt_with_variations}'") + st.write(f"Model: {model_name}") + counter += 1 + + timing = data_repo.get_timing_from_uuid("c414f700-680b-4712-a9c5-22c9935d7855") + + variants = timing.alternative_images_list + + st.markdown("***") + num_columns = st.slider('Number of columns', min_value=1, max_value=10, value=4) + + num_items_per_page = 30 + num_pages = len(variants) // num_items_per_page + if len(variants) % num_items_per_page > 0: + num_pages += 
1 # Add extra page if there are remaining items + + page_number = st.radio("Select page", options=range(1, num_pages + 1)) + + start_index = (page_number - 1) * num_items_per_page + end_index = start_index + num_items_per_page + + for i in range(start_index, min(end_index, len(variants)), num_columns): + cols = st.columns(num_columns) + for j in range(num_columns): + if i + j < len(variants): + with cols[j]: + st.image(variants[i + j].location, use_column_width=True) + with st.expander(f'Variant #{i + j + 1}', False): + st.info("Instructions: PLACEHOLDER") + + if st.button(f"Add to timeline", key=f"Promote Variant #{i + j + 1} for {st.session_state['current_frame_index']}", help="Promote this variant to the primary image", use_container_width=True): + promote_image_variant(timing.uuid, i + j) + st.rerun() + st.markdown("***") \ No newline at end of file diff --git a/ui_components/setup.py b/ui_components/setup.py index 6d9e99a9..5656cb19 100644 --- a/ui_components/setup.py +++ b/ui_components/setup.py @@ -11,6 +11,7 @@ from ui_components.components.new_project_page import new_project_page from ui_components.components.project_settings_page import project_settings_page from ui_components.components.video_rendering_page import video_rendering_page +from ui_components.components.mood_board_page import mood_board_page from streamlit_option_menu import option_menu from ui_components.constants import CreativeProcessType from ui_components.models import InternalAppSettingObject @@ -184,7 +185,7 @@ def on_change_view_type(key): elif st.session_state["main_view_type"] == "Tools & Settings": with st.sidebar: - tool_pages = ["Query Logger", "Custom Models", "Project Settings"] + tool_pages = ["Query Logger", "Mood Board", "Custom Models", "Project Settings"] if st.session_state["page"] not in tool_pages: st.session_state["page"] = tool_pages[0] @@ -198,6 +199,9 @@ def on_change_view_type(key): custom_models_page(st.session_state["project_uuid"]) elif st.session_state["page"] 
== "Project Settings": project_settings_page(st.session_state["project_uuid"]) + elif st.session_state["page"] == "Mood Board": + mood_board_page(st.session_state["project_uuid"]) + elif st.session_state["main_view_type"] == "Video Rendering": diff --git a/ui_components/widgets/variant_comparison_element.py b/ui_components/widgets/variant_comparison_element.py index 40af5807..2b51224f 100644 --- a/ui_components/widgets/variant_comparison_element.py +++ b/ui_components/widgets/variant_comparison_element.py @@ -4,7 +4,7 @@ import uuid from typing import List from ui_components.constants import CreativeProcessType -from ui_components.methods.common_methods import promote_image_variant, promote_video_variant +from ui_components.methods.common_methods import promote_image_variant from utils.data_repo.data_repo import DataRepo diff --git a/ui_components/widgets/variant_comparison_grid.py b/ui_components/widgets/variant_comparison_grid.py new file mode 100644 index 00000000..f88fa9f5 --- /dev/null +++ b/ui_components/widgets/variant_comparison_grid.py @@ -0,0 +1,36 @@ +import streamlit as st +from ui_components.constants import CreativeProcessType +from ui_components.methods.common_methods import promote_image_variant +from utils.data_repo.data_repo import DataRepo + + +def variant_comparison_grid(timing_uuid, stage=CreativeProcessType.MOTION.value): + data_repo = DataRepo() + + timing = data_repo.get_timing_from_uuid(timing_uuid) + variants = timing.alternative_images_list + + + + current_variant = timing.primary_interpolated_video_index if stage == CreativeProcessType.MOTION.value else int( + timing.primary_variant_index) + + num_columns = st.slider('Number of columns', min_value=1, max_value=10, value=4) + + for i in range(0, len(variants), num_columns): + cols = st.columns(num_columns) + for j in range(num_columns): + if i + j < len(variants): + with cols[j]: + if stage == CreativeProcessType.MOTION.value: + st.video(variants[i + j].location, format='mp4', 
start_time=0) if variants[i + j] else st.error("No video present") + else: + st.image(variants[i + j].location, use_column_width=True) + + if i + j == current_variant: + st.success("**Main variant**") + else: + if st.button(f"Promote Variant #{i + j + 1}", key=f"Promote Variant #{i + j + 1} for {st.session_state['current_frame_index']}", help="Promote this variant to the primary image", use_container_width=True): + + promote_image_variant(timing.uuid, i + j) + st.rerun() \ No newline at end of file From 2647416e00bab74276238f5d8bdfeb7ff5752205 Mon Sep 17 00:00:00 2001 From: piyushK52 Date: Sat, 14 Oct 2023 12:51:09 +0530 Subject: [PATCH 069/164] wip: auto db update --- app.py | 1 + backend/db_repo.py | 1 + .../migrations/0010_project_metadata_added.py | 18 + backend/models.py | 1 + backend/serializers/dto.py | 2 +- banodoco_runner.py | 32 +- shared/constants.py | 3 + .../components/frame_styling_page.py | 7 +- ui_components/methods/common_methods.py | 523 +----------------- ui_components/models.py | 8 +- ui_components/setup.py | 17 +- .../widgets/add_key_frame_element.py | 58 +- ui_components/widgets/cropping_element.py | 6 +- .../widgets/frame_movement_widgets.py | 176 ++++++ ui_components/widgets/frame_selector.py | 2 +- .../widgets/frame_style_clone_element.py | 41 ++ ui_components/widgets/image_zoom_widgets.py | 105 ++++ ui_components/widgets/inpainting_element.py | 105 +++- ui_components/widgets/list_view.py | 3 +- ui_components/widgets/timeline_view.py | 3 +- .../widgets/variant_comparison_element.py | 58 +- utils/common_utils.py | 5 + utils/constants.py | 2 +- utils/data_repo/data_repo.py | 3 - 24 files changed, 632 insertions(+), 548 deletions(-) create mode 100644 backend/migrations/0010_project_metadata_added.py create mode 100644 ui_components/widgets/frame_movement_widgets.py create mode 100644 ui_components/widgets/frame_style_clone_element.py create mode 100644 ui_components/widgets/image_zoom_widgets.py diff --git a/app.py b/app.py index 
2032bbb4..45473d6c 100644 --- a/app.py +++ b/app.py @@ -49,6 +49,7 @@ def start_runner(): if not is_process_active(RUNNER_PROCESS_NAME): app_logger.info("Starting runner") + # _ = subprocess.Popen(["streamlit", "run", "banodoco_runner.py", "--runner.fastReruns", "false", "--server.port", "5502", "--server.headless", "true"]) _ = subprocess.Popen(["python", "banodoco_runner.py"]) while not is_process_active(RUNNER_PROCESS_NAME): time.sleep(0.1) diff --git a/backend/db_repo.py b/backend/db_repo.py index 8eb6e945..a21e62f7 100644 --- a/backend/db_repo.py +++ b/backend/db_repo.py @@ -27,6 +27,7 @@ logger = AppLogger() +# @measure_execution_time class DBRepo: _instance = None _count = 0 diff --git a/backend/migrations/0010_project_metadata_added.py b/backend/migrations/0010_project_metadata_added.py new file mode 100644 index 00000000..ed1a24ae --- /dev/null +++ b/backend/migrations/0010_project_metadata_added.py @@ -0,0 +1,18 @@ +# Generated by Django 4.2.1 on 2023-10-14 01:55 + +from django.db import migrations, models + + +class Migration(migrations.Migration): + + dependencies = [ + ('backend', '0009_log_status_added'), + ] + + operations = [ + migrations.AddField( + model_name='project', + name='meta_data', + field=models.TextField(default=None, null=True), + ), + ] diff --git a/backend/models.py b/backend/models.py index 6fdaf097..6083c00e 100644 --- a/backend/models.py +++ b/backend/models.py @@ -40,6 +40,7 @@ class Project(BaseModel): name = models.CharField(max_length=255, default="") user = models.ForeignKey(User, on_delete=models.DO_NOTHING, null=True) temp_file_list = models.TextField(default=None, null=True) + meta_data = models.TextField(default=None, null=True) class Meta: app_label = 'backend' diff --git a/backend/serializers/dto.py b/backend/serializers/dto.py index f9f78630..871917b7 100644 --- a/backend/serializers/dto.py +++ b/backend/serializers/dto.py @@ -19,7 +19,7 @@ class ProjectDto(serializers.ModelSerializer): user_uuid = 
serializers.SerializerMethodField() class Meta: model = Project - fields = ('uuid', 'name', 'user_uuid', 'created_on', 'temp_file_list') + fields = ('uuid', 'name', 'user_uuid', 'created_on', 'temp_file_list', 'meta_data') def get_user_uuid(self, obj): return obj.user.uuid diff --git a/banodoco_runner.py b/banodoco_runner.py index ed727a3c..11e7fca8 100644 --- a/banodoco_runner.py +++ b/banodoco_runner.py @@ -5,9 +5,10 @@ import setproctitle from dotenv import load_dotenv import django -from shared.constants import InferenceParamType, InferenceStatus +from shared.constants import InferenceParamType, InferenceStatus, ProjectMetaData from shared.logging.constants import LoggingType from shared.logging.logging import AppLogger +from utils.data_repo.data_repo import DataRepo from utils.ml_processor.replicate.constants import replicate_status_map from utils.constants import RUNNER_PROCESS_NAME @@ -41,6 +42,12 @@ def main(): time.sleep(REFRESH_FREQUENCY) check_and_update_db() + # test_data_repo() + +def test_data_repo(): + data_repo = DataRepo() + app_settings = data_repo.get_app_setting_from_uuid() + print(app_settings.replicate_username) def is_app_running(): url = 'http://localhost:5500/healthz' @@ -65,6 +72,7 @@ def check_and_update_db(): log_list = InferenceLog.objects.filter(status__in=[InferenceStatus.QUEUED.value, InferenceStatus.IN_PROGRESS.value], is_disabled=False).all() + timing_update_list = {} # {project_id: [timing_uuids]} for log in log_list: input_params = json.loads(log.input_params) replicate_data = input_params.get(InferenceParamType.REPLICATE_INFERENCE.value, None) @@ -88,12 +96,34 @@ def check_and_update_db(): isinstance(result['output'], str)) else [result['output'][-1]] InferenceLog.objects.filter(id=log.id).update(status=log_status, output_details=json.dumps(output_details)) + origin_data = json.loads(log.input_params).get(InferenceParamType.ORIGIN_DATA.value, None) + if origin_data and log_status == InferenceStatus.COMPLETED.value: + from 
ui_components.methods.common_methods import process_inference_output + + origin_data['output'] = output_details['output'] + origin_data['log_uuid'] = log.uuid + print("processing inference output") + process_inference_output(**origin_data) + if str(log.project.uuid) not in timing_update_list: + timing_update_list[str(log.project.uuid)] = [] + + timing_update_list[str(log.project.uuid)].append(origin_data['timing_uuid']) + else: app_logger.log(LoggingType.DEBUG, f"Error: {response.content}") else: # if not replicate data is present then removing the status InferenceLog.objects.filter(id=log.id).update(status="") + # adding update_data in the project + from backend.models import Project + from django.db import transaction + + for project_uuid, val in timing_update_list.items(): + with transaction.atomic(): + val = list(set(val)) + _ = Project.objects.filter(uuid=project_uuid).update(meta_data=json.dumps({ProjectMetaData.DATA_UPDATE.value: val})) + if not len(log_list): # app_logger.log(LoggingType.DEBUG, f"No logs found") pass diff --git a/shared/constants.py b/shared/constants.py index ec81cac9..4adae1e1 100644 --- a/shared/constants.py +++ b/shared/constants.py @@ -91,6 +91,9 @@ class InferenceParamType(ExtendedEnum): QUERY_DICT = "query_dict" # query dict of standardized inference params ORIGIN_DATA = "origin_data" # origin data - used to store file once inference is completed +class ProjectMetaData(ExtendedEnum): + DATA_UPDATE = "data_update" # info regarding cache/data update when runner updates the db + ##################### global constants ##################### SERVER = os.getenv('SERVER', ServerType.PRODUCTION.value) diff --git a/ui_components/components/frame_styling_page.py b/ui_components/components/frame_styling_page.py index 4fb7eec4..e6bbc03d 100644 --- a/ui_components/components/frame_styling_page.py +++ b/ui_components/components/frame_styling_page.py @@ -6,17 +6,18 @@ from shared.utils import is_url_valid -from ui_components.methods.common_methods 
import add_key_frame,compare_to_previous_and_next_frame,compare_to_source_frame, process_inference_output,style_cloning_element +from ui_components.methods.common_methods import process_inference_output from ui_components.methods.ml_methods import trigger_restyling_process from ui_components.widgets.cropping_element import cropping_selector_element from ui_components.widgets.frame_clip_generation_elements import current_preview_video_element, update_animation_style_element from ui_components.widgets.frame_selector import frame_selector_widget +from ui_components.widgets.frame_style_clone_element import style_cloning_element from ui_components.widgets.image_carousal import display_image from ui_components.widgets.prompt_finder import prompt_finder_element -from ui_components.widgets.add_key_frame_element import add_key_frame_element +from ui_components.widgets.add_key_frame_element import add_key_frame, add_key_frame_element from ui_components.widgets.styling_element import styling_element from ui_components.widgets.timeline_view import timeline_view -from ui_components.widgets.variant_comparison_element import variant_comparison_element +from ui_components.widgets.variant_comparison_element import compare_to_previous_and_next_frame, compare_to_source_frame, variant_comparison_element from ui_components.widgets.animation_style_element import animation_style_element from ui_components.widgets.inpainting_element import inpainting_element from ui_components.widgets.drawing_element import drawing_element diff --git a/ui_components/methods/common_methods.py b/ui_components/methods/common_methods.py index 4dd8f660..cfa36a55 100644 --- a/ui_components/methods/common_methods.py +++ b/ui_components/methods/common_methods.py @@ -1,9 +1,7 @@ import io from typing import List -import streamlit as st import os from PIL import Image, ImageDraw, ImageOps, ImageFilter -from datetime import datetime from moviepy.editor import * import cv2 import requests as r @@ -14,171 +12,21 @@ 
from io import BytesIO import numpy as np import urllib3 -from shared.constants import SERVER, AIModelCategory, AIModelType, InferenceType, InternalFileType, ServerType +from shared.constants import SERVER, InferenceType, InternalFileType, ServerType from pydub import AudioSegment from backend.models import InternalFileObject -from ui_components.constants import CROPPED_IMG_LOCAL_PATH, MASK_IMG_LOCAL_PATH, SECOND_MASK_FILE, SECOND_MASK_FILE_PATH, TEMP_MASK_FILE, CreativeProcessType, WorkflowStageType +from ui_components.constants import SECOND_MASK_FILE, SECOND_MASK_FILE_PATH, WorkflowStageType from ui_components.methods.file_methods import add_temp_file_to_project, convert_bytes_to_file, generate_pil_image, generate_temp_file, save_or_host_file, save_or_host_file_bytes -from ui_components.methods.video_methods import calculate_desired_duration_of_individual_clip, create_or_get_single_preview_video, update_speed_of_video_clip +from ui_components.methods.video_methods import calculate_desired_duration_of_individual_clip, update_speed_of_video_clip from ui_components.models import InternalAIModelObject, InternalFrameTimingObject, InternalSettingObject -from utils.common_utils import reset_styling_settings -from utils.constants import ImageStage from utils.data_repo.data_repo import DataRepo from shared.constants import AnimationStyleType -from ui_components.widgets.image_carousal import display_image -from streamlit_image_comparison import image_comparison - from ui_components.models import InternalFileObject -from datetime import datetime from typing import Union -def compare_to_source_frame(timing_details): - if timing_details[st.session_state['current_frame_index']- 1].primary_image: - img2 = timing_details[st.session_state['current_frame_index'] - 1].primary_image_location - else: - img2 = 'https://i.ibb.co/GHVfjP0/Image-Not-Yet-Created.png' - - img1 = timing_details[st.session_state['current_frame_index'] - 1].source_image.location if 
timing_details[st.session_state['current_frame_index'] - 1].source_image else 'https://i.ibb.co/GHVfjP0/Image-Not-Yet-Created.png' - - image_comparison(starting_position=50, - img1=img1, - img2=img2, make_responsive=False, label1=WorkflowStageType.SOURCE.value, label2=WorkflowStageType.STYLED.value) - from utils.media_processor.video import VideoProcessor - - -def compare_to_previous_and_next_frame(project_uuid, timing_details): - data_repo = DataRepo() - mainimages1, mainimages2, mainimages3 = st.columns([1, 1, 1]) - - with mainimages1: - if st.session_state['current_frame_index'] - 2 >= 0: - previous_image = data_repo.get_timing_from_frame_number(project_uuid, frame_number=st.session_state['current_frame_index'] - 2) - st.info(f"Previous image:") - display_image( - timing_uuid=previous_image.uuid, stage=WorkflowStageType.STYLED.value, clickable=False) - - if st.button(f"Preview Interpolation From #{st.session_state['current_frame_index']-1} to #{st.session_state['current_frame_index']}", key=f"Preview Interpolation From #{st.session_state['current_frame_index']-1} to #{st.session_state['current_frame_index']}", use_container_width=True): - prev_frame_timing = data_repo.get_prev_timing(st.session_state['current_frame_uuid']) - create_or_get_single_preview_video(prev_frame_timing.uuid) - prev_frame_timing = data_repo.get_timing_from_uuid(prev_frame_timing.uuid) - if prev_frame_timing.preview_video: - st.video(prev_frame_timing.preview_video.location) - - with mainimages2: - st.success(f"Current image:") - display_image( - timing_uuid=st.session_state['current_frame_uuid'], stage=WorkflowStageType.STYLED.value, clickable=False) - - with mainimages3: - if st.session_state['current_frame_index'] + 1 <= len(timing_details): - next_image = data_repo.get_timing_from_frame_number(project_uuid, frame_number=st.session_state['current_frame_index']) - st.info(f"Next image") - display_image(timing_uuid=next_image.uuid, stage=WorkflowStageType.STYLED.value, clickable=False) - 
- if st.button(f"Preview Interpolation From #{st.session_state['current_frame_index']} to #{st.session_state['current_frame_index']+1}", key=f"Preview Interpolation From #{st.session_state['current_frame_index']} to #{st.session_state['current_frame_index']+1}", use_container_width=True): - create_or_get_single_preview_video(st.session_state['current_frame_uuid']) - current_frame = data_repo.get_timing_from_uuid(st.session_state['current_frame_uuid']) - st.video(current_frame.timed_clip.location) - - - -def style_cloning_element(timing_details): - open_copier = st.checkbox("Copy styling settings from another frame") - if open_copier is True: - copy1, copy2 = st.columns([1, 1]) - with copy1: - frame_index = st.number_input("Which frame would you like to copy styling settings from?", min_value=1, max_value=len( - timing_details), value=st.session_state['current_frame_index'], step=1) - if st.button("Copy styling settings from this frame"): - clone_styling_settings(frame_index - 1, st.session_state['current_frame_uuid']) - reset_styling_settings(st.session_state['current_frame_uuid']) - st.rerun() - - with copy2: - display_image(timing_details[frame_index - 1].uuid, stage=WorkflowStageType.STYLED.value, clickable=False) - - if timing_details[frame_index - 1].primary_image.inference_params: - st.text("Prompt: ") - st.caption(timing_details[frame_index - 1].primary_image.inference_params.prompt) - st.text("Negative Prompt: ") - st.caption(timing_details[frame_index - 1].primary_image.inference_params.negative_prompt) - - if timing_details[frame_index - 1].primary_image.inference_params.model_uuid: - data_repo = DataRepo() - model: InternalAIModelObject = data_repo.get_ai_model_from_uuid(timing_details[frame_index - 1].primary_image.inference_params.model_uuid) - - st.text("Model:") - st.caption(model.name) - - if model.category.lower() == AIModelCategory.CONTROLNET.value: - st.text("Adapter Type:") - st.caption(timing_details[frame_index - 
1].primary_image.inference_params.adapter_type) - -def jump_to_single_frame_view_button(display_number, timing_details): - if st.button(f"Jump to #{display_number}"): - st.session_state['prev_frame_index'] = display_number - st.session_state['current_frame_uuid'] = timing_details[st.session_state['current_frame_index'] - 1].uuid - st.session_state['frame_styling_view_type'] = "Individual View" - st.session_state['change_view_type'] = True - st.rerun() - - -def add_key_frame(selected_image, inherit_styling_settings, how_long_after, which_stage_for_starting_image): - data_repo = DataRepo() - project_uuid = st.session_state['project_uuid'] - timing_details = data_repo.get_timing_list_from_project(project_uuid) - project_settings = data_repo.get_project_setting(project_uuid) - - - if len(timing_details) == 0: - index_of_current_item = 1 - else: - index_of_current_item = min(len(timing_details), st.session_state['current_frame_index']) - - timing_details = data_repo.get_timing_list_from_project(project_uuid) - - if len(timing_details) == 0: - key_frame_time = 0.0 - elif index_of_current_item == len(timing_details): - key_frame_time = float(timing_details[index_of_current_item - 1].frame_time) + how_long_after - else: - key_frame_time = (float(timing_details[index_of_current_item - 1].frame_time) + float( - timing_details[index_of_current_item].frame_time)) / 2.0 - - if len(timing_details) == 0: - new_timing = create_timings_row_at_frame_number(project_uuid, 0) - else: - new_timing = create_timings_row_at_frame_number(project_uuid, index_of_current_item, frame_time=key_frame_time) - - clip_duration = calculate_desired_duration_of_individual_clip(new_timing.uuid) - data_repo.update_specific_timing(new_timing.uuid, clip_duration=clip_duration) - - timing_details = data_repo.get_timing_list_from_project(project_uuid) - if selected_image: - save_uploaded_image(selected_image, project_uuid, timing_details[index_of_current_item].uuid, "source") - 
save_uploaded_image(selected_image, project_uuid, timing_details[index_of_current_item].uuid, "styled") - - if inherit_styling_settings == "Yes": - clone_styling_settings(index_of_current_item - 1, timing_details[index_of_current_item].uuid) - - timing_details[index_of_current_item].animation_style = project_settings.default_animation_style - - if len(timing_details) == 1: - st.session_state['current_frame_index'] = 1 - st.session_state['current_frame_uuid'] = timing_details[0].uuid - else: - st.session_state['prev_frame_index'] = min(len(timing_details), st.session_state['current_frame_index']+1) - st.session_state['current_frame_uuid'] = timing_details[st.session_state['current_frame_index']].uuid - - st.session_state['page'] = CreativeProcessType.STYLING.value - st.session_state['section_index'] = 0 - st.rerun() - - -# TODO: work with source_frame_uuid, instead of source_frame_number def clone_styling_settings(source_frame_number, target_frame_uuid): data_repo = DataRepo() target_timing = data_repo.get_timing_from_uuid(target_frame_uuid) @@ -372,213 +220,17 @@ def apply_image_transformations(image, zoom_level, rotation_angle, x_shift, y_sh return cropped_image -def fetch_image_by_stage(project_uuid, stage): +def fetch_image_by_stage(project_uuid, stage, frame_idx): data_repo = DataRepo() timing_details = data_repo.get_timing_list_from_project(project_uuid) if stage == WorkflowStageType.SOURCE.value: - return timing_details[st.session_state['current_frame_index'] - 1].source_image + return timing_details[frame_idx].source_image elif stage == WorkflowStageType.STYLED.value: - return timing_details[st.session_state['current_frame_index'] - 1].primary_image + return timing_details[frame_idx].primary_image else: return None -def zoom_inputs(position='in-frame', horizontal=False): - if horizontal: - col1, col2, col3, col4 = st.columns(4) - else: - col1 = col2 = col3 = col4 = st - - zoom_level_input = col1.number_input( - "Zoom Level (%)", min_value=10, max_value=1000, 
step=10, key=f"zoom_level_input_key_{position}", value=st.session_state.get('zoom_level_input', 100)) - - rotation_angle_input = col2.number_input( - "Rotation Angle", min_value=-360, max_value=360, step=5, key=f"rotation_angle_input_key_{position}", value=st.session_state.get('rotation_angle_input', 0)) - - x_shift = col3.number_input( - "Shift Left/Right", min_value=-1000, max_value=1000, step=5, key=f"x_shift_key_{position}", value=st.session_state.get('x_shift', 0)) - - y_shift = col4.number_input( - "Shift Up/Down", min_value=-1000, max_value=1000, step=5, key=f"y_shift_key_{position}", value=st.session_state.get('y_shift', 0)) - - # Assign values to st.session_state - st.session_state['zoom_level_input'] = zoom_level_input - st.session_state['rotation_angle_input'] = rotation_angle_input - st.session_state['x_shift'] = x_shift - st.session_state['y_shift'] = y_shift - -def save_zoomed_image(image, timing_uuid, stage, promote=False): - data_repo = DataRepo() - timing = data_repo.get_timing_from_uuid(timing_uuid) - project_uuid = timing.project.uuid - - file_name = str(uuid.uuid4()) + ".png" - - if stage == WorkflowStageType.SOURCE.value: - save_location = f"videos/{project_uuid}/assets/frames/1_selected/{file_name}" - hosted_url = save_or_host_file(image, save_location) - file_data = { - "name": file_name, - "type": InternalFileType.IMAGE.value, - "project_id": project_uuid - } - - if hosted_url: - file_data.update({'hosted_url': hosted_url}) - else: - file_data.update({'local_path': save_location}) - - source_image: InternalFileObject = data_repo.create_file(**file_data) - data_repo.update_specific_timing( - st.session_state['current_frame_uuid'], source_image_id=source_image.uuid) - elif stage == WorkflowStageType.STYLED.value: - save_location = f"videos/{project_uuid}/assets/frames/2_character_pipeline_completed/{file_name}" - hosted_url = save_or_host_file(image, save_location) - file_data = { - "name": file_name, - "type": InternalFileType.IMAGE.value, - 
"project_id": project_uuid - } - - if hosted_url: - file_data.update({'hosted_url': hosted_url}) - else: - file_data.update({'local_path': save_location}) - - styled_image: InternalFileObject = data_repo.create_file(**file_data) - - number_of_image_variants = add_image_variant( - styled_image.uuid, timing_uuid) - if promote: - promote_image_variant(timing_uuid, number_of_image_variants - 1) - - project_update_data = { - "zoom_level": st.session_state['zoom_level_input'], - "rotation_angle_value": st.session_state['rotation_angle_input'], - "x_shift": st.session_state['x_shift'], - "y_shift": st.session_state['y_shift'] - } - - data_repo.update_project_setting(project_uuid, **project_update_data) - - # TODO: **CORRECT-CODE - make a proper column for zoom details - timing_update_data = { - "zoom_details": f"{st.session_state['zoom_level_input']},{st.session_state['rotation_angle_input']},{st.session_state['x_shift']},{st.session_state['y_shift']}", - - } - data_repo.update_specific_timing(timing_uuid, **timing_update_data) - -def reset_zoom_element(): - st.session_state['zoom_level_input_key'] = 100 - st.session_state['rotation_angle_input_key'] = 0 - st.session_state['x_shift_key'] = 0 - st.session_state['y_shift_key'] = 0 - st.session_state['zoom_level_input'] = 100 - st.session_state['rotation_angle_input'] = 0 - st.session_state['x_shift'] = 0 - st.session_state['y_shift'] = 0 - st.rerun() - - - -# cropped_img here is a PIL image object -def inpaint_in_black_space_element(cropped_img, project_uuid, stage=WorkflowStageType.SOURCE.value): - from ui_components.methods.ml_methods import inpainting - - data_repo = DataRepo() - project_settings: InternalSettingObject = data_repo.get_project_setting( - project_uuid) - - st.markdown("##### Inpaint in black space:") - - inpaint_prompt = st.text_area( - "Prompt", value=project_settings.default_prompt) - inpaint_negative_prompt = st.text_input( - "Negative Prompt", value='edge,branches, frame, fractals, text' + 
project_settings.default_negative_prompt) - if 'precision_cropping_inpainted_image_uuid' not in st.session_state: - st.session_state['precision_cropping_inpainted_image_uuid'] = "" - - if st.button("Inpaint"): - width = int(project_settings.width) - height = int(project_settings.height) - - saved_cropped_img = cropped_img.resize( - (width, height), Image.ANTIALIAS) - - hosted_cropped_img_path = save_or_host_file(saved_cropped_img, CROPPED_IMG_LOCAL_PATH) - - # Convert image to grayscale - # Create a new image with the same size as the cropped image - mask = Image.new('RGB', cropped_img.size) - - # Get the width and height of the image - width, height = cropped_img.size - - for x in range(width): - for y in range(height): - # Get the RGB values of the pixel - pixel = cropped_img.getpixel((x, y)) - - # If the image is RGB, unpack the pixel into r, g, and b - if cropped_img.mode == 'RGB': - r, g, b = pixel - # If the image is RGBA, unpack the pixel into r, g, b, and a - elif cropped_img.mode == 'RGBA': - r, g, b, a = pixel - # If the image is grayscale ('L' for luminosity), there's only one channel - elif cropped_img.mode == 'L': - brightness = pixel - else: - raise ValueError( - f'Unsupported image mode: {cropped_img.mode}') - - # If the pixel is black, set it and its adjacent pixels to black in the new image - if r == 0 and g == 0 and b == 0: - mask.putpixel((x, y), (0, 0, 0)) # Black - # Adjust these values to change the range of adjacent pixels - for i in range(-2, 3): - for j in range(-2, 3): - # Check that the pixel is within the image boundaries - if 0 <= x + i < width and 0 <= y + j < height: - mask.putpixel((x + i, y + j), - (0, 0, 0)) # Black - # Otherwise, make the pixel white in the new image - else: - mask.putpixel((x, y), (255, 255, 255)) # White - # Save the mask image - hosted_url = save_or_host_file(mask, MASK_IMG_LOCAL_PATH) - if hosted_url: - add_temp_file_to_project(project_uuid, TEMP_MASK_FILE, hosted_url) - - cropped_img_path = 
hosted_cropped_img_path if hosted_cropped_img_path else CROPPED_IMG_LOCAL_PATH - inpainted_file = inpainting(cropped_img_path, inpaint_prompt, - inpaint_negative_prompt, st.session_state['current_frame_uuid'], True, pass_mask=True) - - st.session_state['precision_cropping_inpainted_image_uuid'] = inpainted_file.uuid - - if st.session_state['precision_cropping_inpainted_image_uuid']: - img_file = data_repo.get_file_from_uuid( - st.session_state['precision_cropping_inpainted_image_uuid']) - st.image(img_file.location, caption="Inpainted Image", - use_column_width=True, width=200) - - if stage == WorkflowStageType.SOURCE.value: - if st.button("Make Source Image"): - data_repo.update_specific_timing( - st.session_state['current_frame_uuid'], source_image_id=img_file.uuid) - st.session_state['precision_cropping_inpainted_image_uuid'] = "" - st.rerun() - - elif stage == WorkflowStageType.STYLED.value: - if st.button("Save + Promote Image"): - timing_details = data_repo.get_timing_list_from_project( - project_uuid) - number_of_image_variants = add_image_variant( - st.session_state['precision_cropping_inpainted_image_uuid'], st.session_state['current_frame_uuid']) - promote_image_variant( - st.session_state['current_frame_uuid'], number_of_image_variants - 1) - st.session_state['precision_cropping_inpainted_image_uuid'] = "" - st.rerun() # returns a PIL image object def rotate_image(location, degree): @@ -608,166 +260,6 @@ def update_timings_in_order(project_uuid): data_repo.update_specific_timing(timing.uuid, frame_time=float(i)) -def change_frame_position_input(timing_uuid, src): - data_repo = DataRepo() - timing: InternalFrameTimingObject = data_repo.get_timing_from_uuid(timing_uuid) - timing_list = data_repo.get_timing_list_from_project(project_uuid=timing.project.uuid) - - min_value = 1 - max_value = len(timing_list) - - new_position = st.number_input("Move to new position:", min_value=min_value, max_value=max_value, - value=timing.aux_frame_index + 1, step=1, 
key=f"new_position_{timing.uuid}_{src}") - - if st.button('Update Position',key=f"change_frame_position_{timing.uuid}_{src}"): - data_repo.update_specific_timing(timing.uuid, aux_frame_index=new_position - 1) - st.rerun() - - -def move_frame(direction, timing_uuid): - data_repo = DataRepo() - timing: InternalFrameTimingObject = data_repo.get_timing_from_uuid( - timing_uuid) - - if direction == "Up": - if timing.aux_frame_index == 0: - st.error("This is the first frame") - time.sleep(1) - return - - data_repo.update_specific_timing(timing.uuid, aux_frame_index=timing.aux_frame_index - 1) - elif direction == "Down": - timing_list = data_repo.get_timing_list_from_project(project_uuid=timing.project.uuid) - if timing.aux_frame_index == len(timing_list) - 1: - st.error("This is the last frame") - time.sleep(1) - return - - data_repo.update_specific_timing(timing.uuid, aux_frame_index=timing.aux_frame_index + 1) - -def move_frame_back_button(timing_uuid, orientation): - direction = "Up" - if orientation == "side-to-side": - arrow = "⬅️" - else: # up-down - arrow = "⬆️" - if st.button(arrow, key=f"move_frame_back_{timing_uuid}", help="Move frame back"): - move_frame(direction, timing_uuid) - st.rerun() - - -def move_frame_forward_button(timing_uuid, orientation): - direction = "Down" - if orientation == "side-to-side": - arrow = "➡️" - else: # up-down - arrow = "⬇️" - - if st.button(arrow, key=f"move_frame_forward_{timing_uuid}", help="Move frame forward"): - move_frame(direction, timing_uuid) - st.rerun() - - -def delete_frame_button(timing_uuid, show_label=False): - if show_label: - label = "Delete Frame 🗑️" - else: - label = "🗑️" - - if st.button(label, key=f"delete_frame_{timing_uuid}", help="Delete frame"): - delete_frame(timing_uuid) - st.rerun() - -def delete_frame(timing_uuid): - data_repo = DataRepo() - timing: InternalFrameTimingObject = data_repo.get_timing_from_uuid(timing_uuid) - next_timing = data_repo.get_next_timing(timing_uuid) - timing_details = 
data_repo.get_timing_list_from_project(project_uuid=timing.project.uuid) - - if next_timing: - data_repo.update_specific_timing( - next_timing.uuid, - interpolated_clip_list=None, - preview_video_id=None, - timed_clip_id=None - ) - - data_repo.delete_timing_from_uuid(timing.uuid) - - if timing.aux_frame_index == len(timing_details) - 1: - st.session_state['current_frame_index'] = max(1, st.session_state['current_frame_index'] - 1) - st.session_state['prev_frame_index'] = st.session_state['current_frame_index'] - st.session_state['current_frame_uuid'] = timing_details[st.session_state['current_frame_index'] - 1].uuid - -def replace_image_widget(timing_uuid, stage): - data_repo = DataRepo() - timing = data_repo.get_timing_from_uuid(timing_uuid) - timing_details = data_repo.get_timing_list_from_project(timing.project.uuid) - - replace_with = st.radio("Replace with:", [ - "Uploaded Frame", "Other Frame"], horizontal=True, key=f"replace_with_what_{stage}") - - - if replace_with == "Other Frame": - - which_stage_to_use_for_replacement = st.radio("Select stage to use:", [ - ImageStage.MAIN_VARIANT.value, ImageStage.SOURCE_IMAGE.value], key=f"which_stage_to_use_for_replacement_{stage}", horizontal=True) - which_image_to_use_for_replacement = st.number_input("Select image to use:", min_value=0, max_value=len( - timing_details)-1, value=0, key=f"which_image_to_use_for_replacement_{stage}") - - if which_stage_to_use_for_replacement == ImageStage.SOURCE_IMAGE.value: - selected_image = timing_details[which_image_to_use_for_replacement].source_image - - - elif which_stage_to_use_for_replacement == ImageStage.MAIN_VARIANT.value: - selected_image = timing_details[which_image_to_use_for_replacement].primary_image - - - st.image(selected_image.local_path, use_column_width=True) - - if st.button("Replace with selected frame", disabled=False,key=f"replace_with_selected_frame_{stage}"): - if stage == "source": - - data_repo.update_specific_timing(timing.uuid, 
source_image_id=selected_image.uuid) - st.success("Replaced") - time.sleep(1) - st.rerun() - - else: - number_of_image_variants = add_image_variant( - selected_image.uuid, timing.uuid) - promote_image_variant( - timing.uuid, number_of_image_variants - 1) - st.success("Replaced") - time.sleep(1) - st.rerun() - - elif replace_with == "Uploaded Frame": - if stage == "source": - uploaded_file = st.file_uploader("Upload Source Image", type=[ - "png", "jpeg"], accept_multiple_files=False) - if st.button("Upload Source Image"): - if uploaded_file: - timing = data_repo.get_timing_from_uuid(timing.uuid) - if save_uploaded_image(uploaded_file, timing.project.uuid, timing.uuid, "source"): - time.sleep(1.5) - st.rerun() - else: - replacement_frame = st.file_uploader("Upload a replacement frame here", type=[ - "png", "jpeg"], accept_multiple_files=False, key=f"replacement_frame_upload_{stage}") - if st.button("Replace frame", disabled=False): - images_for_model = [] - timing = data_repo.get_timing_from_uuid(timing.uuid) - if replacement_frame: - saved_file = save_uploaded_image(replacement_frame, timing.project.uuid, timing.uuid, "styled") - if saved_file: - number_of_image_variants = add_image_variant(saved_file.uuid, timing.uuid) - promote_image_variant( - timing.uuid, number_of_image_variants - 1) - st.success("Replaced") - time.sleep(1) - st.rerun() - def promote_image_variant(timing_uuid, variant_to_promote_frame_number: str): data_repo = DataRepo() timing = data_repo.get_timing_from_uuid(timing_uuid) @@ -906,8 +398,6 @@ def create_or_update_mask(timing_uuid, image) -> InternalFileObject: return timing.mask.location # adds the image file in variant (alternative images) list - - def add_image_variant(image_file_uuid: str, timing_uuid: str): data_repo = DataRepo() image_file: InternalFileObject = data_repo.get_file_from_uuid( @@ -1290,6 +780,7 @@ def execute_image_edit(type_of_mask_selection, type_of_mask_replacement, # if the output is present it adds it to the respective 
place or else it updates the inference log +# NOTE: every function used in this should not change/modify session state in anyway def process_inference_output(**kwargs): data_repo = DataRepo() diff --git a/ui_components/models.py b/ui_components/models.py index 29e92aa5..e6ab31d3 100644 --- a/ui_components/models.py +++ b/ui_components/models.py @@ -1,9 +1,8 @@ import datetime -import streamlit as st import json -from shared.constants import AnimationStyleType, AnimationToolType, InferenceParamType +from shared.constants import InferenceParamType -from ui_components.constants import TEMP_MASK_FILE, DefaultProjectSettingParams, DefaultTimingStyleParams +from ui_components.constants import DefaultProjectSettingParams, DefaultTimingStyleParams from utils.common_decorators import session_state_attributes from utils.constants import MLQueryObject @@ -44,12 +43,13 @@ def inference_params(self) -> MLQueryObject: class InternalProjectObject: - def __init__(self, uuid, name, user_uuid, created_on, temp_file_list): + def __init__(self, uuid, name, user_uuid, created_on, temp_file_list, meta_data=None): self.uuid = uuid self.name = name self.user_uuid = user_uuid self.created_on = created_on self.temp_file_list = temp_file_list + self.meta_data = meta_data @property def project_temp_file_list(self): diff --git a/ui_components/setup.py b/ui_components/setup.py index b726b0cd..a9947851 100644 --- a/ui_components/setup.py +++ b/ui_components/setup.py @@ -1,9 +1,10 @@ +import json import time import streamlit as st import os import math from moviepy.editor import * -from shared.constants import SERVER, ServerType +from shared.constants import SERVER, ProjectMetaData, ServerType from ui_components.components.app_settings_page import app_settings_page from ui_components.components.custom_models_page import custom_models_page @@ -55,8 +56,7 @@ def setup_app_ui(): } ) - project_list = data_repo.get_all_project_list( - user_id=get_current_user_uuid()) + project_list = 
data_repo.get_all_project_list(user_id=get_current_user_uuid()) if st.session_state["section"] == "Open Project": @@ -86,6 +86,17 @@ def setup_app_ui(): st.session_state["project_uuid"] = project_list[selected_index].uuid + # checking for project metadata (like cache updates) + # project_update_data is of the format {"data_update": [{"timing_uuid": timing_uuid}]} + project_update_data = json.loads(project_list[selected_index].meta_data).\ + get(ProjectMetaData.DATA_UPDATE.value, None) if project_list[selected_index].meta_data else None + if project_update_data: + for timing_uuid in project_update_data: + _ = data_repo.get_timing_from_uuid(timing_uuid) + + # removing the metadata after processing + data_repo.update_project(uuid=project_list[selected_index].uuid, meta_data=json.dumps({ProjectMetaData.DATA_UPDATE.value: []})) + if "current_frame_index" not in st.session_state: st.session_state['current_frame_index'] = 1 diff --git a/ui_components/widgets/add_key_frame_element.py b/ui_components/widgets/add_key_frame_element.py index 93a7cb36..739a9207 100644 --- a/ui_components/widgets/add_key_frame_element.py +++ b/ui_components/widgets/add_key_frame_element.py @@ -1,4 +1,7 @@ import streamlit as st +from ui_components.constants import CreativeProcessType +from ui_components.methods.video_methods import calculate_desired_duration_of_individual_clip +from ui_components.widgets.image_zoom_widgets import zoom_inputs from utils import st_memory @@ -6,7 +9,7 @@ from utils.constants import ImageStage from ui_components.methods.file_methods import generate_pil_image,save_or_host_file -from ui_components.methods.common_methods import apply_image_transformations,zoom_inputs +from ui_components.methods.common_methods import apply_image_transformations, clone_styling_settings, create_timings_row_at_frame_number, save_uploaded_image from PIL import Image @@ -91,4 +94,55 @@ def add_key_frame_element(timing_details, project_uuid): else: st.error("No Starting Image Found") - return 
selected_image, inherit_styling_settings, how_long_after, transformation_stage \ No newline at end of file + return selected_image, inherit_styling_settings, how_long_after, transformation_stage + +def add_key_frame(selected_image, inherit_styling_settings, how_long_after, which_stage_for_starting_image): + data_repo = DataRepo() + project_uuid = st.session_state['project_uuid'] + timing_details = data_repo.get_timing_list_from_project(project_uuid) + project_settings = data_repo.get_project_setting(project_uuid) + + + if len(timing_details) == 0: + index_of_current_item = 1 + else: + index_of_current_item = min(len(timing_details), st.session_state['current_frame_index']) + + timing_details = data_repo.get_timing_list_from_project(project_uuid) + + if len(timing_details) == 0: + key_frame_time = 0.0 + elif index_of_current_item == len(timing_details): + key_frame_time = float(timing_details[index_of_current_item - 1].frame_time) + how_long_after + else: + key_frame_time = (float(timing_details[index_of_current_item - 1].frame_time) + float( + timing_details[index_of_current_item].frame_time)) / 2.0 + + if len(timing_details) == 0: + new_timing = create_timings_row_at_frame_number(project_uuid, 0) + else: + new_timing = create_timings_row_at_frame_number(project_uuid, index_of_current_item, frame_time=key_frame_time) + + clip_duration = calculate_desired_duration_of_individual_clip(new_timing.uuid) + data_repo.update_specific_timing(new_timing.uuid, clip_duration=clip_duration) + + timing_details = data_repo.get_timing_list_from_project(project_uuid) + if selected_image: + save_uploaded_image(selected_image, project_uuid, timing_details[index_of_current_item].uuid, "source") + save_uploaded_image(selected_image, project_uuid, timing_details[index_of_current_item].uuid, "styled") + + if inherit_styling_settings == "Yes": + clone_styling_settings(index_of_current_item - 1, timing_details[index_of_current_item].uuid) + + 
timing_details[index_of_current_item].animation_style = project_settings.default_animation_style + + if len(timing_details) == 1: + st.session_state['current_frame_index'] = 1 + st.session_state['current_frame_uuid'] = timing_details[0].uuid + else: + st.session_state['prev_frame_index'] = min(len(timing_details), st.session_state['current_frame_index']+1) + st.session_state['current_frame_uuid'] = timing_details[st.session_state['current_frame_index']].uuid + + st.session_state['page'] = CreativeProcessType.STYLING.value + st.session_state['section_index'] = 0 + st.rerun() \ No newline at end of file diff --git a/ui_components/widgets/cropping_element.py b/ui_components/widgets/cropping_element.py index 26ebcf06..495a7352 100644 --- a/ui_components/widgets/cropping_element.py +++ b/ui_components/widgets/cropping_element.py @@ -8,10 +8,12 @@ from backend.models import InternalFileObject from shared.constants import InternalFileType -from ui_components.methods.common_methods import apply_image_transformations, fetch_image_by_stage, inpaint_in_black_space_element, reset_zoom_element, save_zoomed_image, zoom_inputs +from ui_components.methods.common_methods import apply_image_transformations, fetch_image_by_stage from ui_components.constants import WorkflowStageType from ui_components.methods.file_methods import generate_pil_image, save_or_host_file from ui_components.models import InternalProjectObject, InternalSettingObject +from ui_components.widgets.image_zoom_widgets import reset_zoom_element, save_zoomed_image, zoom_inputs +from ui_components.widgets.inpainting_element import inpaint_in_black_space_element from utils.data_repo.data_repo import DataRepo from utils import st_memory @@ -40,7 +42,7 @@ def precision_cropping_element(stage, project_uuid): project_uuid) - input_image = fetch_image_by_stage(project_uuid, stage) + input_image = fetch_image_by_stage(project_uuid, stage, st.session_state['current_frame_index'] - 1) # TODO: CORRECT-CODE check if this code 
works if not input_image: diff --git a/ui_components/widgets/frame_movement_widgets.py b/ui_components/widgets/frame_movement_widgets.py new file mode 100644 index 00000000..bc410d3e --- /dev/null +++ b/ui_components/widgets/frame_movement_widgets.py @@ -0,0 +1,176 @@ +import time +import streamlit as st +from ui_components.methods.common_methods import add_image_variant, promote_image_variant, save_uploaded_image +from ui_components.models import InternalFrameTimingObject +from utils.constants import ImageStage + +from utils.data_repo.data_repo import DataRepo + +def change_frame_position_input(timing_uuid, src): + data_repo = DataRepo() + timing: InternalFrameTimingObject = data_repo.get_timing_from_uuid(timing_uuid) + timing_list = data_repo.get_timing_list_from_project(project_uuid=timing.project.uuid) + + min_value = 1 + max_value = len(timing_list) + + new_position = st.number_input("Move to new position:", min_value=min_value, max_value=max_value, + value=timing.aux_frame_index + 1, step=1, key=f"new_position_{timing.uuid}_{src}") + + if st.button('Update Position',key=f"change_frame_position_{timing.uuid}_{src}"): + data_repo.update_specific_timing(timing.uuid, aux_frame_index=new_position - 1) + st.rerun() + + +def move_frame(direction, timing_uuid): + data_repo = DataRepo() + timing: InternalFrameTimingObject = data_repo.get_timing_from_uuid( + timing_uuid) + + if direction == "Up": + if timing.aux_frame_index == 0: + st.error("This is the first frame") + time.sleep(1) + return + + data_repo.update_specific_timing(timing.uuid, aux_frame_index=timing.aux_frame_index - 1) + elif direction == "Down": + timing_list = data_repo.get_timing_list_from_project(project_uuid=timing.project.uuid) + if timing.aux_frame_index == len(timing_list) - 1: + st.error("This is the last frame") + time.sleep(1) + return + + data_repo.update_specific_timing(timing.uuid, aux_frame_index=timing.aux_frame_index + 1) + +def move_frame_back_button(timing_uuid, orientation): + 
direction = "Up" + if orientation == "side-to-side": + arrow = "⬅️" + else: # up-down + arrow = "⬆️" + if st.button(arrow, key=f"move_frame_back_{timing_uuid}", help="Move frame back"): + move_frame(direction, timing_uuid) + st.rerun() + + +def move_frame_forward_button(timing_uuid, orientation): + direction = "Down" + if orientation == "side-to-side": + arrow = "➡️" + else: # up-down + arrow = "⬇️" + + if st.button(arrow, key=f"move_frame_forward_{timing_uuid}", help="Move frame forward"): + move_frame(direction, timing_uuid) + st.rerun() + + +def delete_frame_button(timing_uuid, show_label=False): + if show_label: + label = "Delete Frame 🗑️" + else: + label = "🗑️" + + if st.button(label, key=f"delete_frame_{timing_uuid}", help="Delete frame"): + delete_frame(timing_uuid) + st.rerun() + +def delete_frame(timing_uuid): + data_repo = DataRepo() + timing: InternalFrameTimingObject = data_repo.get_timing_from_uuid(timing_uuid) + next_timing = data_repo.get_next_timing(timing_uuid) + timing_details = data_repo.get_timing_list_from_project(project_uuid=timing.project.uuid) + + if next_timing: + data_repo.update_specific_timing( + next_timing.uuid, + interpolated_clip_list=None, + preview_video_id=None, + timed_clip_id=None + ) + + data_repo.delete_timing_from_uuid(timing.uuid) + + if timing.aux_frame_index == len(timing_details) - 1: + st.session_state['current_frame_index'] = max(1, st.session_state['current_frame_index'] - 1) + st.session_state['prev_frame_index'] = st.session_state['current_frame_index'] + st.session_state['current_frame_uuid'] = timing_details[st.session_state['current_frame_index'] - 1].uuid + +def replace_image_widget(timing_uuid, stage): + data_repo = DataRepo() + timing = data_repo.get_timing_from_uuid(timing_uuid) + timing_details = data_repo.get_timing_list_from_project(timing.project.uuid) + + replace_with = st.radio("Replace with:", [ + "Uploaded Frame", "Other Frame"], horizontal=True, key=f"replace_with_what_{stage}") + + + if replace_with 
== "Other Frame": + + which_stage_to_use_for_replacement = st.radio("Select stage to use:", [ + ImageStage.MAIN_VARIANT.value, ImageStage.SOURCE_IMAGE.value], key=f"which_stage_to_use_for_replacement_{stage}", horizontal=True) + which_image_to_use_for_replacement = st.number_input("Select image to use:", min_value=0, max_value=len( + timing_details)-1, value=0, key=f"which_image_to_use_for_replacement_{stage}") + + if which_stage_to_use_for_replacement == ImageStage.SOURCE_IMAGE.value: + selected_image = timing_details[which_image_to_use_for_replacement].source_image + + + elif which_stage_to_use_for_replacement == ImageStage.MAIN_VARIANT.value: + selected_image = timing_details[which_image_to_use_for_replacement].primary_image + + + st.image(selected_image.local_path, use_column_width=True) + + if st.button("Replace with selected frame", disabled=False,key=f"replace_with_selected_frame_{stage}"): + if stage == "source": + + data_repo.update_specific_timing(timing.uuid, source_image_id=selected_image.uuid) + st.success("Replaced") + time.sleep(1) + st.rerun() + + else: + number_of_image_variants = add_image_variant( + selected_image.uuid, timing.uuid) + promote_image_variant( + timing.uuid, number_of_image_variants - 1) + st.success("Replaced") + time.sleep(1) + st.rerun() + + elif replace_with == "Uploaded Frame": + if stage == "source": + uploaded_file = st.file_uploader("Upload Source Image", type=[ + "png", "jpeg"], accept_multiple_files=False) + if st.button("Upload Source Image"): + if uploaded_file: + timing = data_repo.get_timing_from_uuid(timing.uuid) + if save_uploaded_image(uploaded_file, timing.project.uuid, timing.uuid, "source"): + time.sleep(1.5) + st.rerun() + else: + replacement_frame = st.file_uploader("Upload a replacement frame here", type=[ + "png", "jpeg"], accept_multiple_files=False, key=f"replacement_frame_upload_{stage}") + if st.button("Replace frame", disabled=False): + images_for_model = [] + timing = 
data_repo.get_timing_from_uuid(timing.uuid) + if replacement_frame: + saved_file = save_uploaded_image(replacement_frame, timing.project.uuid, timing.uuid, "styled") + if saved_file: + number_of_image_variants = add_image_variant(saved_file.uuid, timing.uuid) + promote_image_variant( + timing.uuid, number_of_image_variants - 1) + st.success("Replaced") + time.sleep(1) + st.rerun() + + +def jump_to_single_frame_view_button(display_number, timing_details): + if st.button(f"Jump to #{display_number}"): + st.session_state['prev_frame_index'] = display_number + st.session_state['current_frame_uuid'] = timing_details[st.session_state['current_frame_index'] - 1].uuid + st.session_state['frame_styling_view_type'] = "Individual View" + st.session_state['change_view_type'] = True + st.rerun() diff --git a/ui_components/widgets/frame_selector.py b/ui_components/widgets/frame_selector.py index cb42294f..f0f422eb 100644 --- a/ui_components/widgets/frame_selector.py +++ b/ui_components/widgets/frame_selector.py @@ -1,9 +1,9 @@ import streamlit as st +from ui_components.widgets.frame_movement_widgets import delete_frame, replace_image_widget from ui_components.widgets.frame_time_selector import single_frame_time_selector from ui_components.widgets.image_carousal import display_image from utils.data_repo.data_repo import DataRepo from ui_components.constants import WorkflowStageType -from ui_components.methods.common_methods import delete_frame, replace_image_widget def frame_selector_widget(): diff --git a/ui_components/widgets/frame_style_clone_element.py b/ui_components/widgets/frame_style_clone_element.py new file mode 100644 index 00000000..0f2c5371 --- /dev/null +++ b/ui_components/widgets/frame_style_clone_element.py @@ -0,0 +1,41 @@ +import streamlit as st +from shared.constants import AIModelCategory +from ui_components.constants import WorkflowStageType +from ui_components.methods.common_methods import clone_styling_settings +from ui_components.models import 
InternalAIModelObject +from ui_components.widgets.image_carousal import display_image +from utils.common_utils import reset_styling_settings + +from utils.data_repo.data_repo import DataRepo + +def style_cloning_element(timing_details): + open_copier = st.checkbox("Copy styling settings from another frame") + if open_copier is True: + copy1, copy2 = st.columns([1, 1]) + with copy1: + frame_index = st.number_input("Which frame would you like to copy styling settings from?", min_value=1, max_value=len( + timing_details), value=st.session_state['current_frame_index'], step=1) + if st.button("Copy styling settings from this frame"): + clone_styling_settings(frame_index - 1, st.session_state['current_frame_uuid']) + reset_styling_settings(st.session_state['current_frame_uuid']) + st.rerun() + + with copy2: + display_image(timing_details[frame_index - 1].uuid, stage=WorkflowStageType.STYLED.value, clickable=False) + + if timing_details[frame_index - 1].primary_image.inference_params: + st.text("Prompt: ") + st.caption(timing_details[frame_index - 1].primary_image.inference_params.prompt) + st.text("Negative Prompt: ") + st.caption(timing_details[frame_index - 1].primary_image.inference_params.negative_prompt) + + if timing_details[frame_index - 1].primary_image.inference_params.model_uuid: + data_repo = DataRepo() + model: InternalAIModelObject = data_repo.get_ai_model_from_uuid(timing_details[frame_index - 1].primary_image.inference_params.model_uuid) + + st.text("Model:") + st.caption(model.name) + + if model.category.lower() == AIModelCategory.CONTROLNET.value: + st.text("Adapter Type:") + st.caption(timing_details[frame_index - 1].primary_image.inference_params.adapter_type) \ No newline at end of file diff --git a/ui_components/widgets/image_zoom_widgets.py b/ui_components/widgets/image_zoom_widgets.py new file mode 100644 index 00000000..54cf2fec --- /dev/null +++ b/ui_components/widgets/image_zoom_widgets.py @@ -0,0 +1,105 @@ +import uuid +import streamlit as st 
+from backend.models import InternalFileObject +from shared.constants import InternalFileType +from ui_components.constants import WorkflowStageType +from ui_components.methods.common_methods import add_image_variant, promote_image_variant +from ui_components.methods.file_methods import save_or_host_file + +from utils.data_repo.data_repo import DataRepo + +def zoom_inputs(position='in-frame', horizontal=False): + if horizontal: + col1, col2, col3, col4 = st.columns(4) + else: + col1 = col2 = col3 = col4 = st + + zoom_level_input = col1.number_input( + "Zoom Level (%)", min_value=10, max_value=1000, step=10, key=f"zoom_level_input_key_{position}", value=st.session_state.get('zoom_level_input', 100)) + + rotation_angle_input = col2.number_input( + "Rotation Angle", min_value=-360, max_value=360, step=5, key=f"rotation_angle_input_key_{position}", value=st.session_state.get('rotation_angle_input', 0)) + + x_shift = col3.number_input( + "Shift Left/Right", min_value=-1000, max_value=1000, step=5, key=f"x_shift_key_{position}", value=st.session_state.get('x_shift', 0)) + + y_shift = col4.number_input( + "Shift Up/Down", min_value=-1000, max_value=1000, step=5, key=f"y_shift_key_{position}", value=st.session_state.get('y_shift', 0)) + + # Assign values to st.session_state + st.session_state['zoom_level_input'] = zoom_level_input + st.session_state['rotation_angle_input'] = rotation_angle_input + st.session_state['x_shift'] = x_shift + st.session_state['y_shift'] = y_shift + +def save_zoomed_image(image, timing_uuid, stage, promote=False): + data_repo = DataRepo() + timing = data_repo.get_timing_from_uuid(timing_uuid) + project_uuid = timing.project.uuid + + file_name = str(uuid.uuid4()) + ".png" + + if stage == WorkflowStageType.SOURCE.value: + save_location = f"videos/{project_uuid}/assets/frames/1_selected/{file_name}" + hosted_url = save_or_host_file(image, save_location) + file_data = { + "name": file_name, + "type": InternalFileType.IMAGE.value, + "project_id": 
project_uuid + } + + if hosted_url: + file_data.update({'hosted_url': hosted_url}) + else: + file_data.update({'local_path': save_location}) + + source_image: InternalFileObject = data_repo.create_file(**file_data) + data_repo.update_specific_timing( + st.session_state['current_frame_uuid'], source_image_id=source_image.uuid) + elif stage == WorkflowStageType.STYLED.value: + save_location = f"videos/{project_uuid}/assets/frames/2_character_pipeline_completed/{file_name}" + hosted_url = save_or_host_file(image, save_location) + file_data = { + "name": file_name, + "type": InternalFileType.IMAGE.value, + "project_id": project_uuid + } + + if hosted_url: + file_data.update({'hosted_url': hosted_url}) + else: + file_data.update({'local_path': save_location}) + + styled_image: InternalFileObject = data_repo.create_file(**file_data) + + number_of_image_variants = add_image_variant( + styled_image.uuid, timing_uuid) + if promote: + promote_image_variant(timing_uuid, number_of_image_variants - 1) + + project_update_data = { + "zoom_level": st.session_state['zoom_level_input'], + "rotation_angle_value": st.session_state['rotation_angle_input'], + "x_shift": st.session_state['x_shift'], + "y_shift": st.session_state['y_shift'] + } + + data_repo.update_project_setting(project_uuid, **project_update_data) + + # TODO: **CORRECT-CODE - make a proper column for zoom details + timing_update_data = { + "zoom_details": f"{st.session_state['zoom_level_input']},{st.session_state['rotation_angle_input']},{st.session_state['x_shift']},{st.session_state['y_shift']}", + + } + data_repo.update_specific_timing(timing_uuid, **timing_update_data) + +def reset_zoom_element(): + st.session_state['zoom_level_input_key'] = 100 + st.session_state['rotation_angle_input_key'] = 0 + st.session_state['x_shift_key'] = 0 + st.session_state['y_shift_key'] = 0 + st.session_state['zoom_level_input'] = 100 + st.session_state['rotation_angle_input'] = 0 + st.session_state['x_shift'] = 0 + 
st.session_state['y_shift'] = 0 + st.rerun() \ No newline at end of file diff --git a/ui_components/widgets/inpainting_element.py b/ui_components/widgets/inpainting_element.py index d3cf65a2..0e8c0820 100644 --- a/ui_components/widgets/inpainting_element.py +++ b/ui_components/widgets/inpainting_element.py @@ -7,7 +7,8 @@ from PIL import Image import streamlit as st from streamlit_drawable_canvas import st_canvas -from ui_components.constants import WorkflowStageType +from ui_components.constants import CROPPED_IMG_LOCAL_PATH, MASK_IMG_LOCAL_PATH, TEMP_MASK_FILE, WorkflowStageType +from ui_components.methods.file_methods import add_temp_file_to_project, save_or_host_file from utils.data_repo.data_repo import DataRepo from utils import st_memory @@ -321,4 +322,104 @@ def inpainting_element(timing_uuid): st.session_state['edited_image'] = "" st.success("Image promoted!") - st.rerun() \ No newline at end of file + st.rerun() + + +# cropped_img here is a PIL image object +def inpaint_in_black_space_element(cropped_img, project_uuid, stage=WorkflowStageType.SOURCE.value): + from ui_components.methods.ml_methods import inpainting + + data_repo = DataRepo() + project_settings: InternalSettingObject = data_repo.get_project_setting( + project_uuid) + + st.markdown("##### Inpaint in black space:") + + inpaint_prompt = st.text_area( + "Prompt", value=project_settings.default_prompt) + inpaint_negative_prompt = st.text_input( + "Negative Prompt", value='edge,branches, frame, fractals, text' + project_settings.default_negative_prompt) + if 'precision_cropping_inpainted_image_uuid' not in st.session_state: + st.session_state['precision_cropping_inpainted_image_uuid'] = "" + + if st.button("Inpaint"): + width = int(project_settings.width) + height = int(project_settings.height) + + saved_cropped_img = cropped_img.resize( + (width, height), Image.ANTIALIAS) + + hosted_cropped_img_path = save_or_host_file(saved_cropped_img, CROPPED_IMG_LOCAL_PATH) + + # Convert image to grayscale + 
# Create a new image with the same size as the cropped image + mask = Image.new('RGB', cropped_img.size) + + # Get the width and height of the image + width, height = cropped_img.size + + for x in range(width): + for y in range(height): + # Get the RGB values of the pixel + pixel = cropped_img.getpixel((x, y)) + + # If the image is RGB, unpack the pixel into r, g, and b + if cropped_img.mode == 'RGB': + r, g, b = pixel + # If the image is RGBA, unpack the pixel into r, g, b, and a + elif cropped_img.mode == 'RGBA': + r, g, b, a = pixel + # If the image is grayscale ('L' for luminosity), there's only one channel + elif cropped_img.mode == 'L': + brightness = pixel + else: + raise ValueError( + f'Unsupported image mode: {cropped_img.mode}') + + # If the pixel is black, set it and its adjacent pixels to black in the new image + if r == 0 and g == 0 and b == 0: + mask.putpixel((x, y), (0, 0, 0)) # Black + # Adjust these values to change the range of adjacent pixels + for i in range(-2, 3): + for j in range(-2, 3): + # Check that the pixel is within the image boundaries + if 0 <= x + i < width and 0 <= y + j < height: + mask.putpixel((x + i, y + j), + (0, 0, 0)) # Black + # Otherwise, make the pixel white in the new image + else: + mask.putpixel((x, y), (255, 255, 255)) # White + # Save the mask image + hosted_url = save_or_host_file(mask, MASK_IMG_LOCAL_PATH) + if hosted_url: + add_temp_file_to_project(project_uuid, TEMP_MASK_FILE, hosted_url) + + cropped_img_path = hosted_cropped_img_path if hosted_cropped_img_path else CROPPED_IMG_LOCAL_PATH + inpainted_file = inpainting(cropped_img_path, inpaint_prompt, + inpaint_negative_prompt, st.session_state['current_frame_uuid'], True, pass_mask=True) + + st.session_state['precision_cropping_inpainted_image_uuid'] = inpainted_file.uuid + + if st.session_state['precision_cropping_inpainted_image_uuid']: + img_file = data_repo.get_file_from_uuid( + st.session_state['precision_cropping_inpainted_image_uuid']) + 
st.image(img_file.location, caption="Inpainted Image", + use_column_width=True, width=200) + + if stage == WorkflowStageType.SOURCE.value: + if st.button("Make Source Image"): + data_repo.update_specific_timing( + st.session_state['current_frame_uuid'], source_image_id=img_file.uuid) + st.session_state['precision_cropping_inpainted_image_uuid'] = "" + st.rerun() + + elif stage == WorkflowStageType.STYLED.value: + if st.button("Save + Promote Image"): + timing_details = data_repo.get_timing_list_from_project( + project_uuid) + number_of_image_variants = add_image_variant( + st.session_state['precision_cropping_inpainted_image_uuid'], st.session_state['current_frame_uuid']) + promote_image_variant( + st.session_state['current_frame_uuid'], number_of_image_variants - 1) + st.session_state['precision_cropping_inpainted_image_uuid'] = "" + st.rerun() \ No newline at end of file diff --git a/ui_components/widgets/list_view.py b/ui_components/widgets/list_view.py index 3e4e4588..c7e20e8d 100644 --- a/ui_components/widgets/list_view.py +++ b/ui_components/widgets/list_view.py @@ -1,12 +1,11 @@ import streamlit as st from ui_components.constants import WorkflowStageType +from ui_components.widgets.frame_movement_widgets import delete_frame, jump_to_single_frame_view_button, move_frame from utils.data_repo.data_repo import DataRepo from ui_components.widgets.frame_time_selector import single_frame_time_selector, single_frame_time_duration_setter from ui_components.widgets.image_carousal import display_image -from ui_components.methods.common_methods import delete_frame, move_frame,jump_to_single_frame_view_button,delete_frame_button,move_frame_back_button,move_frame_forward_button import math from utils.data_repo.data_repo import DataRepo -from ui_components.methods.common_methods import delete_frame from ui_components.widgets.frame_clip_generation_elements import current_individual_clip_element, current_preview_video_element, update_animation_style_element from 
ui_components.widgets.frame_time_selector import single_frame_time_selector, single_frame_time_duration_setter from ui_components.widgets.image_carousal import display_image diff --git a/ui_components/widgets/timeline_view.py b/ui_components/widgets/timeline_view.py index df493f44..afd8d101 100644 --- a/ui_components/widgets/timeline_view.py +++ b/ui_components/widgets/timeline_view.py @@ -1,7 +1,6 @@ import streamlit as st -from ui_components.methods.common_methods import delete_frame, jump_to_single_frame_view_button, move_frame,delete_frame_button,move_frame_back_button,move_frame_forward_button,change_frame_position_input,update_clip_duration_of_all_timing_frames +from ui_components.widgets.frame_movement_widgets import change_frame_position_input, delete_frame_button, jump_to_single_frame_view_button, move_frame_back_button, move_frame_forward_button from ui_components.widgets.frame_time_selector import single_frame_time_selector, single_frame_time_duration_setter -from typing import List from ui_components.widgets.image_carousal import display_image from utils.data_repo.data_repo import DataRepo from ui_components.widgets.frame_clip_generation_elements import update_animation_style_element diff --git a/ui_components/widgets/variant_comparison_element.py b/ui_components/widgets/variant_comparison_element.py index 40af5807..b5bc95f2 100644 --- a/ui_components/widgets/variant_comparison_element.py +++ b/ui_components/widgets/variant_comparison_element.py @@ -1,10 +1,10 @@ import time -import json import streamlit as st -import uuid -from typing import List -from ui_components.constants import CreativeProcessType +from streamlit_image_comparison import image_comparison +from ui_components.constants import CreativeProcessType, WorkflowStageType from ui_components.methods.common_methods import promote_image_variant, promote_video_variant +from ui_components.methods.video_methods import create_or_get_single_preview_video +from ui_components.widgets.image_carousal 
import display_image from utils.data_repo.data_repo import DataRepo @@ -85,4 +85,52 @@ def variant_comparison_element(timing_uuid, stage=CreativeProcessType.MOTION.val else: promote_image_variant(timing.uuid, which_variant - 1) time.sleep(0.5) - st.rerun() \ No newline at end of file + st.rerun() + + +def compare_to_previous_and_next_frame(project_uuid, timing_details): + data_repo = DataRepo() + mainimages1, mainimages2, mainimages3 = st.columns([1, 1, 1]) + + with mainimages1: + if st.session_state['current_frame_index'] - 2 >= 0: + previous_image = data_repo.get_timing_from_frame_number(project_uuid, frame_number=st.session_state['current_frame_index'] - 2) + st.info(f"Previous image:") + display_image( + timing_uuid=previous_image.uuid, stage=WorkflowStageType.STYLED.value, clickable=False) + + if st.button(f"Preview Interpolation From #{st.session_state['current_frame_index']-1} to #{st.session_state['current_frame_index']}", key=f"Preview Interpolation From #{st.session_state['current_frame_index']-1} to #{st.session_state['current_frame_index']}", use_container_width=True): + prev_frame_timing = data_repo.get_prev_timing(st.session_state['current_frame_uuid']) + create_or_get_single_preview_video(prev_frame_timing.uuid) + prev_frame_timing = data_repo.get_timing_from_uuid(prev_frame_timing.uuid) + if prev_frame_timing.preview_video: + st.video(prev_frame_timing.preview_video.location) + + with mainimages2: + st.success(f"Current image:") + display_image( + timing_uuid=st.session_state['current_frame_uuid'], stage=WorkflowStageType.STYLED.value, clickable=False) + + with mainimages3: + if st.session_state['current_frame_index'] + 1 <= len(timing_details): + next_image = data_repo.get_timing_from_frame_number(project_uuid, frame_number=st.session_state['current_frame_index']) + st.info(f"Next image") + display_image(timing_uuid=next_image.uuid, stage=WorkflowStageType.STYLED.value, clickable=False) + + if st.button(f"Preview Interpolation From 
#{st.session_state['current_frame_index']} to #{st.session_state['current_frame_index']+1}", key=f"Preview Interpolation From #{st.session_state['current_frame_index']} to #{st.session_state['current_frame_index']+1}", use_container_width=True): + create_or_get_single_preview_video(st.session_state['current_frame_uuid']) + current_frame = data_repo.get_timing_from_uuid(st.session_state['current_frame_uuid']) + st.video(current_frame.timed_clip.location) + + +def compare_to_source_frame(timing_details): + if timing_details[st.session_state['current_frame_index']- 1].primary_image: + img2 = timing_details[st.session_state['current_frame_index'] - 1].primary_image_location + else: + img2 = 'https://i.ibb.co/GHVfjP0/Image-Not-Yet-Created.png' + + img1 = timing_details[st.session_state['current_frame_index'] - 1].source_image.location if timing_details[st.session_state['current_frame_index'] - 1].source_image else 'https://i.ibb.co/GHVfjP0/Image-Not-Yet-Created.png' + + image_comparison(starting_position=50, + img1=img1, + img2=img2, make_responsive=False, label1=WorkflowStageType.SOURCE.value, label2=WorkflowStageType.STYLED.value) \ No newline at end of file diff --git a/utils/common_utils.py b/utils/common_utils.py index 122c26a3..c3850f37 100644 --- a/utils/common_utils.py +++ b/utils/common_utils.py @@ -170,9 +170,14 @@ def reset_styling_settings(timing_uuid): def is_process_active(custom_process_name): + # this caching assumes that the runner won't interupt or break once started + if custom_process_name + "_process_state" in st.session_state and st.session_state[custom_process_name + "_process_state"]: + return True + try: ps_output = subprocess.check_output(["ps", "aux"]).decode("utf-8") if custom_process_name in ps_output: + st.session_state[custom_process_name + "_process_state"] = True return True except subprocess.CalledProcessError: return False diff --git a/utils/constants.py b/utils/constants.py index 9b5be43b..be762234 100644 --- a/utils/constants.py +++ 
b/utils/constants.py @@ -7,7 +7,7 @@ AUTH_TOKEN = 'auth_details' -RUNNER_PROCESS_NAME = 'banodoco_runner_SFX8T' +RUNNER_PROCESS_NAME = 'banodoco_runner' class ImageStage(ExtendedEnum): SOURCE_IMAGE = 'Source Image' diff --git a/utils/data_repo/data_repo.py b/utils/data_repo/data_repo.py index 9eb02703..6ef2ec9b 100644 --- a/utils/data_repo/data_repo.py +++ b/utils/data_repo/data_repo.py @@ -1,12 +1,9 @@ # this repo serves as a middlerware between API backend and the frontend import json -import threading from shared.constants import InferenceParamType, InternalFileType, InternalResponse from shared.constants import SERVER, ServerType from ui_components.models import InferenceLogObject, InternalAIModelObject, InternalAppSettingObject, InternalBackupObject, InternalFrameTimingObject, InternalProjectObject, InternalFileObject, InternalSettingObject, InternalUserObject from utils.cache.cache_methods import cache_data -from utils.common_decorators import count_calls -import streamlit as st import wrapt from utils.data_repo.api_repo import APIRepo From b27ca77facd1fa1b4030a4e1d24472932e547c19 Mon Sep 17 00:00:00 2001 From: piyushK52 Date: Sat, 14 Oct 2023 17:41:58 +0530 Subject: [PATCH 070/164] direct db updated fixed --- ui_components/methods/common_methods.py | 2 +- ui_components/methods/file_methods.py | 2 +- ui_components/setup.py | 2 +- utils/cache/cache_methods.py | 14 +++++++++----- utils/data_repo/data_repo.py | 2 +- 5 files changed, 13 insertions(+), 9 deletions(-) diff --git a/ui_components/methods/common_methods.py b/ui_components/methods/common_methods.py index cfa36a55..d9b09674 100644 --- a/ui_components/methods/common_methods.py +++ b/ui_components/methods/common_methods.py @@ -878,7 +878,7 @@ def process_inference_output(**kwargs): os.remove(temp_output_file.name) if 'normalise_speed' in settings and settings['normalise_speed']: - output = VideoProcessor.update_video_bytes_speed(output, timing.animation_style, timing.clip_duration) + output = 
VideoProcessor.update_video_bytes_speed(output, AnimationStyleType.INTERPOLATION.value, timing.clip_duration) video_location = "videos/" + str(timing.project.uuid) + "/assets/videos/0_raw/" + str(uuid.uuid4()) + ".mp4" video = convert_bytes_to_file( diff --git a/ui_components/methods/file_methods.py b/ui_components/methods/file_methods.py index da19f536..666d90a1 100644 --- a/ui_components/methods/file_methods.py +++ b/ui_components/methods/file_methods.py @@ -146,7 +146,7 @@ def convert_bytes_to_file(file_location_to_save, mime_type, file_bytes, project_ "name": str(uuid.uuid4()) + "." + mime_type.split("/")[1] if not filename else filename, "type": InternalFileType.VIDEO.value if 'video' in mime_type else (InternalFileType.AUDIO.value if 'audio' in mime_type else InternalFileType.IMAGE.value), "project_id": project_uuid, - "inference_log_id": inference_log_id, + "inference_log_id": str(inference_log_id), "tag": tag } diff --git a/ui_components/setup.py b/ui_components/setup.py index a9947851..b14019d9 100644 --- a/ui_components/setup.py +++ b/ui_components/setup.py @@ -92,7 +92,7 @@ def setup_app_ui(): get(ProjectMetaData.DATA_UPDATE.value, None) if project_list[selected_index].meta_data else None if project_update_data: for timing_uuid in project_update_data: - _ = data_repo.get_timing_from_uuid(timing_uuid) + _ = data_repo.get_timing_from_uuid(timing_uuid, invalidate_cache=True) # removing the metadata after processing data_repo.update_project(uuid=project_list[selected_index].uuid, meta_data=json.dumps({ProjectMetaData.DATA_UPDATE.value: []})) diff --git a/utils/cache/cache_methods.py b/utils/cache/cache_methods.py index 81456c1f..552dc891 100644 --- a/utils/cache/cache_methods.py +++ b/utils/cache/cache_methods.py @@ -187,15 +187,19 @@ def _cache_update_specific_timing(self, *args, **kwargs): setattr(cls, "update_specific_timing", _cache_update_specific_timing) def _cache_get_timing_from_uuid(self, *args, **kwargs): - timing_list = 
StCache.get_all(CacheKey.TIMING_DETAILS.value) - if timing_list and len(timing_list) and len(args) > 0: - for timing in timing_list: - if timing.uuid == args[0]: - return timing + if kwargs.get('invalidate_cache', None): + timing_list = StCache.get_all(CacheKey.TIMING_DETAILS.value) + if timing_list and len(timing_list) and len(args) > 0: + for timing in timing_list: + if timing.uuid == args[0]: + return timing original_func = getattr(cls, '_original_get_timing_from_uuid') timing = original_func(self, *args, **kwargs) + if kwargs.get('invalidate_cache', None): + StCache.add(timing, CacheKey.TIMING_DETAILS.value) + return timing setattr(cls, '_original_get_timing_from_uuid', cls.get_timing_from_uuid) diff --git a/utils/data_repo/data_repo.py b/utils/data_repo/data_repo.py index 6ef2ec9b..88b23669 100644 --- a/utils/data_repo/data_repo.py +++ b/utils/data_repo/data_repo.py @@ -231,7 +231,7 @@ def delete_ai_model(self, uuid): # timing - def get_timing_from_uuid(self, uuid): + def get_timing_from_uuid(self, uuid, invalidate_cache=False): timing = self.db_repo.get_timing_from_uuid(uuid).data['data'] return InternalFrameTimingObject(**timing) if timing else None From af677bf53541339799f0346c8b986973cca24328 Mon Sep 17 00:00:00 2001 From: piyushK52 Date: Sat, 14 Oct 2023 17:56:06 +0530 Subject: [PATCH 071/164] minor cache fix --- utils/cache/cache_methods.py | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/utils/cache/cache_methods.py b/utils/cache/cache_methods.py index 552dc891..228a311f 100644 --- a/utils/cache/cache_methods.py +++ b/utils/cache/cache_methods.py @@ -187,7 +187,7 @@ def _cache_update_specific_timing(self, *args, **kwargs): setattr(cls, "update_specific_timing", _cache_update_specific_timing) def _cache_get_timing_from_uuid(self, *args, **kwargs): - if kwargs.get('invalidate_cache', None): + if not kwargs.get('invalidate_cache', False): timing_list = StCache.get_all(CacheKey.TIMING_DETAILS.value) if timing_list and len(timing_list) 
and len(args) > 0: for timing in timing_list: @@ -197,8 +197,7 @@ def _cache_get_timing_from_uuid(self, *args, **kwargs): original_func = getattr(cls, '_original_get_timing_from_uuid') timing = original_func(self, *args, **kwargs) - if kwargs.get('invalidate_cache', None): - StCache.add(timing, CacheKey.TIMING_DETAILS.value) + StCache.add(timing, CacheKey.TIMING_DETAILS.value) return timing From e22a6260f85f31dd626a99d964f4ca3fae223b5c Mon Sep 17 00:00:00 2001 From: piyushK52 Date: Sat, 14 Oct 2023 18:11:20 +0530 Subject: [PATCH 072/164] rebase changes --- .../components/frame_styling_page.py | 130 +++++++------- ui_components/methods/common_methods.py | 162 ++++++++++++++++++ ui_components/setup.py | 7 +- .../widgets/add_key_frame_element.py | 4 +- ui_components/widgets/frame_selector.py | 31 ++-- ui_components/widgets/inpainting_element.py | 4 +- 6 files changed, 255 insertions(+), 83 deletions(-) diff --git a/ui_components/components/frame_styling_page.py b/ui_components/components/frame_styling_page.py index e6bbc03d..8fed3b68 100644 --- a/ui_components/components/frame_styling_page.py +++ b/ui_components/components/frame_styling_page.py @@ -237,9 +237,9 @@ def frame_styling_page(mainheader2, project_uuid: str): elif st.session_state['list_view_type'] == "Timeline View": - with st.sidebar: + with st.sidebar: + with st.expander("🌀 Batch Styling", expanded=True): styling_element(st.session_state['current_frame_uuid'], view_type=ViewType.LIST.value) - if st.session_state['page'] == "Styling": timeline_view(shift_frames_setting, project_uuid, "Styling", header_col_3, header_col_4) @@ -247,67 +247,77 @@ def frame_styling_page(mainheader2, project_uuid: str): timeline_view(shift_frames_setting, project_uuid, "Motion", header_col_3, header_col_4) # ------- change this ---------- - elif st.session_state['frame_styling_view_type'] == "Log List": - if st.button("Refresh log list"): - st.rerun() - - # TODO: add filtering/pagination when fetching log list - log_list = 
data_repo.get_all_inference_log_list(project_uuid) - log_list = log_list[::-1] - valid_url = [False] * len(log_list) - for idx, log in enumerate(log_list): - origin_data = json.loads(log.input_params).get(InferenceParamType.ORIGIN_DATA.value, None) - if not log.status or not origin_data: - continue - - output_url = None - output_data = json.loads(log.output_details) - if 'output' in output_data and output_data['output']: - output_url = output_data['output'][0] if isinstance(output_data['output'], list) else output_data['output'] - valid_url[idx] = log.updated_on.timestamp() + 60*45 > time.time() - - c1, c2, c3, c4 = st.columns([1, 1, 1, 1]) - with c1: - st.write(log.uuid) + + with st.sidebar: + with st.expander("🔍 Inference Logging", expanded=True): - with c2: - st.write(log.status) - - with c3: + def display_sidebar_log_list(data_repo, project_uuid): + a1, _, a3 = st.columns([1, 0.2, 1]) + log_list = data_repo.get_all_inference_log_list(project_uuid) + refresh_disabled = not any(log.status in [InferenceStatus.QUEUED.value, InferenceStatus.IN_PROGRESS.value] for log in log_list) + + if a1.button("Refresh log", disabled=refresh_disabled): st.rerun() + a3.button("Jump to full log view") + + b1, b2 = st.columns([1, 1]) + items_per_page = b2.slider("Items per page", min_value=1, max_value=20, value=5, step=1) + page_number = b1.number_input('Page number', min_value=1, max_value=math.ceil(len(log_list) / items_per_page), value=1, step=1) - if valid_url[idx]: - if output_url.endswith('png') or output_url.endswith('jpg') or output_url.endswith('jpeg') or output_url.endswith('gif'): - st.image(output_url) - elif output_url.endswith('mp4'): - st.video(output_url, format='mp4', start_time=0) - else: - st.write("No data to display") - else: - st.write("No data to display") - - with c4: - if log.status == InferenceStatus.COMPLETED.value: - if valid_url[idx]: - if st.button("Add to project", key=str(log.uuid)): - origin_data['output'] = output_data['output'] - 
origin_data['log_uuid'] = log.uuid - status = process_inference_output(**origin_data) - - if status: - # delete origin data (doing this will remove the log from the list) - input_params = json.loads(log.input_params) - del input_params[InferenceParamType.ORIGIN_DATA.value] - data_repo.update_inference_log(log.uuid, input_params=json.dumps(input_params)) - else: - st.write("Failed to add to project, timing deleted") - time.sleep(1) - st.rerun() - else: - st.write("Data expired") + log_list = log_list[::-1][(page_number - 1) * items_per_page : page_number * items_per_page] + + st.markdown("---") + for idx, log in enumerate(log_list): + + origin_data = json.loads(log.input_params).get(InferenceParamType.ORIGIN_DATA.value, None) + if not log.status or not origin_data: + continue + + output_url = None + output_data = json.loads(log.output_details) + if 'output' in output_data and output_data['output']: + output_url = output_data['output'][0] if isinstance(output_data['output'], list) else output_data['output'] + + c1, c2, c3 = st.columns([1, 1 if output_url else 0.01, 1]) + + with c1: + input_params = json.loads(log.input_params) + st.caption(f"Prompt:") + prompt = input_params.get('prompt', 'No prompt found') + st.write(f'"{prompt[:30]}..."' if len(prompt) > 30 else f'"{prompt}"') + st.caption(f"Model:") + st.write(json.loads(log.output_details)['model_name'].split('/')[-1]) + + with c2: + if output_url: + if output_url.endswith('png') or output_url.endswith('jpg') or output_url.endswith('jpeg') or output_url.endswith('gif'): + st.image(output_url) + elif output_url.endswith('mp4'): + st.video(output_url, format='mp4', start_time=0) + else: + st.info("No data to display") + + with c3: + if log.status == InferenceStatus.COMPLETED.value: + st.success("Completed") + elif log.status == InferenceStatus.FAILED.value: + st.warning("Failed") + elif log.status == InferenceStatus.QUEUED.value: + st.info("Queued") + elif log.status == InferenceStatus.IN_PROGRESS.value: + 
st.info("In progress") + elif log.status == InferenceStatus.CANCELED.value: + st.warning("Canceled") + + if output_url: + if st.button(f"Jump to frame {idx}"): + st.info("Fix this.") + + # if st.button("Delete", key=f"delete_{log.uuid}"): + # data_repo.update_inference_log(log.uuid, status="") + # st.rerun() - if st.button("Delete", key=f"delete_{log.uuid}"): - data_repo.update_inference_log(log.uuid, status="") - st.rerun() + st.markdown("---") + display_sidebar_log_list(data_repo, project_uuid) diff --git a/ui_components/methods/common_methods.py b/ui_components/methods/common_methods.py index d9b09674..be6ff253 100644 --- a/ui_components/methods/common_methods.py +++ b/ui_components/methods/common_methods.py @@ -260,6 +260,168 @@ def update_timings_in_order(project_uuid): data_repo.update_specific_timing(timing.uuid, frame_time=float(i)) +def change_frame_position_input(timing_uuid, src): + data_repo = DataRepo() + timing: InternalFrameTimingObject = data_repo.get_timing_from_uuid(timing_uuid) + timing_list = data_repo.get_timing_list_from_project(project_uuid=timing.project.uuid) + + min_value = 1 + max_value = len(timing_list) + + new_position = st.number_input("Move to new position:", min_value=min_value, max_value=max_value, + value=timing.aux_frame_index + 1, step=1, key=f"new_position_{timing.uuid}_{src}") + + if st.button('Update Position',key=f"change_frame_position_{timing.uuid}_{src}"): + data_repo.update_specific_timing(timing.uuid, aux_frame_index=new_position - 1) + st.rerun() + + +def move_frame(direction, timing_uuid): + data_repo = DataRepo() + timing: InternalFrameTimingObject = data_repo.get_timing_from_uuid( + timing_uuid) + + if direction == "Up": + if timing.aux_frame_index == 0: + st.error("This is the first frame") + time.sleep(1) + return + + data_repo.update_specific_timing(timing.uuid, aux_frame_index=timing.aux_frame_index - 1) + elif direction == "Down": + timing_list = 
data_repo.get_timing_list_from_project(project_uuid=timing.project.uuid) + if timing.aux_frame_index == len(timing_list) - 1: + st.error("This is the last frame") + time.sleep(1) + return + + data_repo.update_specific_timing(timing.uuid, aux_frame_index=timing.aux_frame_index + 1) + +def move_frame_back_button(timing_uuid, orientation): + direction = "Up" + if orientation == "side-to-side": + arrow = "⬅️" + else: # up-down + arrow = "⬆️" + if st.button(arrow, key=f"move_frame_back_{timing_uuid}", help="Move frame back"): + move_frame(direction, timing_uuid) + st.rerun() + + +def move_frame_forward_button(timing_uuid, orientation): + direction = "Down" + if orientation == "side-to-side": + arrow = "➡️" + else: # up-down + arrow = "⬇️" + + if st.button(arrow, key=f"move_frame_forward_{timing_uuid}", help="Move frame forward"): + move_frame(direction, timing_uuid) + st.rerun() + + +def delete_frame_button(timing_uuid, show_label=False): + if show_label: + label = "Delete Frame 🗑️" + else: + label = "🗑️" + + if st.button(label, key=f"delete_frame_{timing_uuid}", help="Delete frame"): + delete_frame(timing_uuid) + st.rerun() + +def delete_frame(timing_uuid): + data_repo = DataRepo() + timing: InternalFrameTimingObject = data_repo.get_timing_from_uuid(timing_uuid) + next_timing = data_repo.get_next_timing(timing_uuid) + timing_details = data_repo.get_timing_list_from_project(project_uuid=timing.project.uuid) + + if next_timing: + data_repo.update_specific_timing( + next_timing.uuid, + interpolated_clip_list=None, + preview_video_id=None, + timed_clip_id=None + ) + + data_repo.delete_timing_from_uuid(timing.uuid) + + if timing.aux_frame_index == len(timing_details) - 1: + st.session_state['current_frame_index'] = max(1, st.session_state['current_frame_index'] - 1) + st.session_state['prev_frame_index'] = st.session_state['current_frame_index'] + st.session_state['current_frame_uuid'] = timing_details[st.session_state['current_frame_index'] - 1].uuid + +def 
replace_image_widget(timing_uuid, stage): + data_repo = DataRepo() + timing = data_repo.get_timing_from_uuid(timing_uuid) + timing_details = data_repo.get_timing_list_from_project(timing.project.uuid) + + replace_with = st.radio("Replace with:", [ + "Uploaded Frame", "Other Frame"], horizontal=True, key=f"replace_with_what_{stage}") + + + if replace_with == "Other Frame": + + which_stage_to_use_for_replacement = st.radio("Select stage to use:", [ + ImageStage.MAIN_VARIANT.value, ImageStage.SOURCE_IMAGE.value], key=f"which_stage_to_use_for_replacement_{stage}", horizontal=True) + which_image_to_use_for_replacement = st.number_input("Select image to use:", min_value=0, max_value=len( + timing_details)-1, value=0, key=f"which_image_to_use_for_replacement_{stage}") + + if which_stage_to_use_for_replacement == ImageStage.SOURCE_IMAGE.value: + selected_image = timing_details[which_image_to_use_for_replacement].source_image + + + elif which_stage_to_use_for_replacement == ImageStage.MAIN_VARIANT.value: + selected_image = timing_details[which_image_to_use_for_replacement].primary_image + + + st.image(selected_image.local_path, use_column_width=True) + + if st.button("Replace with selected frame", disabled=False,key=f"replace_with_selected_frame_{stage}"): + if stage == "source": + + data_repo.update_specific_timing(timing.uuid, source_image_id=selected_image.uuid) + st.success("Replaced") + time.sleep(1) + st.rerun() + + else: + number_of_image_variants = add_image_variant( + selected_image.uuid, timing.uuid) + promote_image_variant( + timing.uuid, number_of_image_variants - 1) + st.success("Replaced") + time.sleep(1) + st.rerun() + + elif replace_with == "Uploaded Frame": + if stage == "source": + uploaded_file = st.file_uploader("Upload Source Image", type=[ + "png", "jpeg"], accept_multiple_files=False) + if uploaded_file != None: + if st.button("Upload Source Image"): + if uploaded_file: + timing = data_repo.get_timing_from_uuid(timing.uuid) + if 
save_uploaded_image(uploaded_file, timing.project.uuid, timing.uuid, "source"): + time.sleep(1.5) + st.rerun() + else: + replacement_frame = st.file_uploader("Upload Styled Image", type=[ + "png", "jpeg"], accept_multiple_files=False, key=f"replacement_frame_upload_{stage}") + if replacement_frame != None: + if st.button("Replace frame", disabled=False): + images_for_model = [] + timing = data_repo.get_timing_from_uuid(timing.uuid) + if replacement_frame: + saved_file = save_uploaded_image(replacement_frame, timing.project.uuid, timing.uuid, "styled") + if saved_file: + number_of_image_variants = add_image_variant(saved_file.uuid, timing.uuid) + promote_image_variant( + timing.uuid, number_of_image_variants - 1) + st.success("Replaced") + time.sleep(1) + st.rerun() + def promote_image_variant(timing_uuid, variant_to_promote_frame_number: str): data_repo = DataRepo() timing = data_repo.get_timing_from_uuid(timing_uuid) diff --git a/ui_components/setup.py b/ui_components/setup.py index b14019d9..0ecff640 100644 --- a/ui_components/setup.py +++ b/ui_components/setup.py @@ -150,7 +150,7 @@ def setup_app_ui(): "nav-link": {"font-size": "15px", "margin": "0px", "--hover-color": "#eee"}, "nav-link-selected": {"background-color": "green"}}, manual_select=st.session_state["manual_select"]) # TODO: CORRECT-CODE - view_types = ["Individual View", "List View", "Log List"] + view_types = ["Individual View", "List View"] if 'frame_styling_view_type_index' not in st.session_state: st.session_state['frame_styling_view_type_index'] = 0 @@ -195,7 +195,7 @@ def on_change_view_type(key): elif st.session_state["main_view_type"] == "Tools & Settings": with st.sidebar: - tool_pages = ["Custom Models", "Project Settings"] + tool_pages = ["Query Logger", "Custom Models", "Project Settings"] if st.session_state["page"] not in tool_pages: st.session_state["page"] = tool_pages[0] @@ -203,7 +203,8 @@ def on_change_view_type(key): st.session_state['page'] = option_menu(None, tool_pages, 
icons=['pencil', 'palette', "hourglass", 'stopwatch'], menu_icon="cast", orientation="horizontal", key="secti2on_selector", styles={ "nav-link": {"font-size": "15px", "margin": "0px", "--hover-color": "#eee"}, "nav-link-selected": {"background-color": "green"}}, manual_select=st.session_state["manual_select"]) - + if st.session_state["page"] == "Query Logger": + st.info("Query Logger will appear here.") if st.session_state["page"] == "Custom Models": custom_models_page(st.session_state["project_uuid"]) elif st.session_state["page"] == "Project Settings": diff --git a/ui_components/widgets/add_key_frame_element.py b/ui_components/widgets/add_key_frame_element.py index 739a9207..a2a77079 100644 --- a/ui_components/widgets/add_key_frame_element.py +++ b/ui_components/widgets/add_key_frame_element.py @@ -34,7 +34,7 @@ def add_key_frame_element(timing_details, project_uuid): transformation_stage = st.radio( label="Which stage would you like to use?", options=ImageStage.value_list(), - key="transformation_stage", + key="transformation_stage-bottom", horizontal=True ) image_idx = st.number_input( @@ -46,7 +46,7 @@ def add_key_frame_element(timing_details, project_uuid): key="image_idx" ) if transformation_stage == ImageStage.SOURCE_IMAGE.value: - if timing_details[image_idx - 1].source_image != "": + if timing_details[image_idx - 1].source_image is not None and timing_details[image_idx - 1].source_image != "": selected_image_location = timing_details[image_idx - 1].source_image.location else: selected_image_location = "" diff --git a/ui_components/widgets/frame_selector.py b/ui_components/widgets/frame_selector.py index f0f422eb..5d241527 100644 --- a/ui_components/widgets/frame_selector.py +++ b/ui_components/widgets/frame_selector.py @@ -40,21 +40,20 @@ def frame_selector_widget(): - - image_1, image_2 = st.columns([1,1]) - with image_1: - st.warning(f"Guidance Image:") - display_image(st.session_state['current_frame_uuid'], stage=WorkflowStageType.SOURCE.value, 
clickable=False) - with st.expander("Replace guidance image"): + with st.expander("🖼️ Frame Details"): + image_1, image_2 = st.columns([1,1]) + with image_1: + st.warning(f"Guidance Image:") + display_image(st.session_state['current_frame_uuid'], stage=WorkflowStageType.SOURCE.value, clickable=False) + st.caption("Replace guidance image") replace_image_widget(st.session_state['current_frame_uuid'], stage=WorkflowStageType.SOURCE.value) - with image_2: - st.success(f"Main Styled Image:") - display_image(st.session_state['current_frame_uuid'], stage=WorkflowStageType.STYLED.value, clickable=False) - with st.expander("Replace styled image"): + with image_2: + st.success(f"Main Styled Image:") + display_image(st.session_state['current_frame_uuid'], stage=WorkflowStageType.STYLED.value, clickable=False) + st.caption("Replace styled image") replace_image_widget(st.session_state['current_frame_uuid'], stage=WorkflowStageType.STYLED.value) - - st.markdown("***") - - if st.button("Delete key frame"): - delete_frame(st.session_state['current_frame_uuid']) - st.rerun() \ No newline at end of file + + + if st.button("Delete key frame"): + delete_frame(st.session_state['current_frame_uuid']) + st.rerun() \ No newline at end of file diff --git a/ui_components/widgets/inpainting_element.py b/ui_components/widgets/inpainting_element.py index 0e8c0820..1f1bef1f 100644 --- a/ui_components/widgets/inpainting_element.py +++ b/ui_components/widgets/inpainting_element.py @@ -54,10 +54,10 @@ def inpainting_element(timing_uuid): st.info("You need to add a style first in the Style Selection section.") else: if stage == WorkflowStageType.SOURCE.value: - editing_image = timing.source_image.location + editing_image = timing.source_image.location if timing.source_image is not None else "" elif stage == WorkflowStageType.STYLED.value: variants = timing.alternative_images_list - editing_image = timing.primary_image_location + editing_image = timing.primary_image_location if 
timing.primary_image_location is not None else "" width = int(project_settings.width) height = int(project_settings.height) From d4896551abef5de2586a108e0a7675e0a8361485 Mon Sep 17 00:00:00 2001 From: peter942 Date: Fri, 13 Oct 2023 02:57:36 +0200 Subject: [PATCH 073/164] Logging view and more --- .../components/frame_styling_page.py | 93 ++----------------- ui_components/widgets/frame_selector.py | 21 +++-- ui_components/widgets/sidebar_logger.py | 86 +++++++++++++++++ 3 files changed, 110 insertions(+), 90 deletions(-) create mode 100644 ui_components/widgets/sidebar_logger.py diff --git a/ui_components/components/frame_styling_page.py b/ui_components/components/frame_styling_page.py index 8fed3b68..d25126b1 100644 --- a/ui_components/components/frame_styling_page.py +++ b/ui_components/components/frame_styling_page.py @@ -1,10 +1,6 @@ -from datetime import timedelta -import json -import time -import streamlit as st -from shared.constants import InferenceParamType, InferenceStatus, ViewType -from shared.utils import is_url_valid +import streamlit as st +from shared.constants import ViewType from ui_components.methods.common_methods import process_inference_output from ui_components.methods.ml_methods import trigger_restyling_process @@ -21,11 +17,10 @@ from ui_components.widgets.animation_style_element import animation_style_element from ui_components.widgets.inpainting_element import inpainting_element from ui_components.widgets.drawing_element import drawing_element +from ui_components.widgets.sidebar_logger import sidebar_logger from ui_components.widgets.list_view import list_view_set_up, page_toggle, styling_list_view,motion_list_view from utils import st_memory - -import math from ui_components.constants import CreativeProcessType, WorkflowStageType from utils.data_repo.data_repo import DataRepo @@ -237,87 +232,17 @@ def frame_styling_page(mainheader2, project_uuid: str): elif st.session_state['list_view_type'] == "Timeline View": - with st.sidebar: - with 
st.expander("🌀 Batch Styling", expanded=True): - styling_element(st.session_state['current_frame_uuid'], view_type=ViewType.LIST.value) + if st.session_state['page'] == "Styling": + with st.sidebar: + with st.expander("🌀 Batch Styling", expanded=False): + styling_element(st.session_state['current_frame_uuid'], view_type=ViewType.LIST.value) timeline_view(shift_frames_setting, project_uuid, "Styling", header_col_3, header_col_4) elif st.session_state['page'] == "Motion": timeline_view(shift_frames_setting, project_uuid, "Motion", header_col_3, header_col_4) - - # ------- change this ---------- - + with st.sidebar: with st.expander("🔍 Inference Logging", expanded=True): - - def display_sidebar_log_list(data_repo, project_uuid): - a1, _, a3 = st.columns([1, 0.2, 1]) - - log_list = data_repo.get_all_inference_log_list(project_uuid) - refresh_disabled = not any(log.status in [InferenceStatus.QUEUED.value, InferenceStatus.IN_PROGRESS.value] for log in log_list) - - if a1.button("Refresh log", disabled=refresh_disabled): st.rerun() - a3.button("Jump to full log view") - - b1, b2 = st.columns([1, 1]) - items_per_page = b2.slider("Items per page", min_value=1, max_value=20, value=5, step=1) - page_number = b1.number_input('Page number', min_value=1, max_value=math.ceil(len(log_list) / items_per_page), value=1, step=1) - - log_list = log_list[::-1][(page_number - 1) * items_per_page : page_number * items_per_page] - - st.markdown("---") - - for idx, log in enumerate(log_list): - - origin_data = json.loads(log.input_params).get(InferenceParamType.ORIGIN_DATA.value, None) - if not log.status or not origin_data: - continue - - output_url = None - output_data = json.loads(log.output_details) - if 'output' in output_data and output_data['output']: - output_url = output_data['output'][0] if isinstance(output_data['output'], list) else output_data['output'] - - c1, c2, c3 = st.columns([1, 1 if output_url else 0.01, 1]) - - with c1: - input_params = json.loads(log.input_params) - 
st.caption(f"Prompt:") - prompt = input_params.get('prompt', 'No prompt found') - st.write(f'"{prompt[:30]}..."' if len(prompt) > 30 else f'"{prompt}"') - st.caption(f"Model:") - st.write(json.loads(log.output_details)['model_name'].split('/')[-1]) - - with c2: - if output_url: - if output_url.endswith('png') or output_url.endswith('jpg') or output_url.endswith('jpeg') or output_url.endswith('gif'): - st.image(output_url) - elif output_url.endswith('mp4'): - st.video(output_url, format='mp4', start_time=0) - else: - st.info("No data to display") - - with c3: - if log.status == InferenceStatus.COMPLETED.value: - st.success("Completed") - elif log.status == InferenceStatus.FAILED.value: - st.warning("Failed") - elif log.status == InferenceStatus.QUEUED.value: - st.info("Queued") - elif log.status == InferenceStatus.IN_PROGRESS.value: - st.info("In progress") - elif log.status == InferenceStatus.CANCELED.value: - st.warning("Canceled") - - if output_url: - if st.button(f"Jump to frame {idx}"): - st.info("Fix this.") - # if st.button("Delete", key=f"delete_{log.uuid}"): - # data_repo.update_inference_log(log.uuid, status="") - # st.rerun() - - st.markdown("---") - - display_sidebar_log_list(data_repo, project_uuid) + sidebar_logger(data_repo, project_uuid) diff --git a/ui_components/widgets/frame_selector.py b/ui_components/widgets/frame_selector.py index 5d241527..e5b3b228 100644 --- a/ui_components/widgets/frame_selector.py +++ b/ui_components/widgets/frame_selector.py @@ -40,19 +40,28 @@ def frame_selector_widget(): - with st.expander("🖼️ Frame Details"): - image_1, image_2 = st.columns([1,1]) - with image_1: + with st.expander(f"🖼️ Frame #{st.session_state['current_frame_index']} Details"): + a1, a2 = st.columns([1,1]) + with a1: st.warning(f"Guidance Image:") display_image(st.session_state['current_frame_uuid'], stage=WorkflowStageType.SOURCE.value, clickable=False) - st.caption("Replace guidance image") - 
replace_image_widget(st.session_state['current_frame_uuid'], stage=WorkflowStageType.SOURCE.value) - with image_2: + + with a2: st.success(f"Main Styled Image:") display_image(st.session_state['current_frame_uuid'], stage=WorkflowStageType.STYLED.value, clickable=False) + + st.markdown("---") + + b1, b2 = st.columns([1,1]) + with b1: + st.caption("Replace guidance image") + replace_image_widget(st.session_state['current_frame_uuid'], stage=WorkflowStageType.SOURCE.value) + + with b2: st.caption("Replace styled image") replace_image_widget(st.session_state['current_frame_uuid'], stage=WorkflowStageType.STYLED.value) + st.markdown("---") if st.button("Delete key frame"): delete_frame(st.session_state['current_frame_uuid']) diff --git a/ui_components/widgets/sidebar_logger.py b/ui_components/widgets/sidebar_logger.py new file mode 100644 index 00000000..155a0500 --- /dev/null +++ b/ui_components/widgets/sidebar_logger.py @@ -0,0 +1,86 @@ +import streamlit as st + +from shared.constants import InferenceParamType, InferenceStatus + +import json +import math + +def sidebar_logger(data_repo, project_uuid): + a1, _, a3 = st.columns([1, 0.2, 1]) + + log_list = data_repo.get_all_inference_log_list(project_uuid) + refresh_disabled = not any(log.status in [InferenceStatus.QUEUED.value, InferenceStatus.IN_PROGRESS.value] for log in log_list) + + if a1.button("Refresh log", disabled=refresh_disabled): st.rerun() + a3.button("Jump to full log view") + + # Add radio button for status selection + status_option = st.radio("Statuses to display:", options=["All", "In Progress", "Succeeded", "Failed"], key="status_option", index=0, horizontal=True) + + # Filter log_list based on selected status + if status_option == "In Progress": + log_list = [log for log in log_list if log.status in [InferenceStatus.QUEUED.value, InferenceStatus.IN_PROGRESS.value]] + elif status_option == "Succeeded": + log_list = [log for log in log_list if log.status == InferenceStatus.COMPLETED.value] + elif 
status_option == "Failed": + log_list = [log for log in log_list if log.status == InferenceStatus.FAILED.value] + + b1, b2 = st.columns([1, 1]) + items_per_page = b2.slider("Items per page", min_value=1, max_value=20, value=5, step=1) + page_number = b1.number_input('Page number', min_value=1, max_value=math.ceil(len(log_list) / items_per_page), value=1, step=1) + + log_list = log_list[::-1][(page_number - 1) * items_per_page : page_number * items_per_page] + + st.markdown("---") + + for idx, log in enumerate(log_list): + + origin_data = json.loads(log.input_params).get(InferenceParamType.ORIGIN_DATA.value, None) + if not log.status or not origin_data: + continue + + output_url = None + output_data = json.loads(log.output_details) + if 'output' in output_data and output_data['output']: + output_url = output_data['output'][0] if isinstance(output_data['output'], list) else output_data['output'] + + c1, c2, c3 = st.columns([1, 1 if output_url else 0.01, 1]) + + with c1: + input_params = json.loads(log.input_params) + st.caption(f"Prompt:") + prompt = input_params.get('prompt', 'No prompt found') + st.write(f'"{prompt[:30]}..."' if len(prompt) > 30 else f'"{prompt}"') + st.caption(f"Model:") + st.write(json.loads(log.output_details)['model_name'].split('/')[-1]) + + with c2: + if output_url: + if output_url.endswith('png') or output_url.endswith('jpg') or output_url.endswith('jpeg') or output_url.endswith('gif'): + st.image(output_url) + elif output_url.endswith('mp4'): + st.video(output_url, format='mp4', start_time=0) + else: + st.info("No data to display") + + with c3: + if log.status == InferenceStatus.COMPLETED.value: + st.success("Completed") + elif log.status == InferenceStatus.FAILED.value: + st.warning("Failed") + elif log.status == InferenceStatus.QUEUED.value: + st.info("Queued") + elif log.status == InferenceStatus.IN_PROGRESS.value: + st.info("In progress") + elif log.status == InferenceStatus.CANCELED.value: + st.warning("Canceled") + + if output_url: + 
if st.button(f"Jump to frame {idx}"): + st.info("Fix this.") + + # if st.button("Delete", key=f"delete_{log.uuid}"): + # data_repo.update_inference_log(log.uuid, status="") + # st.rerun() + + st.markdown("---") \ No newline at end of file From a54b76e4e96ff5ce725470d682aa4191893f1185 Mon Sep 17 00:00:00 2001 From: piyushK52 Date: Sat, 14 Oct 2023 18:12:11 +0530 Subject: [PATCH 074/164] rebase changes --- .../components/frame_styling_page.py | 14 +- ui_components/components/mood_board_page.py | 126 ++++++++++++++++++ ui_components/setup.py | 6 +- .../widgets/variant_comparison_grid.py | 36 +++++ 4 files changed, 179 insertions(+), 3 deletions(-) create mode 100644 ui_components/components/mood_board_page.py create mode 100644 ui_components/widgets/variant_comparison_grid.py diff --git a/ui_components/components/frame_styling_page.py b/ui_components/components/frame_styling_page.py index d25126b1..c08662c6 100644 --- a/ui_components/components/frame_styling_page.py +++ b/ui_components/components/frame_styling_page.py @@ -18,9 +18,12 @@ from ui_components.widgets.inpainting_element import inpainting_element from ui_components.widgets.drawing_element import drawing_element from ui_components.widgets.sidebar_logger import sidebar_logger +from ui_components.widgets.variant_comparison_grid import variant_comparison_grid from ui_components.widgets.list_view import list_view_set_up, page_toggle, styling_list_view,motion_list_view from utils import st_memory +import time + from ui_components.constants import CreativeProcessType, WorkflowStageType from utils.data_repo.data_repo import DataRepo @@ -96,13 +99,19 @@ def frame_styling_page(mainheader2, project_uuid: str): elif st.session_state['page'] == CreativeProcessType.STYLING.value: # carousal_of_images_element(project_uuid, stage=WorkflowStageType.STYLED.value) comparison_values = [ - "Other Variants", "Source Frame", "Previous & Next Frame", "None"] + "Single Variants", "All Other Variants","Source Frame", "Previous & 
Next Frame", "None"] st.session_state['show_comparison'] = st_memory.radio("Show comparison to:", options=comparison_values, horizontal=True, key="show_comparison_radio") - if st.session_state['show_comparison'] == "Other Variants": + if st.session_state['show_comparison'] == "Single Variants": variant_comparison_element(st.session_state['current_frame_uuid'], stage=CreativeProcessType.STYLING.value) + + elif st.session_state['show_comparison'] == "All Other Variants": + + + + variant_comparison_grid(st.session_state['current_frame_uuid'], stage=CreativeProcessType.STYLING.value) elif st.session_state['show_comparison'] == "Source Frame": compare_to_source_frame(timing_details) @@ -243,6 +252,7 @@ def frame_styling_page(mainheader2, project_uuid: str): timeline_view(shift_frames_setting, project_uuid, "Motion", header_col_3, header_col_4) with st.sidebar: + with st.expander("🔍 Inference Logging", expanded=True): sidebar_logger(data_repo, project_uuid) diff --git a/ui_components/components/mood_board_page.py b/ui_components/components/mood_board_page.py new file mode 100644 index 00000000..19905967 --- /dev/null +++ b/ui_components/components/mood_board_page.py @@ -0,0 +1,126 @@ +import streamlit as st +from utils.data_repo.data_repo import DataRepo +from shared.constants import AIModelType + + +def mood_board_page(project_uuid): + + def get_varied_text(styling_instructions="", character_instructions="", action_instructions="", scene_instructions=""): + text_list = [] + + if styling_instructions: + system_instructions = "PLACEHOLDER_STYLING" + # result = query_model(styling_instructions, system_instructions) + result = "Styling instructions" + text_list.append(result) + + if character_instructions: + system_instructions = "PLACEHOLDER_CHARACTER" + # result = query_model(character_instructions, system_instructions) + result = "Character instructions" + text_list.append(result) + + if action_instructions: + system_instructions = "PLACEHOLDER_ACTION" + # result = 
query_model(action_instructions, system_instructions) + result = "Action instructions" + text_list.append(result) + + if scene_instructions: + system_instructions = "PLACEHOLDER_SCENE" + # result = query_model(scene_instructions, system_instructions) + result = "Scene instructions" + text_list.append(result) + + return ", ".join(text_list) + + data_repo = DataRepo() + st.subheader("Mood Board") + a1, a2, a3 = st.columns([0.5, 1, 0.5]) + with a2: + prompt = st.text_area("What's your prompt?", key="prompt") + + + b1, b2, b3, b4 = st.columns([1, 1, 1, 1]) + with b1: + variate_styling = st.checkbox("Variate styling", key="variate_styling") + if variate_styling: + styling_instructions = st.text_area("How would you like to variate styling?", key="variate_styling_textarea") + else: + styling_instructions = "" + + with b2: + variate_character = st.checkbox("Variate character", key="variate_character") + if variate_character: + character_instructions = st.text_area("How would you like to variate character?", key="variate_character_textarea") + else: + character_instructions = "" + + with b3: + variate_action = st.checkbox("Variate action", key="variate_action") + if variate_action: + action_instructions = st.text_area("How would you like to variate action?", key="variate_action_textarea") + else: + action_instructions = "" + + with b4: + variate_scene = st.checkbox("Variate scene", key="variate_scene") + if variate_scene: + scene_instructions = st.text_area("How would you like to variate the scene?", key="variate_scene_textarea") + else: + scene_instructions = "" + + model_list = data_repo.get_all_ai_model_list(model_type_list=[AIModelType.TXT2IMG.value], custom_trained=False) + model_name_list = list(set([m.name for m in model_list])) + + c1, c2, c3 = st.columns([0.25, 1, 0.25]) + with c2: + models_to_use = st.multiselect("Which models would you like to use?", model_name_list, key="models_to_use", default=model_name_list) + + d1, d2, d3 = st.columns([0.5, 1, 0.5]) + with 
d2: + number_to_generate = st.slider("How many images would you like to generate?", min_value=1, max_value=100, value=10, step=1, key="number_to_generate") + + if st.button("Generate images", key="generate_images", use_container_width=True, type="primary"): + st.info("Generating images...") + counter = 0 + varied_text = "" + for _ in range(number_to_generate): + for model_name in models_to_use: + if counter % 4 == 0 and (styling_instructions or character_instructions or action_instructions or scene_instructions): + varied_text = get_varied_text(styling_instructions, character_instructions, action_instructions, scene_instructions) + prompt_with_variations = f"{prompt}, {varied_text}" if prompt else varied_text + st.write(f"Prompt: '{prompt_with_variations}'") + st.write(f"Model: {model_name}") + counter += 1 + + timing = data_repo.get_timing_from_uuid("c414f700-680b-4712-a9c5-22c9935d7855") + + variants = timing.alternative_images_list + + st.markdown("***") + num_columns = st.slider('Number of columns', min_value=1, max_value=10, value=4) + + num_items_per_page = 30 + num_pages = len(variants) // num_items_per_page + if len(variants) % num_items_per_page > 0: + num_pages += 1 # Add extra page if there are remaining items + + page_number = st.radio("Select page", options=range(1, num_pages + 1)) + + start_index = (page_number - 1) * num_items_per_page + end_index = start_index + num_items_per_page + + for i in range(start_index, min(end_index, len(variants)), num_columns): + cols = st.columns(num_columns) + for j in range(num_columns): + if i + j < len(variants): + with cols[j]: + st.image(variants[i + j].location, use_column_width=True) + with st.expander(f'Variant #{i + j + 1}', False): + st.info("Instructions: PLACEHOLDER") + + if st.button(f"Add to timeline", key=f"Promote Variant #{i + j + 1} for {st.session_state['current_frame_index']}", help="Promote this variant to the primary image", use_container_width=True): + promote_image_variant(timing.uuid, i + j) + 
st.rerun() + st.markdown("***") \ No newline at end of file diff --git a/ui_components/setup.py b/ui_components/setup.py index 0ecff640..4ea73313 100644 --- a/ui_components/setup.py +++ b/ui_components/setup.py @@ -12,6 +12,7 @@ from ui_components.components.new_project_page import new_project_page from ui_components.components.project_settings_page import project_settings_page from ui_components.components.video_rendering_page import video_rendering_page +from ui_components.components.mood_board_page import mood_board_page from streamlit_option_menu import option_menu from ui_components.constants import CreativeProcessType from ui_components.models import InternalAppSettingObject @@ -195,7 +196,7 @@ def on_change_view_type(key): elif st.session_state["main_view_type"] == "Tools & Settings": with st.sidebar: - tool_pages = ["Query Logger", "Custom Models", "Project Settings"] + tool_pages = ["Query Logger", "Mood Board", "Custom Models", "Project Settings"] if st.session_state["page"] not in tool_pages: st.session_state["page"] = tool_pages[0] @@ -209,6 +210,9 @@ def on_change_view_type(key): custom_models_page(st.session_state["project_uuid"]) elif st.session_state["page"] == "Project Settings": project_settings_page(st.session_state["project_uuid"]) + elif st.session_state["page"] == "Mood Board": + mood_board_page(st.session_state["project_uuid"]) + elif st.session_state["main_view_type"] == "Video Rendering": diff --git a/ui_components/widgets/variant_comparison_grid.py b/ui_components/widgets/variant_comparison_grid.py new file mode 100644 index 00000000..f88fa9f5 --- /dev/null +++ b/ui_components/widgets/variant_comparison_grid.py @@ -0,0 +1,36 @@ +import streamlit as st +from ui_components.constants import CreativeProcessType +from ui_components.methods.common_methods import promote_image_variant +from utils.data_repo.data_repo import DataRepo + + +def variant_comparison_grid(timing_uuid, stage=CreativeProcessType.MOTION.value): + data_repo = DataRepo() + + 
timing = data_repo.get_timing_from_uuid(timing_uuid) + variants = timing.alternative_images_list + + + + current_variant = timing.primary_interpolated_video_index if stage == CreativeProcessType.MOTION.value else int( + timing.primary_variant_index) + + num_columns = st.slider('Number of columns', min_value=1, max_value=10, value=4) + + for i in range(0, len(variants), num_columns): + cols = st.columns(num_columns) + for j in range(num_columns): + if i + j < len(variants): + with cols[j]: + if stage == CreativeProcessType.MOTION.value: + st.video(variants[i + j].location, format='mp4', start_time=0) if variants[i + j] else st.error("No video present") + else: + st.image(variants[i + j].location, use_column_width=True) + + if i + j == current_variant: + st.success("**Main variant**") + else: + if st.button(f"Promote Variant #{i + j + 1}", key=f"Promote Variant #{i + j + 1} for {st.session_state['current_frame_index']}", help="Promote this variant to the primary image", use_container_width=True): + + promote_image_variant(timing.uuid, i + j) + st.rerun() \ No newline at end of file From f7bcbd9afd4471a99b30c9e8f47d68e6b7deeb4e Mon Sep 17 00:00:00 2001 From: piyushK52 Date: Sat, 14 Oct 2023 18:12:58 +0530 Subject: [PATCH 075/164] rebase changes --- ui_components/methods/common_methods.py | 162 ------------------------ 1 file changed, 162 deletions(-) diff --git a/ui_components/methods/common_methods.py b/ui_components/methods/common_methods.py index be6ff253..d9b09674 100644 --- a/ui_components/methods/common_methods.py +++ b/ui_components/methods/common_methods.py @@ -260,168 +260,6 @@ def update_timings_in_order(project_uuid): data_repo.update_specific_timing(timing.uuid, frame_time=float(i)) -def change_frame_position_input(timing_uuid, src): - data_repo = DataRepo() - timing: InternalFrameTimingObject = data_repo.get_timing_from_uuid(timing_uuid) - timing_list = data_repo.get_timing_list_from_project(project_uuid=timing.project.uuid) - - min_value = 1 - max_value 
= len(timing_list) - - new_position = st.number_input("Move to new position:", min_value=min_value, max_value=max_value, - value=timing.aux_frame_index + 1, step=1, key=f"new_position_{timing.uuid}_{src}") - - if st.button('Update Position',key=f"change_frame_position_{timing.uuid}_{src}"): - data_repo.update_specific_timing(timing.uuid, aux_frame_index=new_position - 1) - st.rerun() - - -def move_frame(direction, timing_uuid): - data_repo = DataRepo() - timing: InternalFrameTimingObject = data_repo.get_timing_from_uuid( - timing_uuid) - - if direction == "Up": - if timing.aux_frame_index == 0: - st.error("This is the first frame") - time.sleep(1) - return - - data_repo.update_specific_timing(timing.uuid, aux_frame_index=timing.aux_frame_index - 1) - elif direction == "Down": - timing_list = data_repo.get_timing_list_from_project(project_uuid=timing.project.uuid) - if timing.aux_frame_index == len(timing_list) - 1: - st.error("This is the last frame") - time.sleep(1) - return - - data_repo.update_specific_timing(timing.uuid, aux_frame_index=timing.aux_frame_index + 1) - -def move_frame_back_button(timing_uuid, orientation): - direction = "Up" - if orientation == "side-to-side": - arrow = "⬅️" - else: # up-down - arrow = "⬆️" - if st.button(arrow, key=f"move_frame_back_{timing_uuid}", help="Move frame back"): - move_frame(direction, timing_uuid) - st.rerun() - - -def move_frame_forward_button(timing_uuid, orientation): - direction = "Down" - if orientation == "side-to-side": - arrow = "➡️" - else: # up-down - arrow = "⬇️" - - if st.button(arrow, key=f"move_frame_forward_{timing_uuid}", help="Move frame forward"): - move_frame(direction, timing_uuid) - st.rerun() - - -def delete_frame_button(timing_uuid, show_label=False): - if show_label: - label = "Delete Frame 🗑️" - else: - label = "🗑️" - - if st.button(label, key=f"delete_frame_{timing_uuid}", help="Delete frame"): - delete_frame(timing_uuid) - st.rerun() - -def delete_frame(timing_uuid): - data_repo = DataRepo() 
- timing: InternalFrameTimingObject = data_repo.get_timing_from_uuid(timing_uuid) - next_timing = data_repo.get_next_timing(timing_uuid) - timing_details = data_repo.get_timing_list_from_project(project_uuid=timing.project.uuid) - - if next_timing: - data_repo.update_specific_timing( - next_timing.uuid, - interpolated_clip_list=None, - preview_video_id=None, - timed_clip_id=None - ) - - data_repo.delete_timing_from_uuid(timing.uuid) - - if timing.aux_frame_index == len(timing_details) - 1: - st.session_state['current_frame_index'] = max(1, st.session_state['current_frame_index'] - 1) - st.session_state['prev_frame_index'] = st.session_state['current_frame_index'] - st.session_state['current_frame_uuid'] = timing_details[st.session_state['current_frame_index'] - 1].uuid - -def replace_image_widget(timing_uuid, stage): - data_repo = DataRepo() - timing = data_repo.get_timing_from_uuid(timing_uuid) - timing_details = data_repo.get_timing_list_from_project(timing.project.uuid) - - replace_with = st.radio("Replace with:", [ - "Uploaded Frame", "Other Frame"], horizontal=True, key=f"replace_with_what_{stage}") - - - if replace_with == "Other Frame": - - which_stage_to_use_for_replacement = st.radio("Select stage to use:", [ - ImageStage.MAIN_VARIANT.value, ImageStage.SOURCE_IMAGE.value], key=f"which_stage_to_use_for_replacement_{stage}", horizontal=True) - which_image_to_use_for_replacement = st.number_input("Select image to use:", min_value=0, max_value=len( - timing_details)-1, value=0, key=f"which_image_to_use_for_replacement_{stage}") - - if which_stage_to_use_for_replacement == ImageStage.SOURCE_IMAGE.value: - selected_image = timing_details[which_image_to_use_for_replacement].source_image - - - elif which_stage_to_use_for_replacement == ImageStage.MAIN_VARIANT.value: - selected_image = timing_details[which_image_to_use_for_replacement].primary_image - - - st.image(selected_image.local_path, use_column_width=True) - - if st.button("Replace with selected frame", 
disabled=False,key=f"replace_with_selected_frame_{stage}"): - if stage == "source": - - data_repo.update_specific_timing(timing.uuid, source_image_id=selected_image.uuid) - st.success("Replaced") - time.sleep(1) - st.rerun() - - else: - number_of_image_variants = add_image_variant( - selected_image.uuid, timing.uuid) - promote_image_variant( - timing.uuid, number_of_image_variants - 1) - st.success("Replaced") - time.sleep(1) - st.rerun() - - elif replace_with == "Uploaded Frame": - if stage == "source": - uploaded_file = st.file_uploader("Upload Source Image", type=[ - "png", "jpeg"], accept_multiple_files=False) - if uploaded_file != None: - if st.button("Upload Source Image"): - if uploaded_file: - timing = data_repo.get_timing_from_uuid(timing.uuid) - if save_uploaded_image(uploaded_file, timing.project.uuid, timing.uuid, "source"): - time.sleep(1.5) - st.rerun() - else: - replacement_frame = st.file_uploader("Upload Styled Image", type=[ - "png", "jpeg"], accept_multiple_files=False, key=f"replacement_frame_upload_{stage}") - if replacement_frame != None: - if st.button("Replace frame", disabled=False): - images_for_model = [] - timing = data_repo.get_timing_from_uuid(timing.uuid) - if replacement_frame: - saved_file = save_uploaded_image(replacement_frame, timing.project.uuid, timing.uuid, "styled") - if saved_file: - number_of_image_variants = add_image_variant(saved_file.uuid, timing.uuid) - promote_image_variant( - timing.uuid, number_of_image_variants - 1) - st.success("Replaced") - time.sleep(1) - st.rerun() - def promote_image_variant(timing_uuid, variant_to_promote_frame_number: str): data_repo = DataRepo() timing = data_repo.get_timing_from_uuid(timing_uuid) From 4b9d2e4ee7298040ed3f7f72eb5af1d35fdb8189 Mon Sep 17 00:00:00 2001 From: peter942 Date: Sat, 14 Oct 2023 16:09:47 +0200 Subject: [PATCH 076/164] Rearranging things --- .../components/frame_styling_page.py | 67 +++------ ui_components/setup.py | 53 ++++--- ui_components/widgets/list_view.py | 
131 ------------------ .../style_explorer_element.py} | 42 +++--- 4 files changed, 64 insertions(+), 229 deletions(-) delete mode 100644 ui_components/widgets/list_view.py rename ui_components/{components/mood_board_page.py => widgets/style_explorer_element.py} (78%) diff --git a/ui_components/components/frame_styling_page.py b/ui_components/components/frame_styling_page.py index 8e6035f8..2a446b28 100644 --- a/ui_components/components/frame_styling_page.py +++ b/ui_components/components/frame_styling_page.py @@ -2,10 +2,10 @@ import streamlit as st from shared.constants import ViewType -from ui_components.methods.common_methods import add_key_frame,compare_to_previous_and_next_frame,compare_to_source_frame, process_inference_output,style_cloning_element, promote_image_variant +from ui_components.methods.common_methods import add_key_frame,compare_to_previous_and_next_frame,compare_to_source_frame,style_cloning_element from ui_components.methods.ml_methods import trigger_restyling_process from ui_components.widgets.cropping_element import cropping_selector_element -from ui_components.widgets.frame_clip_generation_elements import current_preview_video_element, update_animation_style_element +from ui_components.widgets.frame_clip_generation_elements import current_preview_video_element from ui_components.widgets.frame_selector import frame_selector_widget from ui_components.widgets.image_carousal import display_image from ui_components.widgets.prompt_finder import prompt_finder_element @@ -17,8 +17,8 @@ from ui_components.widgets.inpainting_element import inpainting_element from ui_components.widgets.drawing_element import drawing_element from ui_components.widgets.sidebar_logger import sidebar_logger +from ui_components.widgets.style_explorer_element import style_explorer_element from ui_components.widgets.variant_comparison_grid import variant_comparison_grid -from ui_components.widgets.list_view import list_view_set_up, page_toggle, 
styling_list_view,motion_list_view from utils import st_memory import time @@ -54,7 +54,7 @@ def frame_styling_page(mainheader2, project_uuid: str): if 'frame_styling_view_type' not in st.session_state: - st.session_state['frame_styling_view_type'] = "Individual View" + st.session_state['frame_styling_view_type'] = "Individual" st.session_state['frame_styling_view_type_index'] = 0 @@ -63,7 +63,7 @@ def frame_styling_page(mainheader2, project_uuid: str): # round down st.session_state['which_image']to nearest 10 - if st.session_state['frame_styling_view_type'] == "List View": + if st.session_state['frame_styling_view_type'] == "Timeline" or st.session_state['frame_styling_view_type'] == "Explorer": st.markdown( f"#### :red[{st.session_state['main_view_type']}] > **:green[{st.session_state['frame_styling_view_type']}]** > :orange[{st.session_state['page']}]") else: @@ -72,7 +72,11 @@ def frame_styling_page(mainheader2, project_uuid: str): project_settings = data_repo.get_project_setting(project_uuid) - if st.session_state['frame_styling_view_type'] == "Individual View": + if st.session_state['frame_styling_view_type'] == "Explorer": + style_explorer_element(project_uuid) + + + elif st.session_state['frame_styling_view_type'] == "Individual": with st.sidebar: frame_selector_widget() @@ -170,7 +174,7 @@ def frame_styling_page(mainheader2, project_uuid: str): st.markdown("***") st.info( - "You can restyle multiple frames at once in the List view.") + "You can restyle multiple frames at once in the Timeline view.") st.markdown("***") @@ -203,53 +207,24 @@ def frame_styling_page(mainheader2, project_uuid: str): add_key_frame(selected_image, inherit_styling_settings, how_long_after, which_stage_for_starting_image) st.rerun() - elif st.session_state['frame_styling_view_type'] == "List View": + elif st.session_state['frame_styling_view_type'] == "Timeline": st.markdown("---") - header_col_1, header_col_2, header_col_3, header_col_4, header_col_5 = st.columns([1.25,0.25,4, 
1.5, 1.5]) - with header_col_1: - st.session_state['list_view_type'] = st_memory.radio("View type:", options=["Timeline View","Detailed View"], key="list_view_type_slider") - + header_col_3, header_col_4, header_col_5 = st.columns([4, 1.5, 1.5]) + with header_col_5: shift_frames_setting = st.toggle("Shift Frames", help="If set to True, it will shift the frames after your adjustment forward by the amount of time you move.") - if st.session_state['list_view_type'] == "Detailed View": - - with header_col_4: - num_pages, items_per_page = list_view_set_up(timing_details, project_uuid) - start_index, end_index = page_toggle(num_pages, items_per_page,project_uuid, position='top') - st.markdown("***") - - if st.session_state['page'] == "Styling": - - with st.sidebar: + if st.session_state['page'] == "Styling": + with st.sidebar: + with st.expander("🌀 Batch Styling", expanded=False): styling_element(st.session_state['current_frame_uuid'], view_type=ViewType.LIST.value) - - styling_list_view(start_index, end_index, shift_frames_setting, project_uuid) - - st.markdown("***") - - # Update the current page in session state - elif st.session_state['page'] == "Motion": - - motion_list_view(start_index, end_index, shift_frames_setting, project_uuid) - - start_index, end_index = page_toggle(num_pages, items_per_page,project_uuid, position='bottom') - - elif st.session_state['list_view_type'] == "Timeline View": - - - - if st.session_state['page'] == "Styling": - with st.sidebar: - with st.expander("🌀 Batch Styling", expanded=False): - styling_element(st.session_state['current_frame_uuid'], view_type=ViewType.LIST.value) - timeline_view(shift_frames_setting, project_uuid, "Styling", header_col_3, header_col_4) - elif st.session_state['page'] == "Motion": - timeline_view(shift_frames_setting, project_uuid, "Motion", header_col_3, header_col_4) - + timeline_view(shift_frames_setting, project_uuid, "Styling", header_col_3, header_col_4) + elif st.session_state['page'] == "Motion": + 
timeline_view(shift_frames_setting, project_uuid, "Motion", header_col_3, header_col_4) + with st.sidebar: with st.expander("🔍 Inference Logging", expanded=True): diff --git a/ui_components/setup.py b/ui_components/setup.py index 5656cb19..3e191a46 100644 --- a/ui_components/setup.py +++ b/ui_components/setup.py @@ -11,7 +11,6 @@ from ui_components.components.new_project_page import new_project_page from ui_components.components.project_settings_page import project_settings_page from ui_components.components.video_rendering_page import video_rendering_page -from ui_components.components.mood_board_page import mood_board_page from streamlit_option_menu import option_menu from ui_components.constants import CreativeProcessType from ui_components.models import InternalAppSettingObject @@ -126,25 +125,11 @@ def setup_app_ui(): with st.sidebar: - pages = CreativeProcessType.value_list() - - if 'page' not in st.session_state: - st.session_state["page"] = pages[0] - st.session_state["manual_select"] = None - - if st.session_state["page"] not in pages: - st.session_state["page"] = pages[0] - st.session_state["manual_select"] = None - - st.session_state['page'] = option_menu(None, pages, icons=['palette', 'camera-reels', "hourglass", 'stopwatch'], menu_icon="cast", orientation="horizontal", key="secti2on_selector", styles={ - "nav-link": {"font-size": "15px", "margin": "0px", "--hover-color": "#eee"}, "nav-link-selected": {"background-color": "green"}}, manual_select=st.session_state["manual_select"]) - - # TODO: CORRECT-CODE - view_types = ["Individual View", "List View"] + view_types = ["Explorer","Timeline","Individual"] if 'frame_styling_view_type_index' not in st.session_state: st.session_state['frame_styling_view_type_index'] = 0 - st.session_state['frame_styling_view_type'] = "Individual View" + st.session_state['frame_styling_view_type'] = "Explorer" st.session_state['change_view_type'] = False if 'change_view_type' not in st.session_state: @@ -156,12 +141,7 @@ def 
setup_app_ui(): else: st.session_state['frame_styling_view_type_index'] = None - def on_change_view_type(key): - selection = st.session_state[key] - if selection == "List View": - st.session_state['index_of_current_page'] = math.ceil( - st.session_state['current_frame_index'] / 10) - + # Option menu st.session_state['frame_styling_view_type'] = option_menu( None, @@ -172,12 +152,28 @@ def on_change_view_type(key): key="section-selecto1r", styles={"nav-link": {"font-size": "15px", "margin":"0px", "--hover-color": "#eee"}, "nav-link-selected": {"background-color": "orange"}}, - manual_select=st.session_state['frame_styling_view_type_index'], - on_change=on_change_view_type + manual_select=st.session_state['frame_styling_view_type_index'] ) + if st.session_state['frame_styling_view_type'] != "Explorer": + pages = CreativeProcessType.value_list() + else: + pages = ["Styling"] - + if 'page' not in st.session_state: + st.session_state["page"] = pages[0] + st.session_state["manual_select"] = None + + if st.session_state["page"] not in pages: + st.session_state["page"] = pages[0] + st.session_state["manual_select"] = None + + st.session_state['page'] = option_menu(None, pages, icons=['palette', 'camera-reels', "hourglass", 'stopwatch'], menu_icon="cast", orientation="horizontal", key="secti2on_selector", styles={ + "nav-link": {"font-size": "15px", "margin": "0px", "--hover-color": "#eee"}, "nav-link-selected": {"background-color": "green"}}, manual_select=st.session_state["manual_select"]) + + # TODO: CORRECT-CODE + + frame_styling_page( mainheader2, st.session_state["project_uuid"]) @@ -185,7 +181,7 @@ def on_change_view_type(key): elif st.session_state["main_view_type"] == "Tools & Settings": with st.sidebar: - tool_pages = ["Query Logger", "Mood Board", "Custom Models", "Project Settings"] + tool_pages = ["Query Logger", "Custom Models", "Project Settings"] if st.session_state["page"] not in tool_pages: st.session_state["page"] = tool_pages[0] @@ -199,8 +195,7 @@ def 
on_change_view_type(key): custom_models_page(st.session_state["project_uuid"]) elif st.session_state["page"] == "Project Settings": project_settings_page(st.session_state["project_uuid"]) - elif st.session_state["page"] == "Mood Board": - mood_board_page(st.session_state["project_uuid"]) + elif st.session_state["main_view_type"] == "Video Rendering": diff --git a/ui_components/widgets/list_view.py b/ui_components/widgets/list_view.py deleted file mode 100644 index 3e4e4588..00000000 --- a/ui_components/widgets/list_view.py +++ /dev/null @@ -1,131 +0,0 @@ -import streamlit as st -from ui_components.constants import WorkflowStageType -from utils.data_repo.data_repo import DataRepo -from ui_components.widgets.frame_time_selector import single_frame_time_selector, single_frame_time_duration_setter -from ui_components.widgets.image_carousal import display_image -from ui_components.methods.common_methods import delete_frame, move_frame,jump_to_single_frame_view_button,delete_frame_button,move_frame_back_button,move_frame_forward_button -import math -from utils.data_repo.data_repo import DataRepo -from ui_components.methods.common_methods import delete_frame -from ui_components.widgets.frame_clip_generation_elements import current_individual_clip_element, current_preview_video_element, update_animation_style_element -from ui_components.widgets.frame_time_selector import single_frame_time_selector, single_frame_time_duration_setter -from ui_components.widgets.image_carousal import display_image -from ui_components.widgets.frame_clip_generation_elements import current_individual_clip_element, current_preview_video_element -from utils.data_repo.data_repo import DataRepo - -def list_view_set_up(timing_details,project_uuid): - data_repo = DataRepo() - - timing_details = data_repo.get_timing_list_from_project(project_uuid) - if 'current_page' not in st.session_state: - st.session_state['current_page'] = 1 - - if not('index_of_current_page' in st.session_state and 
st.session_state['index_of_current_page']): - st.session_state['index_of_current_page'] = 1 - - items_per_page = 10 - num_pages = math.ceil(len(timing_details) / items_per_page) + 1 - - return num_pages, items_per_page - -def page_toggle(num_pages, items_per_page, project_uuid, position): - data_repo = DataRepo() - timing_details = data_repo.get_timing_list_from_project(project_uuid) - - st.session_state['current_page'] = st.radio(f"Select page:", options=range( - 1, num_pages), horizontal=True, index=st.session_state['index_of_current_page'] - 1, key=f"page_selection_radio_{position}") - if st.session_state['current_page'] != st.session_state['index_of_current_page']: - st.session_state['index_of_current_page'] = st.session_state['current_page'] - st.rerun() - - start_index = (st.session_state['current_page'] - 1) * items_per_page - end_index = min(start_index + items_per_page,len(timing_details)) - - return start_index, end_index - -def styling_list_view(start_index, end_index, shift_frames_setting, project_uuid): - data_repo = DataRepo() - timing_details = data_repo.get_timing_list_from_project(project_uuid) - for i in range(start_index, end_index): - display_number = i + 1 - st.subheader(f"Frame {display_number}") - image1, image2, image3 = st.columns([2, 3, 2]) - - with image1: - display_image(timing_uuid=timing_details[i].uuid, stage=WorkflowStageType.SOURCE.value, clickable=False) - - with image2: - display_image(timing_uuid=timing_details[i].uuid, stage=WorkflowStageType.STYLED.value, clickable=False) - - with image3: - time1, time2 = st.columns([1, 1]) - with time1: - single_frame_time_selector(timing_details[i].uuid, 'sidebar', shift_frames=shift_frames_setting) - single_frame_time_duration_setter(timing_details[i].uuid,'sidebar',shift_frames=shift_frames_setting) - - with time2: - st.write("") - - - jump_to_single_frame_view_button(display_number,timing_details) - - st.markdown("---") - btn1, btn2, btn3 = st.columns([2, 1, 1]) - with btn1: - if 
st.button("Delete this keyframe", key=f'{i}'): - delete_frame(timing_details[i].uuid) - st.rerun() - with btn2: - if st.button("⬆️", key=f"Promote {display_number}"): - move_frame("Up", timing_details[i].uuid) - st.rerun() - with btn3: - if st.button("⬇️", key=f"Demote {display_number}"): - move_frame("Down", timing_details[i].uuid) - st.rerun() - - st.markdown("***") - -def motion_list_view(start_index, end_index, shift_frames_setting, project_uuid): - data_repo = DataRepo() - timing_details = data_repo.get_timing_list_from_project(project_uuid) - num_timing_details = len(timing_details) - timing_details = data_repo.get_timing_list_from_project(project_uuid) - - for idx in range(start_index, end_index): - st.header(f"Frame {idx+1}") - timing1, timing2, timing3 = st.columns([1, 1, 1]) - - with timing1: - frame1, frame2, frame3 = st.columns([2, 1, 2]) - with frame1: - if timing_details[idx].primary_image_location: - st.image(timing_details[idx].primary_image_location) - with frame2: - st.write("") - st.write("") - st.write("") - st.write("") - st.write("") - st.info(" ➜") - with frame3: - if idx+1 < num_timing_details and timing_details[idx+1].primary_image_location: - st.image(timing_details[idx+1].primary_image_location) - elif idx+1 == num_timing_details: - st.write("") - st.write("") - st.write("") - st.write("") - st.markdown("