From f027c9e874909756acae4df8c582bff86cefa381 Mon Sep 17 00:00:00 2001 From: Jerry Hogsett Date: Mon, 4 Mar 2024 14:28:37 -0800 Subject: [PATCH 001/152] add auto-titling --- tabs/video_remixer_ui.py | 41 +++++++++++++++++++++++++++++++-------- webui_utils/file_utils.py | 4 ++-- 2 files changed, 35 insertions(+), 10 deletions(-) diff --git a/tabs/video_remixer_ui.py b/tabs/video_remixer_ui.py index ab5bde39..8e6fd3ec 100644 --- a/tabs/video_remixer_ui.py +++ b/tabs/video_remixer_ui.py @@ -313,10 +313,9 @@ def render_tab(self): next_labeled_scene = gr.Button(">", size="sm", min_width=20, scale=0) with gr.Row(): - auto_label_scenes = gr.Button(value="Auto Label Scenes", - size="sm", min_width=80) - reset_scene_labels = gr.Button(value="Reset Scene Labels", - size="sm", min_width=80) + auto_label_scenes = gr.Button(value="+ Sort Keys", size="sm", min_width=60) + auto_title_scenes = gr.Button(value="+ Titles", size="sm", min_width=60) + reset_scene_labels = gr.Button(value="Reset", size="sm", min_width=60) with gr.Row(): add_2x_slomo = gr.Button(value="+ 2X Slo Mo", size="sm", min_width=60, elem_id="highlightbutton") add_4x_slomo = gr.Button(value="+ 4X Slo Mo", size="sm", min_width=60, elem_id="highlightbutton") @@ -1031,6 +1030,10 @@ def render_tab(self): outputs=[scene_index, scene_name, scene_image, scene_state, scene_info, set_scene_label]) + auto_title_scenes.click(self.auto_title_scenes, + outputs=[scene_index, scene_name, scene_image, scene_state, + scene_info, set_scene_label]) + reset_scene_labels.click(self.reset_scene_labels, outputs=[scene_index, scene_name, scene_image, scene_state, scene_info, set_scene_label]) @@ -1867,6 +1870,20 @@ def auto_label_scenes(self): self.state.set_scene_label(scene_index, formatted_label) return self.scene_chooser_details(self.state.current_scene) + def auto_title_scenes(self): + num_scenes = len(self.state.scene_names) + # num_width = len(str(num_scenes)) + for scene_index in range(len(self.state.scene_names)): + scene_name = self.state.scene_names[scene_index] + title = self.scene_title(scene_name) + scene_label = self.state.scene_labels.get(scene_name) + sort_mark, hint_mark = None, None + if scene_label: + sort_mark, hint_mark, _ = self.state.split_label(scene_label) + formatted_label = self.state.compose_label(sort_mark, hint_mark, title) + self.state.set_scene_label(scene_index, formatted_label) + return self.scene_chooser_details(self.state.current_scene) + def reset_scene_labels(self): self.state.clear_all_scene_labels() return self.scene_chooser_details(self.state.current_scene) @@ -2154,6 +2171,17 @@ def next_button61(self, custom_video_options, custom_audio_options, output_filep except ValueError as error: return format_markdown(str(error), "error") + def scene_marker(self, scene_name): + scene_index = self.state.scene_names.index(scene_name) + _, _, _, _, scene_start, scene_duration, _, _ = self.state.scene_chooser_data(scene_index) + marker = f"[{scene_index} {scene_name} {scene_start} +{scene_duration}]" + return marker + + def scene_title(self, scene_name): + scene_index = self.state.scene_names.index(scene_name) + _, _, _, scene_position, _, _, _, _ = self.state.scene_chooser_data(scene_index) + return scene_position + def next_button62(self, marked_video_options, marked_audio_options, output_filepath): if not self.state.project_path: return format_markdown( @@ -2186,10 +2214,7 @@ def next_button62(self, marked_video_options, marked_audio_options, output_filep labels = [] kept_scenes = self.state.kept_scenes() for scene_name in 
kept_scenes: - scene_index = self.state.scene_names.index(scene_name) - _, _, _, _, scene_start, scene_duration, _, _ = \ - self.state.scene_chooser_data(scene_index) - labels.append(f"[{scene_index} {scene_name} {scene_start} +{scene_duration}]") + labels.append(self.scene_marker(scene_name)) draw_text_options["labels"] = labels self.state.save_custom_remix(self.log, output_filepath, global_options, kept_scenes, diff --git a/webui_utils/file_utils.py b/webui_utils/file_utils.py index c0f08c67..8db3cd5a 100644 --- a/webui_utils/file_utils.py +++ b/webui_utils/file_utils.py @@ -362,10 +362,10 @@ def simple_sanitize_filename(filename, default_filename=None): Uses the default_filename if a safe filename cannot be produced Raises ValueError if default_filename is not provided """ - # Keep only alphanumeric chars and spaces, and convert all space sequences into underscores + # Keep only alphanumeric chars, hyphens, and spaces, and convert all space sequences into underscores safe_name = re.sub(r' +', '_', - re.sub(r'[^A-Za-z0-9 ]+', + re.sub(r'[^-A-Za-z0-9 ]+', '', filename)) or default_filename if safe_name: From 09d1f9472c5bbd8f3639a6c2662d474644e94c42 Mon Sep 17 00:00:00 2001 From: Jerry Hogsett Date: Wed, 6 Mar 2024 01:31:13 -0800 Subject: [PATCH 002/152] reuse proc. vid clps custom remix, fix draw text --- tabs/video_remixer_ui.py | 3 ++- video_remixer.py | 46 +++++++++++++++++++++++++++------------- 2 files changed, 33 insertions(+), 16 deletions(-) diff --git a/tabs/video_remixer_ui.py b/tabs/video_remixer_ui.py index 8e6fd3ec..c6d6100e 100644 --- a/tabs/video_remixer_ui.py +++ b/tabs/video_remixer_ui.py @@ -2163,7 +2163,8 @@ def next_button61(self, custom_video_options, custom_audio_options, output_filep global_options = self.config.ffmpeg_settings["global_options"] remixer_settings = self.config.remixer_settings kept_scenes = self.state.prepare_save_remix(self.log, global_options, - remixer_settings, output_filepath) + remixer_settings, output_filepath, + invalidate_video_clips=False) self.state.save_custom_remix(self.log, output_filepath, global_options, kept_scenes, custom_video_options, custom_audio_options) return format_markdown(f"Remixed custom video {output_filepath} is complete.", diff --git a/video_remixer.py b/video_remixer.py index b4dd0800..9123d1d3 100644 --- a/video_remixer.py +++ b/video_remixer.py @@ -1345,7 +1345,7 @@ def clean_remix_content(self, purge_from, purge_root=None): clean_paths = clean_paths[1:] self.video_clips = [] self.clips = [] - elif purge_from == "scene_clips": + elif purge_from == "remix_clips": clean_paths = clean_paths[2:] self.clips = [] @@ -1494,7 +1494,8 @@ def get_resize_params(self, resize_w, resize_h, crop_w, crop_h, content_width, c crop_type = "crop" return scale_type, crop_type - def prepare_save_remix(self, log_fn, global_options, remixer_settings, output_filepath : str): + def prepare_save_remix(self, log_fn, global_options, remixer_settings, output_filepath : str, + invalidate_video_clips=True): if not output_filepath: raise ValueError("Enter a path for the remixed video to proceed") @@ -1520,13 +1521,20 @@ def prepare_save_remix(self, log_fn, global_options, remixer_settings, output_fi self.create_audio_clips(log_fn, global_options, audio_format=audio_format) self.save() - # always recreate video and scene clips - self.clean_remix_content(purge_from="video_clips") + # leave video clips if they are complete since we may be only making audio changes + if invalidate_video_clips or not self.processed_content_complete(self.VIDEO_STEP): + 
self.clean_remix_content(purge_from="video_clips") + else: + # always recreate remix clips + self.clean_remix_content(purge_from="remix_clips") + return kept_scenes def save_remix(self, log_fn, global_options, kept_scenes): - self.create_video_clips(log_fn, kept_scenes, global_options) - self.save() + # leave video clips if they are complete since we may be only making audio changes + if not self.processed_content_complete(self.VIDEO_STEP): + self.create_video_clips(log_fn, kept_scenes, global_options) + self.save() self.create_scene_clips(log_fn, kept_scenes, global_options) self.save() @@ -1550,11 +1558,13 @@ def save_custom_remix(self, _, _, output_ext = split_filepath(output_filepath) output_ext = output_ext[1:] - self.create_custom_video_clips(log_fn, kept_scenes, global_options, - custom_video_options=custom_video_options, - custom_ext=output_ext, - draw_text_options=draw_text_options) - self.save() + # leave video clips if they are complete since we may be only making audio changes + if not self.processed_content_complete(self.VIDEO_STEP): + self.create_custom_video_clips(log_fn, kept_scenes, global_options, + custom_video_options=custom_video_options, + custom_ext=output_ext, + draw_text_options=draw_text_options) + self.save() self.create_custom_scene_clips(kept_scenes, global_options, custom_audio_options=custom_audio_options, @@ -2165,8 +2175,8 @@ def force_drop_processed_scene(self, scene_index): self.resynthesis_path, self.inflation_path, self.upscale_path, - self.audio_clips_path, self.video_clips_path, + self.audio_clips_path, self.clips_path ]: content_path = os.path.join(path, scene_name) @@ -2471,11 +2481,17 @@ def create_custom_video_clips(self, # trim whitespace label = label.strip() if label else "" - # FFmpeg needs the colons escaped - label = label.replace(":", "\:") + # FFmpeg needs some things escaped + label = label.\ + replace(":", "\:").\ + replace(",", "\,").\ + replace("{", "\{").\ + replace("}", "\}").\ + replace("%", "\%") box_part = f":box=1:boxcolor={box_color}:boxborderw={border_size}" if draw_box else "" - label_part = f"text='{label}':x={box_x}:y={box_y}:fontsize={font_size}:fontcolor={font_color}:fontfile='{font_file}'{box_part}" + # label_part = f"text='{label}':x={box_x}:y={box_y}:fontsize={font_size}:fontcolor={font_color}:fontfile='{font_file}':expansion=none:{box_part}" + label_part = f"text='{label}':x={box_x}:y={box_y}:fontsize={font_size}:fontcolor={font_color}:fontfile='{font_file}':expansion=none{box_part}" shadow_part = f"text='{label}':x={shadow_x}:y={shadow_y}:fontsize={font_size}:fontcolor={shadow_color}:fontfile='{font_file}'" if draw_shadow else "" draw_text = f"{shadow_part},drawtext={label_part}" if draw_shadow else label_part use_custom_video_options = use_custom_video_options \ From e885c3a19c295e61ab93b6764398c7c8a2b2c995 Mon Sep 17 00:00:00 2001 From: Jerry Hogsett Date: Wed, 6 Mar 2024 01:47:00 -0800 Subject: [PATCH 003/152] more fix --- video_remixer.py | 1 - 1 file changed, 1 deletion(-) diff --git a/video_remixer.py b/video_remixer.py index 9123d1d3..a5d810f8 100644 --- a/video_remixer.py +++ b/video_remixer.py @@ -2490,7 +2490,6 @@ def create_custom_video_clips(self, replace("%", "\%") box_part = f":box=1:boxcolor={box_color}:boxborderw={border_size}" if draw_box else "" - # label_part = f"text='{label}':x={box_x}:y={box_y}:fontsize={font_size}:fontcolor={font_color}:fontfile='{font_file}':expansion=none:{box_part}" label_part = 
f"text='{label}':x={box_x}:y={box_y}:fontsize={font_size}:fontcolor={font_color}:fontfile='{font_file}':expansion=none{box_part}" shadow_part = f"text='{label}':x={shadow_x}:y={shadow_y}:fontsize={font_size}:fontcolor={shadow_color}:fontfile='{font_file}'" if draw_shadow else "" draw_text = f"{shadow_part},drawtext={label_part}" if draw_shadow else label_part From 7708d7257ddff2ee5ad3351fa31a83e10cc243a2 Mon Sep 17 00:00:00 2001 From: Jerry Hogsett Date: Wed, 6 Mar 2024 09:40:51 -0800 Subject: [PATCH 004/152] replace all sort labels, add titles only if not set --- tabs/video_remixer_ui.py | 36 ++++++++++++++++++++++++++---------- video_remixer.py | 2 ++ 2 files changed, 28 insertions(+), 10 deletions(-) diff --git a/tabs/video_remixer_ui.py b/tabs/video_remixer_ui.py index c6d6100e..72d2431a 100644 --- a/tabs/video_remixer_ui.py +++ b/tabs/video_remixer_ui.py @@ -1856,34 +1856,50 @@ def next_labeled_scene(self, scene_index, scene_name): def prev_labeled_scene(self, scene_index, scene_name): return self.scan_for_label(range(scene_index-1, -1, -1)) + # TODO move + # add sorting marks to each scene label, removing existing ones first def auto_label_scenes(self): num_scenes = len(self.state.scene_names) num_width = len(str(num_scenes)) + # remove existing sort marks + for scene_index in range(len(self.state.scene_names)): + scene_name = self.state.scene_names[scene_index] + scene_label = self.state.scene_labels.get(scene_name) + _, hint_mark, title = self.state.split_label(scene_label) + formatted_label = self.state.compose_label(None, hint_mark, title) + self.state.set_scene_label(scene_index, formatted_label) for scene_index in range(len(self.state.scene_names)): scene_name = self.state.scene_names[scene_index] scene_label = self.state.scene_labels.get(scene_name) - hint_mark, title = None, None - if scene_label: - _, hint_mark, title = self.state.split_label(scene_label) + _, hint_mark, title = self.state.split_label(scene_label) sort_mark = str(scene_index).zfill(num_width) formatted_label = self.state.compose_label(sort_mark, hint_mark, title) self.state.set_scene_label(scene_index, formatted_label) + self.clean_scene_labels() return self.scene_chooser_details(self.state.current_scene) + # TODO more inspired default title, move + # add a default title to each scene label, if not already set def auto_title_scenes(self): - num_scenes = len(self.state.scene_names) - # num_width = len(str(num_scenes)) for scene_index in range(len(self.state.scene_names)): scene_name = self.state.scene_names[scene_index] title = self.scene_title(scene_name) scene_label = self.state.scene_labels.get(scene_name) - sort_mark, hint_mark = None, None - if scene_label: - sort_mark, hint_mark, _ = self.state.split_label(scene_label) - formatted_label = self.state.compose_label(sort_mark, hint_mark, title) - self.state.set_scene_label(scene_index, formatted_label) + sort_mark, hint_mark, existing_title = self.state.split_label(scene_label) + if not existing_title: + formatted_label = self.state.compose_label(sort_mark, hint_mark, title) + self.state.set_scene_label(scene_index, formatted_label) + self.clean_scene_labels() return self.scene_chooser_details(self.state.current_scene) + # remove scene labels that do not have a corresponding scene name + def clean_scene_labels(self): + for scene_name, scene_label in self.state.scene_labels.copy().items(): + if not scene_name in self.state.scene_names: + self.log(f"deleting unused scene label {scene_label}") + del self.state.scene_labels[scene_name] + self.state.save() + 
def reset_scene_labels(self): self.state.clear_all_scene_labels() return self.scene_chooser_details(self.state.current_scene) diff --git a/video_remixer.py b/video_remixer.py index a5d810f8..fcc2b445 100644 --- a/video_remixer.py +++ b/video_remixer.py @@ -696,6 +696,8 @@ def create_thumbnails(self, log_fn, global_options, remixer_settings): def split_label(self, label): """Splits a label such as '(01){I:2S} My Title (part1){b}' into sort: '01', hint: 'I:2S' label: 'My Title (part1){b}' parts """ + if not label: + return None, None, None try: matches = re.search(self.SPLIT_LABELS, label) groups = matches.groups() From 395b5606cd76710776123a30dc6b81ebb7641383 Mon Sep 17 00:00:00 2001 From: Jerry Hogsett Date: Thu, 7 Mar 2024 17:17:40 -0800 Subject: [PATCH 005/152] update guide for recent changes --- guide/video_remixer_choose.md | 36 +++++++++++++++++++++++------------ 1 file changed, 24 insertions(+), 12 deletions(-) diff --git a/guide/video_remixer_choose.md b/guide/video_remixer_choose.md index 670e23fb..b7c1f792 100644 --- a/guide/video_remixer_choose.md +++ b/guide/video_remixer_choose.md @@ -34,29 +34,30 @@ _Choose Scene Range_ ## Properties Accordion _Scene Label_ -- Enter a scene label and click _Set_ to save it with the scene +- Enter a scene label and press _Enter_ or click _Set_ to save it with the scene - _Tip:_ Use the < and > buttons to navigate to labeled scenes ### About Scene Labels - Labels can be used to add a scene title - The entered scene title will appear in the video when using the _Labeled Remix_ feature - - The entered title will also be used as the based for the scene remix clip, making it easier to reuse individual clips + - The entered title will also be used as the basis for the scene remix clip, making it easier to find and reuse individual scene clips - Labels can be used to rearrange scene order in the remix video - When a label starts with a value inside parentheses, the value will be used to arrange the clips in sorted order - - _Tip:_ use the _Auto Label Scenes_ button to automatically add a sorting mark to each scene -- Labels can be used to mark a scene for 2X or 4X audio slow motion - - **NOTE** the main video must use 2X Inflation for this feature to work - - Use the _Add 2X Audio Slo Mo_ or _Add 4X Audio Slo Mo_ buttons to add a _processing hint_ to the scene label + - _Tip:_ use the _+ Sort Keys_ button to automatically add a sorting mark to each scene +- Labels can be used to mark a scene for 2X, 4X or 8X audio slow motion + - Use the _+ 2X Slo Mo_, _+ 4X Slo Mo_ or _+ 8X Slo Mo_ buttons to add a _processing hint_ to the scene label that adds audio pitch-adjusted slow motion -_Auto Label Scenes_ +_+ Sort Keys_ - Automatically adds a sorting mark to each scene -_Reset Scene Labels_ +_+ Title_ +- Automatically adds a default title to each scene + +_Reset_ - Clears the contents of all scene labels -_Add 2X Audio Slo Mo_ and _Add 4X Audio Slo Mo_ -- Add a _processing hint_ to the scene label to enabled 2X or 4X audio slow motion processing -- **NOTE** the main video must use 2X Inflation for this feature to work +_+ 2X Slo Mo_, _+ 4X Slo Mo_ and _+ 8X Slo Mo_ +- Add a _processing hint_ to the scene label to enable 2X, 4X or 8X audio slow motion with pitch adjustment processing The _Keep All Scenes_ and _Drop All Scenes_ buttons (inside the _Danger Zone_ accordion) - **Destructively** _Keep_ or _Drop_ all scenes (there is no undo) @@ -68,7 +69,7 @@ The _Split Scene_ and _Drop Processed Scene_ buttons (inside the _Danger Zone_ a ## Danger Zone Accordion 
_Keep All Scenes_ and _Drop All Scenes_
-- Sets all sets to _Keep_ or _Drop_
+- Sets **_all_** scenes to _Keep_ or _Drop_

 _Invert Scene Choices_
 - Changes all scene keep/drop statuses to the opposite state
@@ -79,5 +80,16 @@ _Invert Scene Choices_

 _Drop Processed Scene_
 - Opens the _Drop Processed Scenes_ tab to drop a scene, including its processed content, to save the remix video without the dropped scene, avoiding reprocessing the whole video

+_Mark Scene_
+- Remembers the current scene ID to make it easier to use features that require entering a scene ID range
+- This can be used with the _Merge Scene Range_ and _Choose Scene Range_ features
+  1. Go to the first scene of the range and click _Mark Scene_
+  1. Go to the last scene of the range, then click either of these shortcut buttons
+    - Merge Scenes
+    - Choose Scene Range
+
+_Merge Scenes_
+- Shortcut that takes you to the Remix Extra _Merge Scenes_ tab
+
 ## Important
 - `ffmpeg.exe` must be available on the system path

From 411130865ba21a58f28815b8c828acb2e705ecd1 Mon Sep 17 00:00:00 2001
From: Jerry Hogsett
Date: Thu, 7 Mar 2024 17:45:18 -0800
Subject: [PATCH 006/152] more guide updates

---
 guide/video_remixer_choose.md     | 38 ++++++++++++++++++++++++++++++++++++
 guide/video_remixer_processing.md | 40 +++++++++----------------------
 video_remixer.py                  |  1 +
 3 files changed, 48 insertions(+), 31 deletions(-)

diff --git a/guide/video_remixer_choose.md b/guide/video_remixer_choose.md
index b7c1f792..28b8b3b1 100644
--- a/guide/video_remixer_choose.md
+++ b/guide/video_remixer_choose.md
@@ -44,8 +44,10 @@ _Scene Label_
 - Labels can be used to rearrange scene order in the remix video
   - When a label starts with a value inside parentheses, the value will be used to arrange the clips in sorted order
   - _Tip:_ use the _+ Sort Keys_ button to automatically add a sorting mark to each scene
-- Labels can be used to mark a scene for 2X, 4X or 8X audio slow motion
-  - Use the _+ 2X Slo Mo_, _+ 4X Slo Mo_ or _+ 8X Slo Mo_ buttons to add a _processing hint_ to the scene label that adds audio pitch-adjusted slow motion
+- Labels can be used to add _Processing Hints_
+  - Use the _+ 2X Slo Mo_, _+ 4X Slo Mo_ or _+ 8X Slo Mo_ buttons to add a _Processing Hint_ to the scene label that adds audio pitch-adjusted slow motion
+
+_Tip: See below for more details on_ Processing Hints

 _+ Sort Keys_
 - Automatically adds a sorting mark to each scene
@@ -91,5 +93,37 @@ _Mark Scene_

 _Merge Scenes_
 - Shortcut that takes you to the Remix Extra _Merge Scenes_ tab

+## Processing Hints
+
+Resize:
+- `{R:x/y}` zoom into quadrant `x` of `y` sized grid
+  - Example:
+    - `{R:1/4}` zoom into upper left quadrant of 2x2 grid
+    - `{R:5/9}` zoom into center quadrant of 3x3 grid (like a telephone keypad)
+- `{R:z%}` zoom in the center at `z` percent
+  - Example:
+    - `{R:200%}` zoom in at 200%
+
+Resynthesize:
+- `{Y:type}` enable resynthesis of `type`
+  - Types:
+    - `C` clean (first pass only of two pass resynth)
+    - `S` scrub (two pass resynth)
+    - `R` replace (one pass resynth)
+    - `N` no resynthesis
+
+Inflation:
+- `{I:nt}` inflate amount `n` with type `t`
+  - Amounts: `1`, `2`, `4`, `8`, `16`
+    - `1` means don't inflate
+  - Types:
+    - `A` slow motion with audio pitch adjust
+    - `S` slow motion with silent audio
+    - `N` no slow motion (can be omitted)
+
+Upscale:
+- `{U:_}` upscale at 1X for clean-up
+  - `_` reserved for future use
+
 ## Important
 - `ffmpeg.exe` must be available on the system path
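The hint grammar documented above reduces to simple arithmetic over the frame size. The sketch below shows one way a `{R:x/y}` or `{R:z%}` hint could be turned into a crop window; the function and parameter names are illustrative only, not the project's implementation (the resize code added later in this series scales the frame up by the grid magnitude and keeps the project crop size, which amounts to the same zoom).

```python
import math
import re

# Illustrative only: pull the {R:...} hint out of a scene label and turn it
# into a crop window (width, height, x offset, y offset) over a frame.
def resize_hint_to_crop(label, frame_w, frame_h):
    match = re.search(r"\{R:([^}]+)\}", label)
    if not match:
        return None
    hint = match.group(1)
    if "/" in hint:                       # quadrant form, e.g. "5/9"
        quadrant, quadrants = (int(n) for n in hint.split("/"))
        grid = int(math.sqrt(quadrants))  # 9 quadrants -> 3x3 grid
        crop_w, crop_h = frame_w // grid, frame_h // grid
        row, column = divmod(quadrant - 1, grid)
        return crop_w, crop_h, column * crop_w, row * crop_h
    if hint.endswith("%"):                # percent form, e.g. "200%"
        zoom = int(hint[:-1]) / 100.0
        crop_w, crop_h = int(frame_w / zoom), int(frame_h / zoom)
        return crop_w, crop_h, (frame_w - crop_w) // 2, (frame_h - crop_h) // 2
    return None

# '{R:5/9}' on a 1920x1080 frame -> the 640x360 center cell at offset (640, 360)
print(resize_hint_to_crop("(01){R:5/9} My Title", 1920, 1080))
```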
diff --git a/guide/video_remixer_processing.md b/guide/video_remixer_processing.md
index b3f0f1e6..4ac3e1a7 100644
--- a/guide/video_remixer_processing.md
+++ b/guide/video_remixer_processing.md
@@ -14,23 +14,28 @@
 - Clean frames by completely replacing with interpolations from adjacent frames
     - Deepest cleaning method, but does not handle fast-moving content well
 1. Check _Inflate New Frames_ to insert AI-interpolated frames between all real frames
-    - Choose whether to inflate by _2X_, _4X_ or _8X_
-    - The choices will insert 1, 3 or 7 new frames between existing frames
+    - Choose whether to inflate by _2X_, _4X_, _8X_ or _16X_
+    - The choices will insert 1, 3, 7 or 15 new frames between existing frames
     - Choose whether to produce a slow-motion video
         - **_No_** Create a real-time video
             - Adjust the output FPS to compensate for the inserted frames
         - **_Audio_** Create a slow-motion video with audio
             - Adjust the output FPS and audio pitch to compensate for the new frames
-            - **_Tip:_** See below for more information about slow-motion videos
         - **_Silent_** Create a slow-motion video without audio
-            - Keep the original FPS to reveal the most motion slowdown
+            - Use silence instead of pitch-compensated audio
 1. Check _Upscale Frames_ to use AI to clean and enlarge frames
-    - Choose whether to upscale by _1X_, _2X_ or _4X_
+    - Choose whether to upscale by _1X_, _2X_, _3X_ or _4X_
     - Upscaling at 1X will cleanse the frames without enlarging
-    - Upscaling at 2X - 4X will cleanse the frames and double or quadruple the frame size
+    - Upscaling at 2X - 4X will cleanse the frames and double, triple or quadruple the frame size
 1. Click _Process Remix_ to kick off the processing
     - Progress can be tracked in the console

+## Advanced Options Accordion
+- Check _Automatically save default MP4 video_ to:
+  - Save the Remix Video using default MP4 settings as seen on the _Save MP4 Remix_ tab
+- Check _Delete processed content after saving_ to:
+  - Automatically purge all generated project content after saving the Remix Video
+
 ## Note
 - Content may be _purged_ (soft-deleted) when clicking _Process Remix_
     - Previous process content not currently needed is set aside
@@ -41,26 +46,3 @@
     - The browser window does NOT need to be left open
     - The project can be reopened later to resume where you left off
 - `ffmpeg.exe` must be available on the system path
-
-## More About Slow-Motion Videos
-
-### _Real-Time_ and _Slow-Motion_ Video Compatibility
-If creating slow-motion videos to combine with real-time videos, note the following chart showing the effect of Inflation options on Remix Video FPS:
-
-| Inflate By | Slo Mo 'NO' | Slo Mo 'AUDIO' | Slo Mo 'SILENT'
-| :-: | :-: | :-: | :-: |
-| 2X | FPS x 2 | FPS x 1, Audio Pitch x .50 | FPS x 1 |
-| 4X | FPS x 4 | FPS x 2, Audio Pitch x .50 | FPS x 1 |
-| 8X | FPS x 8 | FPS x 2, Audio Pitch x .25 | FPS x 1 |
-
-### Examples
-
-_Tip:_ Use the _Video Assembler_ tab under _Tools_, _File Conversion_ to assemble video clips sharing like characteristics
-
-- Make a video combining real-time footage and 8X silent slow motion
-    - Create the real-time remix video **_without_** using Inflation
-    - Create the slow-motion remix video with the "8X" and "Silent" Inflation options
-
-- Make a smoothed video combining real-time footage with 2X and 4X slow motion
-    - Create the real-time remix video with the "2X" and "No" Inflation options
-    - Create the slow-motion remix video with the "2X" and "4X", and "Audio" Inflation options
diff --git a/video_remixer.py b/video_remixer.py
index fcc2b445..20e56907 100644
--- a/video_remixer.py
+++ b/video_remixer.py
@@ -2268,6 +2268,7 @@ def compute_forced_inflation(self, scene_name):
                 force_audio = True
             elif "S" in inflation_hint:
                 force_silent = True
+            # else "N" for no slow motion
         return force_inflation, force_audio, force_inflate_by, force_silent

     def compute_scene_fps(self, scene_name):

From 7521daba4490b5f09b4fd6009824660f163df77b Mon Sep 17 00:00:00 2001
From: Jerry Hogsett
Date: Thu, 7 Mar 2024 17:51:58 -0800
Subject: [PATCH 007/152] more guide updating

---
 guide/autofill_duplicates.md | 15 +++++++++------
 tabs/dedupe_autofill_ui.py   |  2 +-
 2 files changed, 10 insertions(+), 7 deletions(-)

diff --git a/guide/autofill_duplicates.md b/guide/autofill_duplicates.md
index 80c86ca1..952756cc 100644
--- a/guide/autofill_duplicates.md
+++ b/guide/autofill_duplicates.md
@@ -1,10 +1,6 @@
 **Auto-Fill Duplicate Frames** - Detect duplicate frames and fill with interpolated replacements

 ## How It Works
-1. Set _Input PNG Files Path_ to a path on this server for the PNG files being deduplicated
-1. Set _Output PNG Files Path_ to a path on this server to store the deduplicated PNG files
-    - Output PNG File Path can be left blank to use the default folder
-    - _Tip: The default folder is set by the_ `config.directories.output_deduplication` _setting_
 1. Set _Detect Threshold_ to specify the sensitivity to frame differences
     - A lower value finds fewer duplicates; a higher value finds more
     - This value requires experimentation. See _More Details_ below.
@@ -18,9 +14,16 @@
 1. Set _Search Precision_ to the depth of search needed for accuracy
     - High precision yields precise frame timing, but takes a long time
     - Less precision is faster, with possible imprecise frame timing
-1. Click _Deduplicate Frames_
-1. The _Details_ box shows the result of the operation, or any errors encountered
+1. Choose _Individual Path_ or _Batch Processing_
+    - If **Individual Path**
+      1. Set _Input PNG Files Path_ to a path on this server for the PNG files being deduplicated
+      1. Set _Output PNG Files Path_ to a path on this server to store the deduplicated PNG files
+    - If **Batch Processing**
+      1. Set _Input PNG Files Path_ to a directory on this server containing the frame groups to be deduplicated
+      1. Set _Output PNG Files Path_ to a directory on this server for the deduplicated frame groups
+1. Click _Deduplicate Frames_ or _Deduplicate Batch_
 1. On completion, a report .txt file is written to the output path with details of the auto-filling
+- Progress can be tracked in the console

 ## Important
 - This process could be slow, perhaps many hours long!
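The effect of _Detect Threshold_ is easier to reason about with a toy difference measure. The sketch below flags consecutive PNG frames whose mean absolute pixel difference falls below a threshold; it is only a rough stand-in for the tool's actual detector, and the path and threshold value are hypothetical.

```python
import glob
import os

import cv2
import numpy as np

# Rough illustration only: report consecutive frames that differ by less than
# `threshold` mean absolute pixel levels; a lower threshold flags fewer frames
# as duplicates, a higher one flags more. Assumes all frames share dimensions.
def find_near_duplicates(png_path, threshold=2.5):
    files = sorted(glob.glob(os.path.join(png_path, "*.png")))
    duplicates = []
    previous = None
    for file in files:
        frame = cv2.imread(file)
        if previous is not None:
            difference = float(np.mean(cv2.absdiff(frame, previous)))
            if difference < threshold:
                duplicates.append((file, difference))
        previous = frame
    return duplicates

# Example usage (hypothetical path)
for file, difference in find_near_duplicates("/path/to/frames", threshold=2.5):
    print(f"{os.path.basename(file)} looks like a duplicate (diff {difference:.2f})")
```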
diff --git a/tabs/dedupe_autofill_ui.py b/tabs/dedupe_autofill_ui.py index 939f0184..a946360e 100644 --- a/tabs/dedupe_autofill_ui.py +++ b/tabs/dedupe_autofill_ui.py @@ -71,7 +71,7 @@ def render_tab(self): placeholder="Path on this server for the deduplicated PNG files") message_box_batch = gr.Markdown(format_markdown(self.DEFAULT_MESSAGE_BATCH)) gr.Markdown("*Progress can be tracked in the console*") - dedupe_batch = gr.Button("Deduplicate Frames " + SimpleIcons.SLOW_SYMBOL, + dedupe_batch = gr.Button("Deduplicate Batch " + SimpleIcons.SLOW_SYMBOL, variant="primary") with gr.Accordion(SimpleIcons.TIPS_SYMBOL + " Guide", open=False): WebuiTips.autofill_duplicates.render() From 7521daba4490b5f09b4fd6009824660f163df77b Mon Sep 17 00:00:00 2001 From: Jerry Hogsett Date: Thu, 7 Mar 2024 18:27:00 -0800 Subject: [PATCH 008/152] simple project settings memorization buttons --- guide/video_remixer_settings.md | 4 +- tabs/video_remixer_ui.py | 153 +++++++++++++++++++++----------- 2 files changed, 104 insertions(+), 53 deletions(-) diff --git a/guide/video_remixer_settings.md b/guide/video_remixer_settings.md index 6e0f7d39..d2768361 100644 --- a/guide/video_remixer_settings.md +++ b/guide/video_remixer_settings.md @@ -50,8 +50,10 @@ ## More Options Accordion 1. Click _More Options_ to access additional project setup options - - Click _Reuse Last-Used Settings_, to load the settings from the last created project + - Click _Reuse Last-Used Settings_ to load the settings from the last created project - Useful when processing a series of similar content such as TV programs + - Click _Use Memorized Setting_ to load the settings that were previously _remembered_ + - Click _Remember These Settings_ to save the settings for later use - Set _Crop X Offset_ and _Crop Y Offset_, useful for: - Removing letter/pillar boxes - Fixing incorrectly centered content diff --git a/tabs/video_remixer_ui.py b/tabs/video_remixer_ui.py index 72d2431a..ba52a1ad 100644 --- a/tabs/video_remixer_ui.py +++ b/tabs/video_remixer_ui.py @@ -216,7 +216,10 @@ def render_tab(self): crop_w = gr.Number(1920, label="Crop Width", precision=0) crop_h = gr.Number(1080, label="Crop Height", precision=0) with gr.Accordion(label="More Options", open=False): - reuse_prev_settings = gr.Button(value="Reuse Last-Used Settings", size="sm", scale=0) + with gr.Row(variant="compact"): + reuse_prev_settings = gr.Button(value="Reuse Last-Used Settings", size="sm", scale=1) + use_saved_settings = gr.Button(value="Use Memorized Settings", size="sm", scale=1) + save_settings = gr.Button(value="Remember These Settings", size="sm", scale=1) with gr.Row(variant="compact"): crop_offset_x = gr.Number(label="Crop X Offset (-1: center)", value=-1, container=False, scale=1) crop_offset_y = gr.Number(label="Crop Y Offset (-1: center)", value=-1, container=False, scale=1) @@ -940,8 +943,20 @@ def render_tab(self): reuse_prev_settings.click(self.reuse_prev_settings, outputs=[project_fps, split_type, scene_threshold, break_duration, break_ratio, resize_w, resize_h, - crop_w, crop_h, crop_offset_x, crop_offset_y, frame_format, - deinterlace, split_time]) + crop_w, crop_h, crop_offset_x, crop_offset_y, + frame_format, deinterlace, split_time]) + + use_saved_settings.click(self.use_saved_settings, + outputs=[project_fps, split_type, scene_threshold, + break_duration, break_ratio, resize_w, resize_h, + crop_w, crop_h, crop_offset_x, crop_offset_y, + frame_format, deinterlace, split_time]) + + save_settings.click(self.save_settings, + inputs=[project_fps, split_type, 
scene_threshold, + break_duration, break_ratio, resize_w, resize_h, + crop_w, crop_h, crop_offset_x, crop_offset_y, + frame_format, deinterlace, split_time]) next_button2.click(self.next_button2, inputs=[thumbnail_type, min_frames_per_scene, skip_detection], @@ -1531,22 +1546,7 @@ def next_button1(self, Session().set("last-video-remixer-project", project_path) # memorize these settings - last_settings = {} - last_settings["project_fps"] = self.state.project_fps - last_settings["split_type"] = self.state.split_type - last_settings["scene_threshold"] = self.state.scene_threshold - last_settings["break_duration"] = self.state.break_duration - last_settings["break_ratio"] = self.state.break_ratio - last_settings["resize_w"] = self.state.resize_w - last_settings["resize_h"] = self.state.resize_h - last_settings["crop_w"] = self.state.crop_w - last_settings["crop_h"] = self.state.crop_h - last_settings["crop_offset_x"] = self.state.crop_offset_x - last_settings["crop_offset_y"] = self.state.crop_offset_y - last_settings["frame_format"] = self.state.frame_format - last_settings["deinterlace"] = self.state.deinterlace - last_settings["split_time"] = self.state.split_time - Session().set("last-video-remixer-settings", last_settings) + self.save_named_settings("last-video-remixer-settings", self.state) return gr.update(selected=self.TAB_SET_UP_PROJECT), \ format_markdown(self.TAB1_DEFAULT_MESSAGE), \ @@ -1562,40 +1562,89 @@ def next_button1(self, def back_button1(self): return gr.update(selected=self.TAB_REMIX_HOME) - def reuse_prev_settings(self): - last_settings = Session().get("last-video-remixer-settings") - if last_settings: - return \ - last_settings["project_fps"], \ - last_settings["split_type"], \ - last_settings["scene_threshold"], \ - last_settings["break_duration"], \ - last_settings["break_ratio"], \ - last_settings["resize_w"], \ - last_settings["resize_h"], \ - last_settings["crop_w"], \ - last_settings["crop_h"], \ - last_settings["crop_offset_x"], \ - last_settings["crop_offset_y"], \ - last_settings["frame_format"], \ - last_settings["deinterlace"], \ - last_settings["split_time"] - else: + def save_named_settings(self, name, state : VideoRemixerState): + settings = {} + settings["project_fps"] = state.project_fps + settings["split_type"] = state.split_type + settings["scene_threshold"] = state.scene_threshold + settings["break_duration"] = state.break_duration + settings["break_ratio"] = state.break_ratio + settings["resize_w"] = state.resize_w + settings["resize_h"] = state.resize_h + settings["crop_w"] = state.crop_w + settings["crop_h"] = state.crop_h + settings["crop_offset_x"] = state.crop_offset_x + settings["crop_offset_y"] = state.crop_offset_y + settings["frame_format"] = state.frame_format + settings["deinterlace"] = state.deinterlace + settings["split_time"] = state.split_time + Session().set(name, settings) + + def use_named_settings(self, name): + settings = Session().get(name) + if settings: + try: return \ - self.config.remixer_settings["def_project_fps"], \ - self.UI_SAFETY_DEFAULTS["split_type"], \ - self.UI_SAFETY_DEFAULTS["scene_threshold"], \ - self.UI_SAFETY_DEFAULTS["break_duration"], \ - self.UI_SAFETY_DEFAULTS["break_ratio"], \ - self.UI_SAFETY_DEFAULTS["resize_w"], \ - self.UI_SAFETY_DEFAULTS["resize_h"], \ - self.UI_SAFETY_DEFAULTS["crop_w"], \ - self.UI_SAFETY_DEFAULTS["crop_h"], \ - self.UI_SAFETY_DEFAULTS["crop_offsets"], \ - self.UI_SAFETY_DEFAULTS["crop_offsets"], \ - self.UI_SAFETY_DEFAULTS["frame_format"], \ - 
self.UI_SAFETY_DEFAULTS["deinterlace"], \ - self.UI_SAFETY_DEFAULTS["split_time"] + settings["project_fps"], \ + settings["split_type"], \ + settings["scene_threshold"], \ + settings["break_duration"], \ + settings["break_ratio"], \ + settings["resize_w"], \ + settings["resize_h"], \ + settings["crop_w"], \ + settings["crop_h"], \ + settings["crop_offset_x"], \ + settings["crop_offset_y"], \ + settings["frame_format"], \ + settings["deinterlace"], \ + settings["split_time"] + except Exception: + pass + return self.use_default_settings() + + def use_default_settings(self): + return \ + self.config.remixer_settings["def_project_fps"], \ + self.UI_SAFETY_DEFAULTS["split_type"], \ + self.UI_SAFETY_DEFAULTS["scene_threshold"], \ + self.UI_SAFETY_DEFAULTS["break_duration"], \ + self.UI_SAFETY_DEFAULTS["break_ratio"], \ + self.UI_SAFETY_DEFAULTS["resize_w"], \ + self.UI_SAFETY_DEFAULTS["resize_h"], \ + self.UI_SAFETY_DEFAULTS["crop_w"], \ + self.UI_SAFETY_DEFAULTS["crop_h"], \ + self.UI_SAFETY_DEFAULTS["crop_offsets"], \ + self.UI_SAFETY_DEFAULTS["crop_offsets"], \ + self.UI_SAFETY_DEFAULTS["frame_format"], \ + self.UI_SAFETY_DEFAULTS["deinterlace"], \ + self.UI_SAFETY_DEFAULTS["split_time"] + + def reuse_prev_settings(self): + return self.use_named_settings("last-video-remixer-settings") + + def use_saved_settings(self): + return self.use_named_settings("saved-video-remixer-settings") + + def save_settings(self, project_fps, split_type, scene_threshold, break_duration, break_ratio, + resize_w, resize_h, crop_w, crop_h, crop_offset_x, crop_offset_y, + frame_format, deinterlace, split_time): + state = VideoRemixerState() + state.project_fps = project_fps + state.split_type = split_type + state.scene_threshold = scene_threshold + state.break_duration = break_duration + state.break_ratio = break_ratio + state.resize_w = resize_w + state.resize_h = resize_h + state.crop_w = crop_w + state.crop_h = crop_h + state.crop_offset_x = crop_offset_x + state.crop_offset_y = crop_offset_y + state.frame_format = frame_format + state.deinterlace = deinterlace + state.split_time = split_time + self.save_named_settings("saved-video-remixer-settings", state) ### SET UP PROJECT EVENT HANDLERS From 4e20242a7db91df0c90ba0aa545915f40efd64ab Mon Sep 17 00:00:00 2001 From: Jerry Hogsett Date: Fri, 8 Mar 2024 15:00:47 -0800 Subject: [PATCH 009/152] need to take project resize/crop into account --- video_remixer.py | 62 ++++++++++++++++++++++++++++++++++++++---------- 1 file changed, 49 insertions(+), 13 deletions(-) diff --git a/video_remixer.py b/video_remixer.py index 20e56907..73a848d1 100644 --- a/video_remixer.py +++ b/video_remixer.py @@ -1625,6 +1625,29 @@ def resize_scenes(self, log_fn, kept_scenes, remixer_settings): resize_handled = False resize_hint = self.get_hint(self.scene_labels.get(scene_name), "R") if resize_hint: + # use the main resize/crop settings if resizing, or the content native + # dimensions if not, as a foundation for handling resize hints + if self.resize: + main_resize_w = self.resize_w + main_resize_h = self.resize_h + main_crop_w = self.crop_w + main_crop_h = self.crop_h + if self.crop_offset_x < 0: + main_offset_x = (main_resize_w - main_crop_w) / 2.0 + else: + main_offset_x = self.crop_offset_x + if self.crop_offset_y < 0: + main_offset_y = (main_resize_h - main_crop_h) / 2.0 + else: + main_offset_y = self.crop_offset_y + else: + main_resize_w = content_width + main_resize_h = content_height + main_crop_w = content_width + main_crop_h = content_height + main_offset_x = 0 + main_offset_y = 
0 + try: if "/" in resize_hint: if len(resize_hint) >= 3: @@ -1642,11 +1665,12 @@ def resize_scenes(self, log_fn, kept_scenes, remixer_settings): row = int(quadrant / magnitude) column = quadrant % magnitude - # based on the zoom magnitude, compute new resize & crop - resize_w = evenify(content_width * magnitude) - resize_h = evenify(content_height * magnitude) - crop_offset_x = column * content_width - crop_offset_y = row * content_height + resize_w = evenify(main_resize_w * magnitude) + resize_h = evenify(main_resize_h * magnitude) + main_offset_x *= magnitude + main_offset_y *= magnitude + crop_offset_x = (column * main_crop_w) + main_offset_x + crop_offset_y = (row * main_crop_h) + main_offset_y scale_type = remixer_settings["scale_type_up"] self.resize_scene(log_fn, @@ -1654,8 +1678,8 @@ def resize_scenes(self, log_fn, kept_scenes, remixer_settings): scene_output_path, int(resize_w), int(resize_h), - int(self.crop_w), - int(self.crop_h), + int(main_crop_w), + int(main_crop_h), int(crop_offset_x), int(crop_offset_y), scale_type, @@ -1668,8 +1692,20 @@ def resize_scenes(self, log_fn, kept_scenes, remixer_settings): zoom_percent = int(resize_hint.replace("%", "")) if zoom_percent >= 100: magnitude = zoom_percent / 100.0 - resize_w = evenify(content_width * magnitude) - resize_h = evenify(content_height * magnitude) + + resize_w = evenify(main_resize_w * magnitude) + resize_h = evenify(main_resize_h * magnitude) + + if self.crop_offset_x == -1: + crop_offset_x = ((resize_w - main_crop_w) / 2.0) + else: + crop_offset_x = main_offset_x * magnitude + + if self.crop_offset_y == -1: + crop_offset_y = ((resize_h - main_crop_h) / 2.0) + else: + crop_offset_y + main_offset_y * magnitude + scale_type = remixer_settings["scale_type_up"] self.resize_scene(log_fn, @@ -1677,10 +1713,10 @@ def resize_scenes(self, log_fn, kept_scenes, remixer_settings): scene_output_path, int(resize_w), int(resize_h), - int(self.crop_w), - int(self.crop_h), - -1, - -1, + int(main_crop_w), + int(main_crop_h), + int(crop_offset_x), + int(crop_offset_y), scale_type, crop_type="crop") resize_handled = True From a186ff6d2a61a743ac2b4ee6688952210636427f Mon Sep 17 00:00:00 2001 From: Jerry Hogsett Date: Sat, 9 Mar 2024 10:54:36 -0800 Subject: [PATCH 010/152] wip --- resize_frames.py | 52 +++++++++----- video_remixer.py | 171 ++++++++++++++++++++++++++--------------------- 2 files changed, 129 insertions(+), 94 deletions(-) diff --git a/resize_frames.py b/resize_frames.py index 13c301f0..2f2d8be2 100644 --- a/resize_frames.py +++ b/resize_frames.py @@ -65,7 +65,8 @@ def __init__(self, crop_height : int=-1, crop_offset_x : int=-1, crop_offset_y : int=-1, - crop_type : str="none"): + crop_type : str="none", + params_fn : Callable | None=None): self.input_path = input_path self.output_path = output_path self.scale_width = scale_width @@ -77,6 +78,7 @@ def __init__(self, self.crop_offset_x = crop_offset_x self.crop_offset_y = crop_offset_y self.crop_type = crop_type + self.params_fn = params_fn def get_scale_type(self, scale_type : str) -> int: try: @@ -115,28 +117,44 @@ def resize(self, type : str="png") -> None: crop_type = self.get_crop_type(self.crop_type) with Mtqdm().open_bar(len(files), desc="Resizing") as bar: - for file in files: + for index, file in enumerate(files): self.log(f"processing {file}") image = cv2.imread(file) + if self.params_fn: + self.log(f"calling 'param_fn' to get parameters") + scale_width, \ + scale_height, \ + crop_width, \ + crop_height, \ + crop_offset_x, \ + crop_offset_y = self.params_fn(index) + 
else: + scale_width = self.scale_width + scale_height = self.scale_height + crop_width = self.crop_width + crop_height = self.crop_height + crop_offset_x = self.crop_offset_x + crop_offset_y = self.crop_offset_y + if scale_type: - size = (self.scale_width, self.scale_height) - self.log(f"resizing {file} to {self.scale_width}x{self.scale_height}") - image = cv2.resize(image, size, interpolation = scale_type) + size = (scale_width, scale_height) + self.log(f"resizing {file} to {scale_width}x{scale_height}") + image = cv2.resize(image, size, interpolation=scale_type) if crop_type: - if self.crop_width < 0: - self.crop_width = self.scale_width - if self.crop_height < 0: - self.crop_height = self.scale_height - if self.crop_offset_x < 0: - self.crop_offset_x = int((self.scale_width - self.crop_width) / 2) - if self.crop_offset_y < 0: - self.crop_offset_y = int((self.scale_height - self.crop_height) / 2) - min_x = int(self.crop_offset_x) - min_y = int(self.crop_offset_y) - max_x = int(min_x + self.crop_width) - max_y = int(min_y + self.crop_height) + if crop_width < 0: + crop_width = scale_width + if crop_height < 0: + crop_height = scale_height + if crop_offset_x < 0: + crop_offset_x = int((scale_width - crop_width) / 2.0) + if crop_offset_y < 0: + crop_offset_y = int((scale_height - crop_height) / 2.0) + min_x = int(crop_offset_x) + min_y = int(crop_offset_y) + max_x = int(min_x + crop_width) + max_y = int(min_y + crop_height) self.log(f"cropping {file} with [{min_y}:{max_y}, {min_x}:{max_x}]") image = image[min_y:max_y, min_x:max_x] diff --git a/video_remixer.py b/video_remixer.py index 73a848d1..f6a38fd1 100644 --- a/video_remixer.py +++ b/video_remixer.py @@ -4,6 +4,7 @@ import re import shutil import sys +from typing import Callable import yaml from yaml import Loader, YAMLError from webui_utils.auto_increment import AutoIncrementBackupFilename, AutoIncrementDirectory @@ -1592,7 +1593,8 @@ def resize_scene(self, crop_offset_x, crop_offset_y, scale_type, - crop_type="none"): + crop_type, + params_fn : Callable | None = None): ResizeFrames(scene_input_path, scene_output_path, @@ -1604,7 +1606,61 @@ def resize_scene(self, crop_width=crop_w, crop_height=crop_h, crop_offset_x=crop_offset_x, - crop_offset_y=crop_offset_y).resize(type=self.frame_format) + crop_offset_y=crop_offset_y, + params_fn=params_fn).resize(type=self.frame_format) + + def setup_resize_hint(self, content_width, content_height): + # use the main resize/crop settings if resizing, or the content native + # dimensions if not, as a foundation for handling resize hints + if self.resize: + main_resize_w = self.resize_w + main_resize_h = self.resize_h + main_crop_w = self.crop_w + main_crop_h = self.crop_h + if self.crop_offset_x < 0: + main_offset_x = (main_resize_w - main_crop_w) / 2.0 + else: + main_offset_x = self.crop_offset_x + if self.crop_offset_y < 0: + main_offset_y = (main_resize_h - main_crop_h) / 2.0 + else: + main_offset_y = self.crop_offset_y + else: + main_resize_w = content_width + main_resize_h = content_height + main_crop_w = content_width + main_crop_h = content_height + main_offset_x = 0 + main_offset_y = 0 + return main_resize_w, main_resize_h, main_crop_w, main_crop_h, main_offset_x, main_offset_y + + def compute_quadrant_zoom(self, quadrant, quadrants, main_resize_w, main_resize_h, main_offset_x, main_offset_y, main_crop_w, main_crop_h): + quadrant = int(quadrant) - 1 + quadrants = int(quadrants) + magnitude = int(math.sqrt(quadrants)) + row = int(quadrant / magnitude) + column = quadrant % magnitude + 
resize_w = evenify(main_resize_w * magnitude) + resize_h = evenify(main_resize_h * magnitude) + offset_x = main_offset_x * magnitude + offset_y = main_offset_y * magnitude + crop_offset_x = (column * main_crop_w) + offset_x + crop_offset_y = (row * main_crop_h) + offset_y + return resize_w, resize_h, crop_offset_x, crop_offset_y + + def compute_percent_zoom(self, zoom_percent, main_resize_w, main_resize_h, main_offset_x, main_offset_y, main_crop_w, main_crop_h): + magnitude = zoom_percent / 100.0 + resize_w = evenify(main_resize_w * magnitude) + resize_h = evenify(main_resize_h * magnitude) + if self.crop_offset_x == -1: + crop_offset_x = ((resize_w - main_crop_w) / 2.0) + else: + crop_offset_x = main_offset_x * magnitude + if self.crop_offset_y == -1: + crop_offset_y = ((resize_h - main_crop_h) / 2.0) + else: + crop_offset_y = main_offset_y * magnitude + return resize_w, resize_h, crop_offset_x, crop_offset_y def resize_scenes(self, log_fn, kept_scenes, remixer_settings): scenes_base_path = self.scenes_source_path(self.RESIZE_STEP) @@ -1625,89 +1681,50 @@ def resize_scenes(self, log_fn, kept_scenes, remixer_settings): resize_handled = False resize_hint = self.get_hint(self.scene_labels.get(scene_name), "R") if resize_hint: - # use the main resize/crop settings if resizing, or the content native - # dimensions if not, as a foundation for handling resize hints - if self.resize: - main_resize_w = self.resize_w - main_resize_h = self.resize_h - main_crop_w = self.crop_w - main_crop_h = self.crop_h - if self.crop_offset_x < 0: - main_offset_x = (main_resize_w - main_crop_w) / 2.0 - else: - main_offset_x = self.crop_offset_x - if self.crop_offset_y < 0: - main_offset_y = (main_resize_h - main_crop_h) / 2.0 - else: - main_offset_y = self.crop_offset_y - else: - main_resize_w = content_width - main_resize_h = content_height - main_crop_w = content_width - main_crop_h = content_height - main_offset_x = 0 - main_offset_y = 0 + main_resize_w, main_resize_h, main_crop_w, main_crop_h, main_offset_x, \ + main_offset_y = self.setup_resize_hint(content_width, content_width) try: - if "/" in resize_hint: - if len(resize_hint) >= 3: - # interpret 'x/y' as - # x: quadrant, y: square number of quadrants - # '5/9' and '13/25' would be the center squares of 3x3 and 5x5 grids - # zoomed in at 300% and 500% - split_pos = resize_hint.index("/") - quadrant = resize_hint[:split_pos] - quadrants = resize_hint[split_pos+1:] - if quadrant and quadrants: - quadrant = int(quadrant) - 1 - quadrants = int(quadrants) - magnitude = int(math.sqrt(quadrants)) - row = int(quadrant / magnitude) - column = quadrant % magnitude - - resize_w = evenify(main_resize_w * magnitude) - resize_h = evenify(main_resize_h * magnitude) - main_offset_x *= magnitude - main_offset_y *= magnitude - crop_offset_x = (column * main_crop_w) + main_offset_x - crop_offset_y = (row * main_crop_h) + main_offset_y - scale_type = remixer_settings["scale_type_up"] - - self.resize_scene(log_fn, - scene_input_path, - scene_output_path, - int(resize_w), - int(resize_h), - int(main_crop_w), - int(main_crop_h), - int(crop_offset_x), - int(crop_offset_y), - scale_type, - crop_type="crop") - resize_handled = True - - elif "%" in resize_hint: - if len(resize_hint) >= 4: + if "/" in resize_hint and len(resize_hint) >= 3: + # interpret 'x/y' as x: quadrant, y: square-based number of quadrants + # '5/9' and '13/25' would be the center squares of 3x3 and 5x5 grids + # zoomed in at 300% and 500% + split_pos = resize_hint.index("/") + quadrant = resize_hint[:split_pos] 
+ quadrants = resize_hint[split_pos+1:] + if quadrant and quadrants: + resize_w, resize_h, crop_offset_x, crop_offset_y = \ + self.compute_quadrant_zoom(quadrant, quadrants, + main_resize_w, main_resize_h, + main_offset_x, main_offset_y, + main_crop_w, main_crop_h) + + scale_type = remixer_settings["scale_type_up"] + self.resize_scene(log_fn, + scene_input_path, + scene_output_path, + int(resize_w), + int(resize_h), + int(main_crop_w), + int(main_crop_h), + int(crop_offset_x), + int(crop_offset_y), + scale_type, + crop_type="crop") + resize_handled = True + + elif "%" in resize_hint and len(resize_hint) >= 4: # interpret z% as zoom percent to zoom into center zoom_percent = int(resize_hint.replace("%", "")) if zoom_percent >= 100: - magnitude = zoom_percent / 100.0 + resize_w, resize_h, crop_offset_x, crop_offset_y = \ + self.compute_percent_zoom(zoom_percent, + main_resize_w, main_resize_h, + main_offset_x, main_offset_y, + main_crop_w, main_crop_h) - resize_w = evenify(main_resize_w * magnitude) - resize_h = evenify(main_resize_h * magnitude) - - if self.crop_offset_x == -1: - crop_offset_x = ((resize_w - main_crop_w) / 2.0) - else: - crop_offset_x = main_offset_x * magnitude - - if self.crop_offset_y == -1: - crop_offset_y = ((resize_h - main_crop_h) / 2.0) - else: - crop_offset_y + main_offset_y * magnitude scale_type = remixer_settings["scale_type_up"] - self.resize_scene(log_fn, scene_input_path, scene_output_path, From 659b596dae0ba11bfa5c8d6ae226d3e6a6fccfdd Mon Sep 17 00:00:00 2001 From: Jerry Hogsett Date: Sat, 9 Mar 2024 11:42:16 -0800 Subject: [PATCH 011/152] wip --- video_remixer.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/video_remixer.py b/video_remixer.py index f6a38fd1..e40ad497 100644 --- a/video_remixer.py +++ b/video_remixer.py @@ -1233,7 +1233,8 @@ def resize_chosen(self): return self.resize or self.hint_present("R") def resize_needed(self): - return self.resize_chosen() and not self.processed_content_complete(self.RESIZE_STEP) + return (self.resize and not self.processed_content_complete(self.RESIZE_STEP)) \ + or self.resize_chosen() def resynthesize_chosen(self): return self.resynthesize or self.hint_present("Y") @@ -1682,7 +1683,7 @@ def resize_scenes(self, log_fn, kept_scenes, remixer_settings): resize_hint = self.get_hint(self.scene_labels.get(scene_name), "R") if resize_hint: main_resize_w, main_resize_h, main_crop_w, main_crop_h, main_offset_x, \ - main_offset_y = self.setup_resize_hint(content_width, content_width) + main_offset_y = self.setup_resize_hint(content_width, content_height) try: if "/" in resize_hint and len(resize_hint) >= 3: From 0a5f25b4ba071f9630e7061332e443ddcaf29da7 Mon Sep 17 00:00:00 2001 From: Jerry Hogsett Date: Sat, 9 Mar 2024 14:22:40 -0800 Subject: [PATCH 012/152] animated zooming first light --- resize_frames.py | 36 +++++----- video_remixer.py | 169 +++++++++++++++++++++++++++++++++++++++++++---- 2 files changed, 172 insertions(+), 33 deletions(-) diff --git a/resize_frames.py b/resize_frames.py index 2f2d8be2..ee1d7204 100644 --- a/resize_frames.py +++ b/resize_frames.py @@ -65,8 +65,7 @@ def __init__(self, crop_height : int=-1, crop_offset_x : int=-1, crop_offset_y : int=-1, - crop_type : str="none", - params_fn : Callable | None=None): + crop_type : str="none"): self.input_path = input_path self.output_path = output_path self.scale_width = scale_width @@ -78,7 +77,6 @@ def __init__(self, self.crop_offset_x = crop_offset_x self.crop_offset_y = crop_offset_y self.crop_type = crop_type - self.params_fn = 
params_fn def get_scale_type(self, scale_type : str) -> int: try: @@ -102,11 +100,11 @@ def get_crop_type(self, crop_type : str) -> bool: except KeyError: raise ValueError(f"The crop type {crop_type} is unknown") - def resize(self, type : str="png") -> None: + def resize(self, type : str="png", params_fn : Callable | None=None, params_context : any=None) -> None: """Invoke the Resize Frames feature""" - if not self.scale_width: + if not self.scale_width and not params_fn: raise ValueError("scale_width must be provided") - if not self.scale_height: + if not self.scale_height and not params_fn: raise ValueError("scale_height must be provided") files = sorted(glob.glob(os.path.join(self.input_path, "*." + type))) @@ -118,28 +116,29 @@ def resize(self, type : str="png") -> None: with Mtqdm().open_bar(len(files), desc="Resizing") as bar: for index, file in enumerate(files): - self.log(f"processing {file}") image = cv2.imread(file) - if self.params_fn: - self.log(f"calling 'param_fn' to get parameters") - scale_width, \ - scale_height, \ - crop_width, \ - crop_height, \ - crop_offset_x, \ - crop_offset_y = self.params_fn(index) + if params_fn: + try: + self.log(f"calling 'param_fn' to get parameters") + scale_width, \ + scale_height, \ + crop_offset_x, \ + crop_offset_y = params_fn(index, params_context) + except Exception as error: + print(error) + 1/0 else: scale_width = self.scale_width scale_height = self.scale_height - crop_width = self.crop_width - crop_height = self.crop_height crop_offset_x = self.crop_offset_x crop_offset_y = self.crop_offset_y + crop_width = self.crop_width + crop_height = self.crop_height + if scale_type: size = (scale_width, scale_height) - self.log(f"resizing {file} to {scale_width}x{scale_height}") image = cv2.resize(image, size, interpolation=scale_type) if crop_type: @@ -161,7 +160,6 @@ def resize(self, type : str="png") -> None: _, filename, ext = split_filepath(file) output_filepath = os.path.join(self.output_path, f"{filename}{ext}") - self.log(f"saving resized file {output_filepath}") cv2.imwrite(output_filepath, image) Mtqdm().update_bar(bar) diff --git a/video_remixer.py b/video_remixer.py index e40ad497..bd84c9bb 100644 --- a/video_remixer.py +++ b/video_remixer.py @@ -1595,7 +1595,8 @@ def resize_scene(self, crop_offset_y, scale_type, crop_type, - params_fn : Callable | None = None): + params_fn : Callable | None = None, + params_context : any=None): ResizeFrames(scene_input_path, scene_output_path, @@ -1607,8 +1608,8 @@ def resize_scene(self, crop_width=crop_w, crop_height=crop_h, crop_offset_x=crop_offset_x, - crop_offset_y=crop_offset_y, - params_fn=params_fn).resize(type=self.frame_format) + crop_offset_y=crop_offset_y).resize(type=self.frame_format, params_fn=params_fn, + params_context=params_context) def setup_resize_hint(self, content_width, content_height): # use the main resize/crop settings if resizing, or the content native @@ -1635,6 +1636,25 @@ def setup_resize_hint(self, content_width, content_height): main_offset_y = 0 return main_resize_w, main_resize_h, main_crop_w, main_crop_h, main_offset_x, main_offset_y + QUADRANT_ZOOM_HINT = "/" + PERCENT_ZOOM_HINT = "%" + ANIMATED_ZOOM_HINT = "-" + QUADRANT_ZOOM_MIN_LEN = 3 # 1/3 + PERCENT_ZOOM_MIN_LEN = 4 # 123% + ANIMATED_ZOOM_MIN_LEN = 7 # 1/3-5/7 + + def get_quadrant_zoom(self, hint): + if self.QUADRANT_ZOOM_HINT in hint: + if len(hint) >= self.QUADRANT_ZOOM_MIN_LEN: + split_pos = hint.index(self.QUADRANT_ZOOM_HINT) + quadrant = hint[:split_pos] + quadrants = hint[split_pos+1:] + else: + 
quadrant, quadrants = 1, 1 + return quadrant, quadrants + else: + return None, None + def compute_quadrant_zoom(self, quadrant, quadrants, main_resize_w, main_resize_h, main_offset_x, main_offset_y, main_crop_w, main_crop_h): quadrant = int(quadrant) - 1 quadrants = int(quadrants) @@ -1649,6 +1669,16 @@ def compute_quadrant_zoom(self, quadrant, quadrants, main_resize_w, main_resize_ crop_offset_y = (row * main_crop_h) + offset_y return resize_w, resize_h, crop_offset_x, crop_offset_y + def get_percent_zoom(self, hint): + if self.PERCENT_ZOOM_HINT in hint: + if len(hint) >= self.PERCENT_ZOOM_MIN_LEN: + zoom_percent = int(hint.replace(self.PERCENT_ZOOM_HINT, "")) + if zoom_percent >= 100: + return zoom_percent + return 100 + else: + return None + def compute_percent_zoom(self, zoom_percent, main_resize_w, main_resize_h, main_offset_x, main_offset_y, main_crop_w, main_crop_h): magnitude = zoom_percent / 100.0 resize_w = evenify(main_resize_w * magnitude) @@ -1663,6 +1693,64 @@ def compute_percent_zoom(self, zoom_percent, main_resize_w, main_resize_h, main_ crop_offset_y = main_offset_y * magnitude return resize_w, resize_h, crop_offset_x, crop_offset_y + def compute_zoom_type(self, type, param1, param2, main_resize_w, main_resize_h, main_offset_x, main_offset_y, main_crop_w, main_crop_h): + if type == self.QUADRANT_ZOOM_HINT: + quadrant, quadrants = param1, param2 + if quadrant and quadrants: + return self.compute_quadrant_zoom(quadrant, quadrants, + main_resize_w, main_resize_h, + main_offset_x, main_offset_y, + main_crop_w, main_crop_h) + elif type == self.PERCENT_ZOOM_HINT: + zoom_percent = param1 + if zoom_percent: + return self.compute_percent_zoom(zoom_percent, + main_resize_w, main_resize_h, + main_offset_x, main_offset_y, + main_crop_w, main_crop_h) + + def get_animated_zoom_part(self, hint): + if self.QUADRANT_ZOOM_HINT in hint and len(hint) >= self.QUADRANT_ZOOM_MIN_LEN: + type = self.QUADRANT_ZOOM_HINT + quadrant, quadrants = self.get_quadrant_zoom(hint) + return type, quadrant, quadrants + elif self.PERCENT_ZOOM_HINT in hint and len(hint) >= self.PERCENT_ZOOM_MIN_LEN: + type = self.PERCENT_ZOOM_HINT + self.get_percent_zoom(hint) + zoom_percent = self.get_percent_zoom(hint) + return type, zoom_percent, None + return None, None, None + + def get_animated_zoom(self, hint): + if self.ANIMATED_ZOOM_HINT in hint: + if len(hint) >= self.ANIMATED_ZOOM_MIN_LEN: + split_pos = hint.index(self.ANIMATED_ZOOM_HINT) + hint_from = hint[:split_pos] + hint_to = hint[split_pos+1:] + from_type, from_param1, from_param2 = self.get_animated_zoom_part(hint_from) + to_type, to_param1, to_param2 = self.get_animated_zoom_part(hint_to) + if from_type and to_type: + return from_type, from_param1, from_param2, to_type, to_param1, to_param2 + return None, None, None, None, None, None + + def _resize_frame_param(self, index, context): + from_resize_w = context["from_resize_w"] + from_resize_h = context["from_resize_h"] + from_crop_offset_x = context["from_crop_offset_x"] + from_crop_offset_y = context["from_crop_offset_y"] + step_resize_w = context["step_resize_w"] + step_resize_h = context["step_resize_h"] + step_crop_offset_x = context["step_crop_offset_x"] + step_crop_offset_y = context["step_crop_offset_y"] + + print(index, context) + + return \ + int(from_resize_w + (index * step_resize_w)), \ + int(from_resize_h + (index * step_resize_h)), \ + int(from_crop_offset_x + (index * step_crop_offset_x)), \ + int(from_crop_offset_y + (index * step_crop_offset_y)) + def resize_scenes(self, log_fn, kept_scenes, 
remixer_settings): scenes_base_path = self.scenes_source_path(self.RESIZE_STEP) create_directory(self.resize_path) @@ -1686,13 +1774,68 @@ def resize_scenes(self, log_fn, kept_scenes, remixer_settings): main_offset_y = self.setup_resize_hint(content_width, content_height) try: - if "/" in resize_hint and len(resize_hint) >= 3: + if self.ANIMATED_ZOOM_HINT in resize_hint: + # interprent 'any-any' as animating from one to the other zoom factor + from_type, from_param1, from_param2, to_type, to_param1, to_param2 = \ + self.get_animated_zoom(resize_hint) + + if from_type and to_type: + from_resize_w, from_resize_h, from_crop_offset_x, from_crop_offset_y = \ + self.compute_zoom_type(from_type, from_param1, from_param2, + main_resize_w, main_resize_h, + main_offset_x, main_offset_y, + main_crop_w, main_crop_h) + to_resize_w, to_resize_h, to_crop_offset_x, to_crop_offset_y = \ + self.compute_zoom_type(to_type, to_param1, to_param2, + main_resize_w, main_resize_h, + main_offset_x, main_offset_y, + main_crop_w, main_crop_h) + print(from_resize_w, from_resize_h, from_crop_offset_x, from_crop_offset_y) + print(to_resize_w, to_resize_h, to_crop_offset_x, to_crop_offset_y) + + first_frame, last_frame, _ = details_from_group_name(scene_name) + num_frames = (last_frame - first_frame) + 1 + diff_resize_w = to_resize_w - from_resize_w + diff_resize_h = to_resize_h - from_resize_h + diff_crop_offset_x = to_crop_offset_x - from_crop_offset_x + diff_crop_offset_y = to_crop_offset_y - from_crop_offset_y + + step_resize_w = diff_resize_w / num_frames + step_resize_h = diff_resize_h / num_frames + step_crop_offset_x = diff_crop_offset_x / num_frames + step_crop_offset_y = diff_crop_offset_y / num_frames + + context = {} + context["from_resize_w"] = from_resize_w + context["from_resize_h"] = from_resize_h + context["from_crop_offset_x"] = from_crop_offset_x + context["from_crop_offset_y"] = from_crop_offset_y + context["step_resize_w"] = step_resize_w + context["step_resize_h"] = step_resize_h + context["step_crop_offset_x"] = step_crop_offset_x + context["step_crop_offset_y"] = step_crop_offset_y + + scale_type = remixer_settings["scale_type_up"] + self.resize_scene(log_fn, + scene_input_path, + scene_output_path, + None, + None, + main_crop_w, + main_crop_h, + None, + None, + scale_type, + crop_type="crop", + params_fn=self._resize_frame_param, + params_context=context) + resize_handled = True + + elif self.PERCENT_ZOOM_HINT in resize_hint: # interpret 'x/y' as x: quadrant, y: square-based number of quadrants # '5/9' and '13/25' would be the center squares of 3x3 and 5x5 grids # zoomed in at 300% and 500% - split_pos = resize_hint.index("/") - quadrant = resize_hint[:split_pos] - quadrants = resize_hint[split_pos+1:] + quadrant, quadrants = self.get_quadrant_zoom(resize_hint) if quadrant and quadrants: resize_w, resize_h, crop_offset_x, crop_offset_y = \ self.compute_quadrant_zoom(quadrant, quadrants, @@ -1714,17 +1857,16 @@ def resize_scenes(self, log_fn, kept_scenes, remixer_settings): crop_type="crop") resize_handled = True - elif "%" in resize_hint and len(resize_hint) >= 4: + elif self.PERCENT_ZOOM_HINT in resize_hint: # interpret z% as zoom percent to zoom into center - zoom_percent = int(resize_hint.replace("%", "")) - if zoom_percent >= 100: + zoom_percent = self.get_percent_zoom(resize_hint) + if zoom_percent: resize_w, resize_h, crop_offset_x, crop_offset_y = \ self.compute_percent_zoom(zoom_percent, main_resize_w, main_resize_h, main_offset_x, main_offset_y, main_crop_w, main_crop_h) - scale_type = 
remixer_settings["scale_type_up"] self.resize_scene(log_fn, scene_input_path, @@ -1738,10 +1880,9 @@ def resize_scenes(self, log_fn, kept_scenes, remixer_settings): scale_type, crop_type="crop") resize_handled = True - else: - # zooming out past 100% isn't supported - log_fn(f"resize_scenes() ignoring unsupported zoom {zoom_percent}%") except Exception as error: + print(error) + 1/0 log_fn( f"Error in resize_scenes() handling processing hint {resize_hint} - skipping processing: {error}") resize_handled = False From 8d37e9a08d941921f44050a661a78bad281333e7 Mon Sep 17 00:00:00 2001 From: Jerry Hogsett Date: Sat, 9 Mar 2024 21:24:35 -0800 Subject: [PATCH 013/152] work out animated combined zoom --- resequence_files.py | 2 - resize_frames.py | 14 +-- tabs/video_remixer_ui.py | 5 + video_remixer.py | 193 ++++++++++++++++++++++++++++----------- 4 files changed, 150 insertions(+), 64 deletions(-) diff --git a/resequence_files.py b/resequence_files.py index 42d59bec..80d0106f 100644 --- a/resequence_files.py +++ b/resequence_files.py @@ -181,11 +181,9 @@ def resequence(self, ignore_name_clash=True) -> None: if self.rename: new_filepath = os.path.join(self.input_path, new_filename) os.replace(old_filepath, new_filepath) - self.log(f"File {file} renamed to {new_filepath}") else: new_filepath = os.path.join(self.output_path, new_filename) shutil.copy(old_filepath, new_filepath) - self.log(f"File {file} copied to {new_filepath}") running_index += self.index_step Mtqdm().update_bar(bar) diff --git a/resize_frames.py b/resize_frames.py index ee1d7204..62dc7892 100644 --- a/resize_frames.py +++ b/resize_frames.py @@ -119,15 +119,11 @@ def resize(self, type : str="png", params_fn : Callable | None=None, params_cont image = cv2.imread(file) if params_fn: - try: - self.log(f"calling 'param_fn' to get parameters") - scale_width, \ - scale_height, \ - crop_offset_x, \ - crop_offset_y = params_fn(index, params_context) - except Exception as error: - print(error) - 1/0 + self.log(f"calling 'param_fn' to get parameters") + scale_width, \ + scale_height, \ + crop_offset_x, \ + crop_offset_y = params_fn(index, params_context) else: scale_width = self.scale_width scale_height = self.scale_height diff --git a/tabs/video_remixer_ui.py b/tabs/video_remixer_ui.py index ba52a1ad..08dfb22d 100644 --- a/tabs/video_remixer_ui.py +++ b/tabs/video_remixer_ui.py @@ -2061,6 +2061,11 @@ def next_button4(self): self.log("purging now-stale remix content") self.state.clean_remix_content(purge_from="audio_clips") + # TODO reconcile with the line above + # Compiling scenes implies a last state before processing, + # and the user may expect that all content will be processed + self.state.processed_content_invalid = True + # user will expect to return to the processing tab on reopening self.log("saving project after compiling scenes") self.save_progress("process") diff --git a/video_remixer.py b/video_remixer.py index bd84c9bb..79712d9b 100644 --- a/video_remixer.py +++ b/video_remixer.py @@ -1638,9 +1638,11 @@ def setup_resize_hint(self, content_width, content_height): QUADRANT_ZOOM_HINT = "/" PERCENT_ZOOM_HINT = "%" + COMBINED_ZOOM_HINT = "@" ANIMATED_ZOOM_HINT = "-" QUADRANT_ZOOM_MIN_LEN = 3 # 1/3 PERCENT_ZOOM_MIN_LEN = 4 # 123% + COMBINED_ZOOM_MIN_LEN = 8 # 1/1@100% ANIMATED_ZOOM_MIN_LEN = 7 # 1/3-5/7 def get_quadrant_zoom(self, hint): @@ -1655,7 +1657,7 @@ def get_quadrant_zoom(self, hint): else: return None, None - def compute_quadrant_zoom(self, quadrant, quadrants, main_resize_w, main_resize_h, main_offset_x, main_offset_y, 
main_crop_w, main_crop_h): + def compute_quadrant_zoom(self, quadrant, quadrants, main_resize_w, main_resize_h, main_offset_x, main_offset_y, main_crop_w, main_crop_h, centered=False): quadrant = int(quadrant) - 1 quadrants = int(quadrants) magnitude = int(math.sqrt(quadrants)) @@ -1665,8 +1667,23 @@ def compute_quadrant_zoom(self, quadrant, quadrants, main_resize_w, main_resize_ resize_h = evenify(main_resize_h * magnitude) offset_x = main_offset_x * magnitude offset_y = main_offset_y * magnitude - crop_offset_x = (column * main_crop_w) + offset_x - crop_offset_y = (row * main_crop_h) + offset_y + + if centered: + # move the offset to the center of the grid box + centering_offset_x = main_crop_w / 2 + centering_offset_y = main_crop_h / 2 + else: + centering_offset_x = 0 + centering_offset_y = 0 + + crop_offset_x = (column * main_crop_w) + offset_x + centering_offset_x + crop_offset_y = (row * main_crop_h) + offset_y + centering_offset_y + + if crop_offset_x + main_crop_w > resize_w: + crop_offset_x = resize_w - main_crop_w + if crop_offset_y + main_crop_h > resize_h: + crop_offset_y =resize_h - main_crop_h + return resize_w, resize_h, crop_offset_x, crop_offset_y def get_percent_zoom(self, hint): @@ -1693,8 +1710,29 @@ def compute_percent_zoom(self, zoom_percent, main_resize_w, main_resize_h, main_ crop_offset_y = main_offset_y * magnitude return resize_w, resize_h, crop_offset_x, crop_offset_y - def compute_zoom_type(self, type, param1, param2, main_resize_w, main_resize_h, main_offset_x, main_offset_y, main_crop_w, main_crop_h): - if type == self.QUADRANT_ZOOM_HINT: + # TODO this doesn't always work as expected, something like, the magnitudes are different + # between both, so things don't line up + def compute_combined_zoom(self, quadrant, quadrants, zoom_percent, main_resize_w, main_resize_h, main_offset_x, main_offset_y, main_crop_w, main_crop_h): + resize_w, resize_h, _, _ = self.compute_percent_zoom(zoom_percent, + main_resize_w, main_resize_h, + main_offset_x, main_offset_y, + main_crop_w, main_crop_h) + + _, _, crop_offset_x, crop_offset_y = self.compute_quadrant_zoom(quadrant, quadrants, + main_resize_w, main_resize_h, + main_offset_x, main_offset_y, + main_crop_w, main_crop_h, centered=True) + return resize_w, resize_h, crop_offset_x, crop_offset_y + + def compute_zoom_type(self, type, param1, param2, param3, main_resize_w, main_resize_h, main_offset_x, main_offset_y, main_crop_w, main_crop_h): + if type == self.COMBINED_ZOOM_HINT: + quadrant, quadrants, zoom_percent = param1, param2, param3 + if quadrant and quadrants and zoom_percent: + return self.compute_combined_zoom(quadrant, quadrants, zoom_percent, + main_resize_w, main_resize_h, + main_offset_x, main_offset_y, + main_crop_w, main_crop_h) + elif type == self.QUADRANT_ZOOM_HINT: quadrant, quadrants = param1, param2 if quadrant and quadrants: return self.compute_quadrant_zoom(quadrant, quadrants, @@ -1702,23 +1740,44 @@ def compute_zoom_type(self, type, param1, param2, main_resize_w, main_resize_h, main_offset_x, main_offset_y, main_crop_w, main_crop_h) elif type == self.PERCENT_ZOOM_HINT: - zoom_percent = param1 + zoom_percent = param3 if zoom_percent: return self.compute_percent_zoom(zoom_percent, main_resize_w, main_resize_h, main_offset_x, main_offset_y, main_crop_w, main_crop_h) - def get_animated_zoom_part(self, hint): + def get_zoom_part(self, hint): + if self.COMBINED_ZOOM_HINT in hint and len(hint) >= self.COMBINED_ZOOM_MIN_LEN: + type = self.COMBINED_ZOOM_HINT + quadrant, quadrants, zoom_percent = 
self.get_combined_zoom(hint) + return type, quadrant, quadrants, zoom_percent if self.QUADRANT_ZOOM_HINT in hint and len(hint) >= self.QUADRANT_ZOOM_MIN_LEN: type = self.QUADRANT_ZOOM_HINT quadrant, quadrants = self.get_quadrant_zoom(hint) - return type, quadrant, quadrants + return type, quadrant, quadrants, None elif self.PERCENT_ZOOM_HINT in hint and len(hint) >= self.PERCENT_ZOOM_MIN_LEN: type = self.PERCENT_ZOOM_HINT self.get_percent_zoom(hint) zoom_percent = self.get_percent_zoom(hint) - return type, zoom_percent, None + return type, None, None, zoom_percent + return None, None, None, None + + def get_combined_zoom(self, hint): + if self.COMBINED_ZOOM_HINT in hint: + if len(hint) >= self.COMBINED_ZOOM_MIN_LEN: + split_pos = hint.index(self.COMBINED_ZOOM_HINT) + hint_a = hint[:split_pos] + hint_b = hint[split_pos+1:] + a_type, a_quadrant, a_quadrants, a_zoom_percent = self.get_zoom_part(hint_a) + b_type, b_quadrant, b_quadrants, b_zoom_percent = self.get_zoom_part(hint_b) + if a_type == self.PERCENT_ZOOM_HINT and b_type == self.QUADRANT_ZOOM_HINT: + zoom_percent = a_zoom_percent + quadrant, quadrants = b_quadrant, b_quadrants + elif a_type == self.QUADRANT_ZOOM_HINT and b_type == self.PERCENT_ZOOM_HINT: + zoom_percent = b_zoom_percent + quadrant, quadrants = a_quadrant, a_quadrants + return quadrant, quadrants, zoom_percent return None, None, None def get_animated_zoom(self, hint): @@ -1727,11 +1786,49 @@ def get_animated_zoom(self, hint): split_pos = hint.index(self.ANIMATED_ZOOM_HINT) hint_from = hint[:split_pos] hint_to = hint[split_pos+1:] - from_type, from_param1, from_param2 = self.get_animated_zoom_part(hint_from) - to_type, to_param1, to_param2 = self.get_animated_zoom_part(hint_to) + from_type, from_param1, from_param2, from_param3 = self.get_zoom_part(hint_from) + to_type, to_param1, to_param2, to_param3 = self.get_zoom_part(hint_to) if from_type and to_type: - return from_type, from_param1, from_param2, to_type, to_param1, to_param2 - return None, None, None, None, None, None + return from_type, from_param1, from_param2, from_param3, to_type, to_param1, to_param2, to_param3 + return None, None, None, None, None, None, None, None + + def compute_animated_zoom(self, num_frames, from_type, from_param1, from_param2, from_param3, + to_type, to_param1, to_param2, to_param3, + main_resize_w, main_resize_h, main_offset_x, main_offset_y, + main_crop_w, main_crop_h): + + from_resize_w, from_resize_h, from_crop_offset_x, from_crop_offset_y = \ + self.compute_zoom_type(from_type, from_param1, from_param2, from_param3, + main_resize_w, main_resize_h, + main_offset_x, main_offset_y, + main_crop_w, main_crop_h) + + to_resize_w, to_resize_h, to_crop_offset_x, to_crop_offset_y = \ + self.compute_zoom_type(to_type, to_param1, to_param2, to_param3, + main_resize_w, main_resize_h, + main_offset_x, main_offset_y, + main_crop_w, main_crop_h) + + diff_resize_w = to_resize_w - from_resize_w + diff_resize_h = to_resize_h - from_resize_h + diff_crop_offset_x = to_crop_offset_x - from_crop_offset_x + diff_crop_offset_y = to_crop_offset_y - from_crop_offset_y + + step_resize_w = diff_resize_w / num_frames + step_resize_h = diff_resize_h / num_frames + step_crop_offset_x = diff_crop_offset_x / num_frames + step_crop_offset_y = diff_crop_offset_y / num_frames + + context = {} + context["from_resize_w"] = from_resize_w + context["from_resize_h"] = from_resize_h + context["from_crop_offset_x"] = from_crop_offset_x + context["from_crop_offset_y"] = from_crop_offset_y + context["step_resize_w"] = 
step_resize_w + context["step_resize_h"] = step_resize_h + context["step_crop_offset_x"] = step_crop_offset_x + context["step_crop_offset_y"] = step_crop_offset_y + return context def _resize_frame_param(self, index, context): from_resize_w = context["from_resize_w"] @@ -1743,8 +1840,6 @@ def _resize_frame_param(self, index, context): step_crop_offset_x = context["step_crop_offset_x"] step_crop_offset_y = context["step_crop_offset_y"] - print(index, context) - return \ int(from_resize_w + (index * step_resize_w)), \ int(from_resize_h + (index * step_resize_h)), \ @@ -1776,44 +1871,16 @@ def resize_scenes(self, log_fn, kept_scenes, remixer_settings): try: if self.ANIMATED_ZOOM_HINT in resize_hint: # interprent 'any-any' as animating from one to the other zoom factor - from_type, from_param1, from_param2, to_type, to_param1, to_param2 = \ + from_type, from_param1, from_param2, from_param3, to_type, to_param1, to_param2, to_param3 = \ self.get_animated_zoom(resize_hint) - if from_type and to_type: - from_resize_w, from_resize_h, from_crop_offset_x, from_crop_offset_y = \ - self.compute_zoom_type(from_type, from_param1, from_param2, - main_resize_w, main_resize_h, - main_offset_x, main_offset_y, - main_crop_w, main_crop_h) - to_resize_w, to_resize_h, to_crop_offset_x, to_crop_offset_y = \ - self.compute_zoom_type(to_type, to_param1, to_param2, - main_resize_w, main_resize_h, - main_offset_x, main_offset_y, - main_crop_w, main_crop_h) - print(from_resize_w, from_resize_h, from_crop_offset_x, from_crop_offset_y) - print(to_resize_w, to_resize_h, to_crop_offset_x, to_crop_offset_y) - first_frame, last_frame, _ = details_from_group_name(scene_name) num_frames = (last_frame - first_frame) + 1 - diff_resize_w = to_resize_w - from_resize_w - diff_resize_h = to_resize_h - from_resize_h - diff_crop_offset_x = to_crop_offset_x - from_crop_offset_x - diff_crop_offset_y = to_crop_offset_y - from_crop_offset_y - - step_resize_w = diff_resize_w / num_frames - step_resize_h = diff_resize_h / num_frames - step_crop_offset_x = diff_crop_offset_x / num_frames - step_crop_offset_y = diff_crop_offset_y / num_frames - - context = {} - context["from_resize_w"] = from_resize_w - context["from_resize_h"] = from_resize_h - context["from_crop_offset_x"] = from_crop_offset_x - context["from_crop_offset_y"] = from_crop_offset_y - context["step_resize_w"] = step_resize_w - context["step_resize_h"] = step_resize_h - context["step_crop_offset_x"] = step_crop_offset_x - context["step_crop_offset_y"] = step_crop_offset_y + context = self.compute_animated_zoom(num_frames, + from_type, from_param1, from_param2, from_param3, + to_type, to_param1, to_param2, to_param3, + main_resize_w, main_resize_h, main_offset_x, main_offset_y, + main_crop_w, main_crop_h) scale_type = remixer_settings["scale_type_up"] self.resize_scene(log_fn, @@ -1831,7 +1898,30 @@ def resize_scenes(self, log_fn, kept_scenes, remixer_settings): params_context=context) resize_handled = True - elif self.PERCENT_ZOOM_HINT in resize_hint: + elif self.COMBINED_ZOOM_HINT in resize_hint: + quadrant, quadrants, zoom_percent = self.get_combined_zoom(resize_hint) + if quadrant and quadrants and zoom_percent: + resize_w, resize_h, crop_offset_x, crop_offset_y = \ + self.compute_combined_zoom(quadrant, quadrants, zoom_percent, + main_resize_w, main_resize_h, + main_offset_x, main_offset_y, + main_crop_w, main_crop_h) + + scale_type = remixer_settings["scale_type_up"] + self.resize_scene(log_fn, + scene_input_path, + scene_output_path, + int(resize_w), + int(resize_h), + 
int(main_crop_w), + int(main_crop_h), + int(crop_offset_x), + int(crop_offset_y), + scale_type, + crop_type="crop") + resize_handled = True + + elif self.QUADRANT_ZOOM_HINT in resize_hint: # interpret 'x/y' as x: quadrant, y: square-based number of quadrants # '5/9' and '13/25' would be the center squares of 3x3 and 5x5 grids # zoomed in at 300% and 500% @@ -1866,7 +1956,6 @@ def resize_scenes(self, log_fn, kept_scenes, remixer_settings): main_resize_w, main_resize_h, main_offset_x, main_offset_y, main_crop_w, main_crop_h) - scale_type = remixer_settings["scale_type_up"] self.resize_scene(log_fn, scene_input_path, @@ -1881,8 +1970,6 @@ def resize_scenes(self, log_fn, kept_scenes, remixer_settings): crop_type="crop") resize_handled = True except Exception as error: - print(error) - 1/0 log_fn( f"Error in resize_scenes() handling processing hint {resize_hint} - skipping processing: {error}") resize_handled = False From 0e82a619a342a0a19fdf4b62d9ba716f001ce9d0 Mon Sep 17 00:00:00 2001 From: Jerry Hogsett Date: Mon, 11 Mar 2024 18:42:24 -0700 Subject: [PATCH 014/152] wip --- video_remixer.py | 77 ++++++++++++++++++++++++++++++++++-------------- 1 file changed, 55 insertions(+), 22 deletions(-) diff --git a/video_remixer.py b/video_remixer.py index 79712d9b..402fa00c 100644 --- a/video_remixer.py +++ b/video_remixer.py @@ -1637,6 +1637,7 @@ def setup_resize_hint(self, content_width, content_height): return main_resize_w, main_resize_h, main_crop_w, main_crop_h, main_offset_x, main_offset_y QUADRANT_ZOOM_HINT = "/" + QUADRANT_GRID_CHAR = "X" PERCENT_ZOOM_HINT = "%" COMBINED_ZOOM_HINT = "@" ANIMATED_ZOOM_HINT = "-" @@ -1653,38 +1654,67 @@ def get_quadrant_zoom(self, hint): quadrants = hint[split_pos+1:] else: quadrant, quadrants = 1, 1 + + print(quadrant, quadrants) + return quadrant, quadrants else: return None, None def compute_quadrant_zoom(self, quadrant, quadrants, main_resize_w, main_resize_h, main_offset_x, main_offset_y, main_crop_w, main_crop_h, centered=False): quadrant = int(quadrant) - 1 - quadrants = int(quadrants) - magnitude = int(math.sqrt(quadrants)) - row = int(quadrant / magnitude) - column = quadrant % magnitude - resize_w = evenify(main_resize_w * magnitude) - resize_h = evenify(main_resize_h * magnitude) - offset_x = main_offset_x * magnitude - offset_y = main_offset_y * magnitude - - if centered: - # move the offset to the center of the grid box - centering_offset_x = main_crop_w / 2 - centering_offset_y = main_crop_h / 2 + if self.QUADRANT_GRID_CHAR in quadrants: + parts = quadrants.split(self.QUADRANT_GRID_CHAR) + if len(parts) == 2: + grid_x = int(parts[0]) + grid_y = int(parts[1]) + magnitude_x = grid_x + magnitude_y = grid_y + magnitude = max(magnitude_x, magnitude_y) + row = int(quadrant / magnitude) + column = quadrant % magnitude + else: + magnitude = 1 + magnitude_x = magnitude + magnitude_y = magnitude + row = 0 + column = 0 else: - centering_offset_x = 0 - centering_offset_y = 0 + magnitude = int(math.sqrt(int(quadrants))) + magnitude_x = magnitude + magnitude_y = magnitude + row = int(quadrant / magnitude) + column = quadrant % magnitude + + resize_w = main_resize_w * magnitude + resize_h = main_resize_h * magnitude + crop_w = main_crop_w * magnitude + crop_h = main_crop_h * magnitude + + crop_offset_x = 0 + crop_offset_y = 0 + if main_offset_x >= 0: + crop_offset_x = main_offset_x * magnitude + if main_offset_y >= 0: + crop_offset_y = main_offset_y * magnitude - crop_offset_x = (column * main_crop_w) + offset_x + centering_offset_x - crop_offset_y = (row * 
main_crop_h) + offset_y + centering_offset_y + cell_width = crop_w / magnitude_x + cell_height = crop_h / magnitude_y + cell_centering_x = 0 + cell_centering_y = 0 + if cell_width > main_crop_w: + cell_centering_x = (cell_width - main_crop_w) / 2 + elif main_crop_w > cell_width: + cell_centering_x = (main_crop_w - cell_width) / 2 + if cell_height > main_crop_h: + cell_centering_y = (cell_height - main_crop_h) / 2 + elif main_crop_h > cell_height: + cell_centering_y = (main_crop_h - cell_height) / 2 - if crop_offset_x + main_crop_w > resize_w: - crop_offset_x = resize_w - main_crop_w - if crop_offset_y + main_crop_h > resize_h: - crop_offset_y =resize_h - main_crop_h + cell_offset_x = column * cell_width + crop_offset_x + cell_centering_x + cell_offset_y = row * cell_height + crop_offset_y + cell_centering_y - return resize_w, resize_h, crop_offset_x, crop_offset_y + return resize_w, resize_h, cell_offset_x, cell_offset_y def get_percent_zoom(self, hint): if self.PERCENT_ZOOM_HINT in hint: @@ -1722,6 +1752,7 @@ def compute_combined_zoom(self, quadrant, quadrants, zoom_percent, main_resize_w main_resize_w, main_resize_h, main_offset_x, main_offset_y, main_crop_w, main_crop_h, centered=True) + return resize_w, resize_h, crop_offset_x, crop_offset_y def compute_zoom_type(self, type, param1, param2, param3, main_resize_w, main_resize_h, main_offset_x, main_offset_y, main_crop_w, main_crop_h): @@ -1970,6 +2001,8 @@ def resize_scenes(self, log_fn, kept_scenes, remixer_settings): crop_type="crop") resize_handled = True except Exception as error: + print(error) + raise log_fn( f"Error in resize_scenes() handling processing hint {resize_hint} - skipping processing: {error}") resize_handled = False From 60ad16bce3808f3f7e8a56df310d161a7df3080c Mon Sep 17 00:00:00 2001 From: Jerry Hogsett Date: Tue, 12 Mar 2024 00:14:58 -0700 Subject: [PATCH 015/152] ensure all animation frames are rendered --- video_remixer.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/video_remixer.py b/video_remixer.py index 402fa00c..24e716d3 100644 --- a/video_remixer.py +++ b/video_remixer.py @@ -1845,6 +1845,9 @@ def compute_animated_zoom(self, num_frames, from_type, from_param1, from_param2, diff_crop_offset_x = to_crop_offset_x - from_crop_offset_x diff_crop_offset_y = to_crop_offset_y - from_crop_offset_y + # ensure the final transition occurs + num_frames -= 1 + step_resize_w = diff_resize_w / num_frames step_resize_h = diff_resize_h / num_frames step_crop_offset_x = diff_crop_offset_x / num_frames From 02a4c10b7ba7ba773d0c4264acfce67bb21d84f7 Mon Sep 17 00:00:00 2001 From: Jerry Hogsett Date: Tue, 12 Mar 2024 00:25:42 -0700 Subject: [PATCH 016/152] math close to working --- video_remixer.py | 16 +++++++++++++--- 1 file changed, 13 insertions(+), 3 deletions(-) diff --git a/video_remixer.py b/video_remixer.py index 24e716d3..fdbbeee1 100644 --- a/video_remixer.py +++ b/video_remixer.py @@ -1743,17 +1743,27 @@ def compute_percent_zoom(self, zoom_percent, main_resize_w, main_resize_h, main_ # TODO this doesn't always work as expected, something like, the magnitudes are different # between both, so things don't line up def compute_combined_zoom(self, quadrant, quadrants, zoom_percent, main_resize_w, main_resize_h, main_offset_x, main_offset_y, main_crop_w, main_crop_h): - resize_w, resize_h, _, _ = self.compute_percent_zoom(zoom_percent, + percent_resize_w, percent_resize_h, _, _ = self.compute_percent_zoom(zoom_percent, main_resize_w, main_resize_h, main_offset_x, main_offset_y, main_crop_w, main_crop_h) - _, _, 
crop_offset_x, crop_offset_y = self.compute_quadrant_zoom(quadrant, quadrants, + quadrant_resize_w, _, quadrant_offset_x, quadrant_offset_y = self.compute_quadrant_zoom(quadrant, quadrants, main_resize_w, main_resize_h, main_offset_x, main_offset_y, main_crop_w, main_crop_h, centered=True) - return resize_w, resize_h, crop_offset_x, crop_offset_y + scale = percent_resize_w / quadrant_resize_w + scaled_offset_x = quadrant_offset_x * scale + scaled_offset_y = quadrant_offset_y * scale + + # both return the upper left corner of a main_crop-sized rectangle in their resize domain + # adding 1/2 the main_crop size will give the centerpoint in that domain + # scaling by the difference in domain size will make the rectangles compatible + # want the centerpoint of the quadrant resize to dominate + # once scaled by the percent resize size, the quadrant offset will work without needing to compute centerpoints + + return percent_resize_w, percent_resize_h, scaled_offset_x, scaled_offset_y def compute_zoom_type(self, type, param1, param2, param3, main_resize_w, main_resize_h, main_offset_x, main_offset_y, main_crop_w, main_crop_h): if type == self.COMBINED_ZOOM_HINT: From 8a1c1717d638159688a7ae92713dd75e5b2db8f0 Mon Sep 17 00:00:00 2001 From: Jerry Hogsett Date: Tue, 12 Mar 2024 00:42:19 -0700 Subject: [PATCH 017/152] wip --- video_remixer.py | 168 ++++++++++++++++++++++++----------------------- 1 file changed, 86 insertions(+), 82 deletions(-) diff --git a/video_remixer.py b/video_remixer.py index fdbbeee1..17bcc8e2 100644 --- a/video_remixer.py +++ b/video_remixer.py @@ -1661,8 +1661,87 @@ def get_quadrant_zoom(self, hint): else: return None, None - def compute_quadrant_zoom(self, quadrant, quadrants, main_resize_w, main_resize_h, main_offset_x, main_offset_y, main_crop_w, main_crop_h, centered=False): + def get_percent_zoom(self, hint): + if self.PERCENT_ZOOM_HINT in hint: + if len(hint) >= self.PERCENT_ZOOM_MIN_LEN: + zoom_percent = int(hint.replace(self.PERCENT_ZOOM_HINT, "")) + if zoom_percent >= 100: + return zoom_percent + return 100 + else: + return None + + def get_zoom_part(self, hint): + if self.COMBINED_ZOOM_HINT in hint and len(hint) >= self.COMBINED_ZOOM_MIN_LEN: + type = self.COMBINED_ZOOM_HINT + quadrant, quadrants, zoom_percent = self.get_combined_zoom(hint) + return type, quadrant, quadrants, zoom_percent + if self.QUADRANT_ZOOM_HINT in hint and len(hint) >= self.QUADRANT_ZOOM_MIN_LEN: + type = self.QUADRANT_ZOOM_HINT + quadrant, quadrants = self.get_quadrant_zoom(hint) + return type, quadrant, quadrants, None + elif self.PERCENT_ZOOM_HINT in hint and len(hint) >= self.PERCENT_ZOOM_MIN_LEN: + type = self.PERCENT_ZOOM_HINT + self.get_percent_zoom(hint) + zoom_percent = self.get_percent_zoom(hint) + return type, None, None, zoom_percent + return None, None, None, None + + def get_combined_zoom(self, hint): + if self.COMBINED_ZOOM_HINT in hint: + if len(hint) >= self.COMBINED_ZOOM_MIN_LEN: + split_pos = hint.index(self.COMBINED_ZOOM_HINT) + hint_a = hint[:split_pos] + hint_b = hint[split_pos+1:] + a_type, a_quadrant, a_quadrants, a_zoom_percent = self.get_zoom_part(hint_a) + b_type, b_quadrant, b_quadrants, b_zoom_percent = self.get_zoom_part(hint_b) + if a_type == self.PERCENT_ZOOM_HINT and b_type == self.QUADRANT_ZOOM_HINT: + zoom_percent = a_zoom_percent + quadrant, quadrants = b_quadrant, b_quadrants + elif a_type == self.QUADRANT_ZOOM_HINT and b_type == self.PERCENT_ZOOM_HINT: + zoom_percent = b_zoom_percent + quadrant, quadrants = a_quadrant, a_quadrants + return quadrant, 
quadrants, zoom_percent + return None, None, None + + def get_animated_zoom(self, hint): + if self.ANIMATED_ZOOM_HINT in hint: + if len(hint) >= self.ANIMATED_ZOOM_MIN_LEN: + split_pos = hint.index(self.ANIMATED_ZOOM_HINT) + hint_from = hint[:split_pos] + hint_to = hint[split_pos+1:] + from_type, from_param1, from_param2, from_param3 = self.get_zoom_part(hint_from) + to_type, to_param1, to_param2, to_param3 = self.get_zoom_part(hint_to) + if from_type and to_type: + return from_type, from_param1, from_param2, from_param3, to_type, to_param1, to_param2, to_param3 + return None, None, None, None, None, None, None, None + + def compute_zoom_type(self, type, param1, param2, param3, main_resize_w, main_resize_h, main_offset_x, main_offset_y, main_crop_w, main_crop_h): + if type == self.COMBINED_ZOOM_HINT: + quadrant, quadrants, zoom_percent = param1, param2, param3 + if quadrant and quadrants and zoom_percent: + return self.compute_combined_zoom(quadrant, quadrants, zoom_percent, + main_resize_w, main_resize_h, + main_offset_x, main_offset_y, + main_crop_w, main_crop_h) + elif type == self.QUADRANT_ZOOM_HINT: + quadrant, quadrants = param1, param2 + if quadrant and quadrants: + return self.compute_quadrant_zoom(quadrant, quadrants, + main_resize_w, main_resize_h, + main_offset_x, main_offset_y, + main_crop_w, main_crop_h) + elif type == self.PERCENT_ZOOM_HINT: + zoom_percent = param3 + if zoom_percent: + return self.compute_percent_zoom(zoom_percent, + main_resize_w, main_resize_h, + main_offset_x, main_offset_y, + main_crop_w, main_crop_h) + + def compute_quadrant_zoom(self, quadrant, quadrants, main_resize_w, main_resize_h, main_offset_x, main_offset_y, main_crop_w, main_crop_h): quadrant = int(quadrant) - 1 + if self.QUADRANT_GRID_CHAR in quadrants: parts = quadrants.split(self.QUADRANT_GRID_CHAR) if len(parts) == 2: @@ -1691,6 +1770,7 @@ def compute_quadrant_zoom(self, quadrant, quadrants, main_resize_w, main_resize_ crop_w = main_crop_w * magnitude crop_h = main_crop_h * magnitude + crop_offset_x = 0 crop_offset_y = 0 if main_offset_x >= 0: @@ -1716,20 +1796,13 @@ def compute_quadrant_zoom(self, quadrant, quadrants, main_resize_w, main_resize_ return resize_w, resize_h, cell_offset_x, cell_offset_y - def get_percent_zoom(self, hint): - if self.PERCENT_ZOOM_HINT in hint: - if len(hint) >= self.PERCENT_ZOOM_MIN_LEN: - zoom_percent = int(hint.replace(self.PERCENT_ZOOM_HINT, "")) - if zoom_percent >= 100: - return zoom_percent - return 100 - else: - return None - def compute_percent_zoom(self, zoom_percent, main_resize_w, main_resize_h, main_offset_x, main_offset_y, main_crop_w, main_crop_h): magnitude = zoom_percent / 100.0 resize_w = evenify(main_resize_w * magnitude) resize_h = evenify(main_resize_h * magnitude) + + # TODO this may not properly handle non-centered crop offsets + # - may need to consider main specific offset in centering if self.crop_offset_x == -1: crop_offset_x = ((resize_w - main_crop_w) / 2.0) else: @@ -1738,10 +1811,9 @@ def compute_percent_zoom(self, zoom_percent, main_resize_w, main_resize_h, main_ crop_offset_y = ((resize_h - main_crop_h) / 2.0) else: crop_offset_y = main_offset_y * magnitude + return resize_w, resize_h, crop_offset_x, crop_offset_y - # TODO this doesn't always work as expected, something like, the magnitudes are different - # between both, so things don't line up def compute_combined_zoom(self, quadrant, quadrants, zoom_percent, main_resize_w, main_resize_h, main_offset_x, main_offset_y, main_crop_w, main_crop_h): percent_resize_w, percent_resize_h, 
_, _ = self.compute_percent_zoom(zoom_percent, main_resize_w, main_resize_h, @@ -1751,7 +1823,7 @@ def compute_combined_zoom(self, quadrant, quadrants, zoom_percent, main_resize_w quadrant_resize_w, _, quadrant_offset_x, quadrant_offset_y = self.compute_quadrant_zoom(quadrant, quadrants, main_resize_w, main_resize_h, main_offset_x, main_offset_y, - main_crop_w, main_crop_h, centered=True) + main_crop_w, main_crop_h) scale = percent_resize_w / quadrant_resize_w scaled_offset_x = quadrant_offset_x * scale @@ -1765,74 +1837,6 @@ def compute_combined_zoom(self, quadrant, quadrants, zoom_percent, main_resize_w return percent_resize_w, percent_resize_h, scaled_offset_x, scaled_offset_y - def compute_zoom_type(self, type, param1, param2, param3, main_resize_w, main_resize_h, main_offset_x, main_offset_y, main_crop_w, main_crop_h): - if type == self.COMBINED_ZOOM_HINT: - quadrant, quadrants, zoom_percent = param1, param2, param3 - if quadrant and quadrants and zoom_percent: - return self.compute_combined_zoom(quadrant, quadrants, zoom_percent, - main_resize_w, main_resize_h, - main_offset_x, main_offset_y, - main_crop_w, main_crop_h) - elif type == self.QUADRANT_ZOOM_HINT: - quadrant, quadrants = param1, param2 - if quadrant and quadrants: - return self.compute_quadrant_zoom(quadrant, quadrants, - main_resize_w, main_resize_h, - main_offset_x, main_offset_y, - main_crop_w, main_crop_h) - elif type == self.PERCENT_ZOOM_HINT: - zoom_percent = param3 - if zoom_percent: - return self.compute_percent_zoom(zoom_percent, - main_resize_w, main_resize_h, - main_offset_x, main_offset_y, - main_crop_w, main_crop_h) - - def get_zoom_part(self, hint): - if self.COMBINED_ZOOM_HINT in hint and len(hint) >= self.COMBINED_ZOOM_MIN_LEN: - type = self.COMBINED_ZOOM_HINT - quadrant, quadrants, zoom_percent = self.get_combined_zoom(hint) - return type, quadrant, quadrants, zoom_percent - if self.QUADRANT_ZOOM_HINT in hint and len(hint) >= self.QUADRANT_ZOOM_MIN_LEN: - type = self.QUADRANT_ZOOM_HINT - quadrant, quadrants = self.get_quadrant_zoom(hint) - return type, quadrant, quadrants, None - elif self.PERCENT_ZOOM_HINT in hint and len(hint) >= self.PERCENT_ZOOM_MIN_LEN: - type = self.PERCENT_ZOOM_HINT - self.get_percent_zoom(hint) - zoom_percent = self.get_percent_zoom(hint) - return type, None, None, zoom_percent - return None, None, None, None - - def get_combined_zoom(self, hint): - if self.COMBINED_ZOOM_HINT in hint: - if len(hint) >= self.COMBINED_ZOOM_MIN_LEN: - split_pos = hint.index(self.COMBINED_ZOOM_HINT) - hint_a = hint[:split_pos] - hint_b = hint[split_pos+1:] - a_type, a_quadrant, a_quadrants, a_zoom_percent = self.get_zoom_part(hint_a) - b_type, b_quadrant, b_quadrants, b_zoom_percent = self.get_zoom_part(hint_b) - if a_type == self.PERCENT_ZOOM_HINT and b_type == self.QUADRANT_ZOOM_HINT: - zoom_percent = a_zoom_percent - quadrant, quadrants = b_quadrant, b_quadrants - elif a_type == self.QUADRANT_ZOOM_HINT and b_type == self.PERCENT_ZOOM_HINT: - zoom_percent = b_zoom_percent - quadrant, quadrants = a_quadrant, a_quadrants - return quadrant, quadrants, zoom_percent - return None, None, None - - def get_animated_zoom(self, hint): - if self.ANIMATED_ZOOM_HINT in hint: - if len(hint) >= self.ANIMATED_ZOOM_MIN_LEN: - split_pos = hint.index(self.ANIMATED_ZOOM_HINT) - hint_from = hint[:split_pos] - hint_to = hint[split_pos+1:] - from_type, from_param1, from_param2, from_param3 = self.get_zoom_part(hint_from) - to_type, to_param1, to_param2, to_param3 = self.get_zoom_part(hint_to) - if from_type and to_type: - 
return from_type, from_param1, from_param2, from_param3, to_type, to_param1, to_param2, to_param3 - return None, None, None, None, None, None, None, None - def compute_animated_zoom(self, num_frames, from_type, from_param1, from_param2, from_param3, to_type, to_param1, to_param2, to_param3, main_resize_w, main_resize_h, main_offset_x, main_offset_y, From bedcf5abcf54ef0daa9a4e470e506d69a3a78c28 Mon Sep 17 00:00:00 2001 From: Jerry Hogsett Date: Tue, 12 Mar 2024 07:25:50 -0700 Subject: [PATCH 018/152] wip --- video_remixer.py | 40 +++++++++++++++++++++++++++++++--------- 1 file changed, 31 insertions(+), 9 deletions(-) diff --git a/video_remixer.py b/video_remixer.py index 17bcc8e2..76f8178c 100644 --- a/video_remixer.py +++ b/video_remixer.py @@ -1815,28 +1815,50 @@ def compute_percent_zoom(self, zoom_percent, main_resize_w, main_resize_h, main_ return resize_w, resize_h, crop_offset_x, crop_offset_y def compute_combined_zoom(self, quadrant, quadrants, zoom_percent, main_resize_w, main_resize_h, main_offset_x, main_offset_y, main_crop_w, main_crop_h): - percent_resize_w, percent_resize_h, _, _ = self.compute_percent_zoom(zoom_percent, + percent_resize_w, percent_resize_h, percent_offset_x, percent_offset_y = self.compute_percent_zoom(zoom_percent, main_resize_w, main_resize_h, main_offset_x, main_offset_y, main_crop_w, main_crop_h) + # the offsets are based on the center of the screen not the quadrant - quadrant_resize_w, _, quadrant_offset_x, quadrant_offset_y = self.compute_quadrant_zoom(quadrant, quadrants, + quadrant_resize_w, quadrant_resize_h, quadrant_offset_x, quadrant_offset_y = self.compute_quadrant_zoom(quadrant, quadrants, main_resize_w, main_resize_h, main_offset_x, main_offset_y, main_crop_w, main_crop_h) + # quadrant_zoom_percent = (main_resize_w / quadrant_resize_w) * 100.0 + + # percent_resize_w2, percent_resize_h2, percent_offset_x2, percent_offset_y2 = self.compute_percent_zoom(quadrant_zoom_percent, + # main_resize_w, main_resize_h, + # main_offset_x, main_offset_y, + # main_crop_w, main_crop_h) + + # diff_w = percent_offset_x - percent_offset_x2 + # diff_h = percent_offset_y - percent_offset_y2 + + # return quadrant_resize_w, quadrant_resize_h, quadrant_offset_x - diff_w, quadrant_offset_y - diff_h + + # reality check, should just be the quadrant zoom - does + # return quadrant_resize_w, quadrant_resize_h, quadrant_offset_x, quadrant_offset_y + + # seems to work the same as the below + # scale = percent_resize_w / quadrant_resize_w + # scaled_offset_x = ((quadrant_offset_x + (main_crop_w / 2)) * scale) - (main_crop_w / 2) + # scaled_offset_y = ((quadrant_offset_y + (main_crop_h / 2)) * scale) - (main_crop_h / 2) + # return percent_resize_w, percent_resize_h, scaled_offset_x, scaled_offset_y + + # close but center point shifts scale = percent_resize_w / quadrant_resize_w scaled_offset_x = quadrant_offset_x * scale scaled_offset_y = quadrant_offset_y * scale - - # both return the upper left corner of a main_crop-sized rectangle in their resize domain - # adding 1/2 the main_crop size will give the centerpoint in that domain - # scaling by the difference in domain size will make the rectangles compatible - # want the centerpoint of the quadrant resize to dominate - # once scaled by the percent resize size, the quadrant offset will work without needing to compute centerpoints - return percent_resize_w, percent_resize_h, scaled_offset_x, scaled_offset_y + # definitely not right + # scale = quadrant_resize_w / percent_resize_w + # scaled_offset_x = quadrant_offset_x * scale 
+ # scaled_offset_y = quadrant_offset_y * scale + # return percent_resize_w, percent_resize_h, scaled_offset_x, scaled_offset_y + def compute_animated_zoom(self, num_frames, from_type, from_param1, from_param2, from_param3, to_type, to_param1, to_param2, to_param3, main_resize_w, main_resize_h, main_offset_x, main_offset_y, From b7ea8618bda95042c66902ea0feab93ddbd8b10e Mon Sep 17 00:00:00 2001 From: Jerry Hogsett Date: Tue, 12 Mar 2024 16:13:38 -0700 Subject: [PATCH 019/152] zoom is working well based on center point --- video_remixer.py | 181 ++++++++++++++++++++++++----------------------- 1 file changed, 93 insertions(+), 88 deletions(-) diff --git a/video_remixer.py b/video_remixer.py index 76f8178c..86258f55 100644 --- a/video_remixer.py +++ b/video_remixer.py @@ -1765,112 +1765,104 @@ def compute_quadrant_zoom(self, quadrant, quadrants, main_resize_w, main_resize_ row = int(quadrant / magnitude) column = quadrant % magnitude + # compute frame scaling resize_w = main_resize_w * magnitude resize_h = main_resize_h * magnitude + + # compute crop area scaling crop_w = main_crop_w * magnitude crop_h = main_crop_h * magnitude + # if the main crop offset is negative, auto-center it within the frame + # otherwise scale up the specific offset + offset_x, offset_y = 0, 0 + if main_offset_x < 0: + offset_x = (resize_w - crop_w) / 2.0 + else: + offset_x = main_offset_x * magnitude + if main_offset_y < 0: + offset_y = (resize_h - crop_h) / 2.0 + else: + offset_y = main_offset_y * magnitude - crop_offset_x = 0 - crop_offset_y = 0 - if main_offset_x >= 0: - crop_offset_x = main_offset_x * magnitude - if main_offset_y >= 0: - crop_offset_y = main_offset_y * magnitude - + # compute the dimensions of one grid cell given the crop and magnitude(s) cell_width = crop_w / magnitude_x cell_height = crop_h / magnitude_y - cell_centering_x = 0 - cell_centering_y = 0 - if cell_width > main_crop_w: - cell_centering_x = (cell_width - main_crop_w) / 2 - elif main_crop_w > cell_width: - cell_centering_x = (main_crop_w - cell_width) / 2 - if cell_height > main_crop_h: - cell_centering_y = (cell_height - main_crop_h) / 2 - elif main_crop_h > cell_height: - cell_centering_y = (main_crop_h - cell_height) / 2 - - cell_offset_x = column * cell_width + crop_offset_x + cell_centering_x - cell_offset_y = row * cell_height + crop_offset_y + cell_centering_y - - return resize_w, resize_h, cell_offset_x, cell_offset_y + + # compute the upper left corner of the grid cell given the cell dimensions, + # and row, column; unadjusted for main crop offset + cell_offset_x = column * cell_width + cell_offset_y = row * cell_height + + # add the main offset + cell_offset_x += offset_x + cell_offset_y += offset_y + + # compute the center point + center_x = cell_offset_x + (cell_width / 2.0) + center_y = cell_offset_y + (cell_height / 2.0) + + return resize_w, resize_h, center_x, center_y def compute_percent_zoom(self, zoom_percent, main_resize_w, main_resize_h, main_offset_x, main_offset_y, main_crop_w, main_crop_h): magnitude = zoom_percent / 100.0 - resize_w = evenify(main_resize_w * magnitude) - resize_h = evenify(main_resize_h * magnitude) - # TODO this may not properly handle non-centered crop offsets - # - may need to consider main specific offset in centering - if self.crop_offset_x == -1: - crop_offset_x = ((resize_w - main_crop_w) / 2.0) + # compute frame scaling + resize_w = main_resize_w * magnitude + resize_h = main_resize_h * magnitude + + # compute crop area scaling + crop_w = main_crop_w * magnitude + crop_h = main_crop_h * 
magnitude + + # if the main crop offset is negative, auto-center it within the frame + # otherwise scale up the specific offset + offset_x, offset_y = 0, 0 + if main_offset_x < 0: + offset_x = (resize_w - crop_w) / 2.0 else: - crop_offset_x = main_offset_x * magnitude - if self.crop_offset_y == -1: - crop_offset_y = ((resize_h - main_crop_h) / 2.0) + offset_x = main_offset_x * magnitude + if main_offset_y < 0: + offset_y = (resize_h - crop_h) / 2.0 else: - crop_offset_y = main_offset_y * magnitude + offset_y = main_offset_y * magnitude - return resize_w, resize_h, crop_offset_x, crop_offset_y + # compute the centerpoint of the scaled crop area + center_x = (crop_w / 2.0) + offset_x + center_y = (crop_h / 2.0) + offset_y + + return resize_w, resize_h, center_x, center_y def compute_combined_zoom(self, quadrant, quadrants, zoom_percent, main_resize_w, main_resize_h, main_offset_x, main_offset_y, main_crop_w, main_crop_h): - percent_resize_w, percent_resize_h, percent_offset_x, percent_offset_y = self.compute_percent_zoom(zoom_percent, + percent_resize_w, percent_resize_h, percent_center_x, percent_center_y = self.compute_percent_zoom(zoom_percent, main_resize_w, main_resize_h, main_offset_x, main_offset_y, main_crop_w, main_crop_h) - # the offsets are based on the center of the screen not the quadrant - quadrant_resize_w, quadrant_resize_h, quadrant_offset_x, quadrant_offset_y = self.compute_quadrant_zoom(quadrant, quadrants, + quadrant_resize_w, quadrant_resize_h, quadrant_center_x, quadrant_center_y = self.compute_quadrant_zoom(quadrant, quadrants, main_resize_w, main_resize_h, main_offset_x, main_offset_y, main_crop_w, main_crop_h) - # quadrant_zoom_percent = (main_resize_w / quadrant_resize_w) * 100.0 - - # percent_resize_w2, percent_resize_h2, percent_offset_x2, percent_offset_y2 = self.compute_percent_zoom(quadrant_zoom_percent, - # main_resize_w, main_resize_h, - # main_offset_x, main_offset_y, - # main_crop_w, main_crop_h) - - # diff_w = percent_offset_x - percent_offset_x2 - # diff_h = percent_offset_y - percent_offset_y2 - - # return quadrant_resize_w, quadrant_resize_h, quadrant_offset_x - diff_w, quadrant_offset_y - diff_h - - # reality check, should just be the quadrant zoom - does - # return quadrant_resize_w, quadrant_resize_h, quadrant_offset_x, quadrant_offset_y - - # seems to work the same as the below - # scale = percent_resize_w / quadrant_resize_w - # scaled_offset_x = ((quadrant_offset_x + (main_crop_w / 2)) * scale) - (main_crop_w / 2) - # scaled_offset_y = ((quadrant_offset_y + (main_crop_h / 2)) * scale) - (main_crop_h / 2) - # return percent_resize_w, percent_resize_h, scaled_offset_x, scaled_offset_y - - # close but center point shifts + # scale the quadrant center point to the percent resize scale = percent_resize_w / quadrant_resize_w - scaled_offset_x = quadrant_offset_x * scale - scaled_offset_y = quadrant_offset_y * scale - return percent_resize_w, percent_resize_h, scaled_offset_x, scaled_offset_y + center_x = quadrant_center_x * scale + center_y = quadrant_center_y * scale - # definitely not right - # scale = quadrant_resize_w / percent_resize_w - # scaled_offset_x = quadrant_offset_x * scale - # scaled_offset_y = quadrant_offset_y * scale - # return percent_resize_w, percent_resize_h, scaled_offset_x, scaled_offset_y + return percent_resize_w, percent_resize_h, center_x, center_y def compute_animated_zoom(self, num_frames, from_type, from_param1, from_param2, from_param3, to_type, to_param1, to_param2, to_param3, main_resize_w, main_resize_h, main_offset_x, 
main_offset_y, main_crop_w, main_crop_h): - from_resize_w, from_resize_h, from_crop_offset_x, from_crop_offset_y = \ + from_resize_w, from_resize_h, from_center_x, from_center_y = \ self.compute_zoom_type(from_type, from_param1, from_param2, from_param3, main_resize_w, main_resize_h, main_offset_x, main_offset_y, main_crop_w, main_crop_h) - to_resize_w, to_resize_h, to_crop_offset_x, to_crop_offset_y = \ + to_resize_w, to_resize_h, to_center_x, to_center_y = \ self.compute_zoom_type(to_type, to_param1, to_param2, to_param3, main_resize_w, main_resize_h, main_offset_x, main_offset_y, @@ -1878,43 +1870,50 @@ def compute_animated_zoom(self, num_frames, from_type, from_param1, from_param2, diff_resize_w = to_resize_w - from_resize_w diff_resize_h = to_resize_h - from_resize_h - diff_crop_offset_x = to_crop_offset_x - from_crop_offset_x - diff_crop_offset_y = to_crop_offset_y - from_crop_offset_y + diff_center_x = to_center_x - from_center_x + diff_center_y = to_center_y - from_center_y # ensure the final transition occurs num_frames -= 1 step_resize_w = diff_resize_w / num_frames step_resize_h = diff_resize_h / num_frames - step_crop_offset_x = diff_crop_offset_x / num_frames - step_crop_offset_y = diff_crop_offset_y / num_frames + step_center_x = diff_center_x / num_frames + step_center_y = diff_center_y / num_frames context = {} context["from_resize_w"] = from_resize_w context["from_resize_h"] = from_resize_h - context["from_crop_offset_x"] = from_crop_offset_x - context["from_crop_offset_y"] = from_crop_offset_y + context["from_center_x"] = from_center_x + context["from_center_y"] = from_center_y context["step_resize_w"] = step_resize_w context["step_resize_h"] = step_resize_h - context["step_crop_offset_x"] = step_crop_offset_x - context["step_crop_offset_y"] = step_crop_offset_y + context["step_center_x"] = step_center_x + context["step_center_y"] = step_center_y + context["main_crop_w"] = main_crop_w + context["main_crop_h"] = main_crop_h return context def _resize_frame_param(self, index, context): from_resize_w = context["from_resize_w"] from_resize_h = context["from_resize_h"] - from_crop_offset_x = context["from_crop_offset_x"] - from_crop_offset_y = context["from_crop_offset_y"] + from_center_x = context["from_center_x"] + from_center_y = context["from_center_y"] step_resize_w = context["step_resize_w"] step_resize_h = context["step_resize_h"] - step_crop_offset_x = context["step_crop_offset_x"] - step_crop_offset_y = context["step_crop_offset_y"] + step_center_x = context["step_center_x"] + step_center_y = context["step_center_y"] + main_crop_w = context["main_crop_w"] + main_crop_h = context["main_crop_h"] + + resize_w = from_resize_w + (index * step_resize_w) + resize_h = from_resize_h + (index * step_resize_h) + center_x = from_center_x + (index * step_center_x) + center_y = from_center_y + (index * step_center_y) + crop_offset_x = center_x - (main_crop_w / 2.0) + crop_offset_y = center_y - (main_crop_h / 2.0) - return \ - int(from_resize_w + (index * step_resize_w)), \ - int(from_resize_h + (index * step_resize_h)), \ - int(from_crop_offset_x + (index * step_crop_offset_x)), \ - int(from_crop_offset_y + (index * step_crop_offset_y)) + return int(resize_w), int(resize_h), int(crop_offset_x), int(crop_offset_y) def resize_scenes(self, log_fn, kept_scenes, remixer_settings): scenes_base_path = self.scenes_source_path(self.RESIZE_STEP) @@ -1971,13 +1970,15 @@ def resize_scenes(self, log_fn, kept_scenes, remixer_settings): elif self.COMBINED_ZOOM_HINT in resize_hint: quadrant, 
quadrants, zoom_percent = self.get_combined_zoom(resize_hint) if quadrant and quadrants and zoom_percent: - resize_w, resize_h, crop_offset_x, crop_offset_y = \ + resize_w, resize_h, center_x, center_y = \ self.compute_combined_zoom(quadrant, quadrants, zoom_percent, main_resize_w, main_resize_h, main_offset_x, main_offset_y, main_crop_w, main_crop_h) scale_type = remixer_settings["scale_type_up"] + crop_offset_x = center_x - (main_crop_w / 2.0) + crop_offset_y = center_y - (main_crop_h / 2.0) self.resize_scene(log_fn, scene_input_path, scene_output_path, @@ -1997,13 +1998,15 @@ def resize_scenes(self, log_fn, kept_scenes, remixer_settings): # zoomed in at 300% and 500% quadrant, quadrants = self.get_quadrant_zoom(resize_hint) if quadrant and quadrants: - resize_w, resize_h, crop_offset_x, crop_offset_y = \ + resize_w, resize_h, center_x, center_y = \ self.compute_quadrant_zoom(quadrant, quadrants, main_resize_w, main_resize_h, main_offset_x, main_offset_y, main_crop_w, main_crop_h) scale_type = remixer_settings["scale_type_up"] + crop_offset_x = center_x - (main_crop_w / 2.0) + crop_offset_y = center_y - (main_crop_h / 2.0) self.resize_scene(log_fn, scene_input_path, scene_output_path, @@ -2021,12 +2024,14 @@ def resize_scenes(self, log_fn, kept_scenes, remixer_settings): # interpret z% as zoom percent to zoom into center zoom_percent = self.get_percent_zoom(resize_hint) if zoom_percent: - resize_w, resize_h, crop_offset_x, crop_offset_y = \ + resize_w, resize_h, center_x, center_y = \ self.compute_percent_zoom(zoom_percent, main_resize_w, main_resize_h, main_offset_x, main_offset_y, main_crop_w, main_crop_h) scale_type = remixer_settings["scale_type_up"] + crop_offset_x = center_x - (main_crop_w / 2.0) + crop_offset_y = center_y - (main_crop_h / 2.0) self.resize_scene(log_fn, scene_input_path, scene_output_path, From 34f2a73b7be0aeaecb170a106f9cedb88c4040fd Mon Sep 17 00:00:00 2001 From: Jerry Hogsett Date: Tue, 12 Mar 2024 16:23:37 -0700 Subject: [PATCH 020/152] some cleanup --- video_remixer.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/video_remixer.py b/video_remixer.py index 86258f55..878a5d2f 100644 --- a/video_remixer.py +++ b/video_remixer.py @@ -1655,8 +1655,6 @@ def get_quadrant_zoom(self, hint): else: quadrant, quadrants = 1, 1 - print(quadrant, quadrants) - return quadrant, quadrants else: return None, None @@ -1845,6 +1843,7 @@ def compute_combined_zoom(self, quadrant, quadrants, zoom_percent, main_resize_w main_crop_w, main_crop_h) # scale the quadrant center point to the percent resize + # this seems to work on the left and middle but not on the right scale = percent_resize_w / quadrant_resize_w center_x = quadrant_center_x * scale center_y = quadrant_center_y * scale @@ -2045,6 +2044,7 @@ def resize_scenes(self, log_fn, kept_scenes, remixer_settings): crop_type="crop") resize_handled = True except Exception as error: + # TODO print(error) raise log_fn( From 8b1301a7dc31b0dfbee3734c33e2e664ae93e302 Mon Sep 17 00:00:00 2001 From: Jerry Hogsett Date: Tue, 12 Mar 2024 18:11:02 -0700 Subject: [PATCH 021/152] fix crop rect going out of bounds with combined zoom --- video_remixer.py | 12 +++++++++++- 1 file changed, 11 insertions(+), 1 deletion(-) diff --git a/video_remixer.py b/video_remixer.py index 878a5d2f..62a2f056 100644 --- a/video_remixer.py +++ b/video_remixer.py @@ -1848,6 +1848,15 @@ def compute_combined_zoom(self, quadrant, quadrants, zoom_percent, main_resize_w center_x = quadrant_center_x * scale center_y = quadrant_center_y * scale + 
crop_offset_x = center_x - (main_crop_w / 2.0) + crop_offset_y = center_y - (main_crop_h / 2.0) + if crop_offset_x < 0 or crop_offset_x + main_crop_w > percent_resize_w \ + or crop_offset_y < 0 or crop_offset_y + main_crop_h > percent_resize_h: + # if out of bounds, resort to a quadrant zoom + resize_w, resize_h, center_x, center_y = \ + self.compute_quadrant_zoom(quadrant, quadrants, main_resize_w, main_resize_h, main_offset_x, main_offset_y, main_crop_w, main_crop_h) + return resize_w, resize_h, center_x, center_y + return percent_resize_w, percent_resize_h, center_x, center_y def compute_animated_zoom(self, num_frames, from_type, from_param1, from_param2, from_param3, @@ -1975,9 +1984,10 @@ def resize_scenes(self, log_fn, kept_scenes, remixer_settings): main_offset_x, main_offset_y, main_crop_w, main_crop_h) - scale_type = remixer_settings["scale_type_up"] crop_offset_x = center_x - (main_crop_w / 2.0) crop_offset_y = center_y - (main_crop_h / 2.0) + + scale_type = remixer_settings["scale_type_up"] self.resize_scene(log_fn, scene_input_path, scene_output_path, From b8345c7f6c27cfaa57d0e747f17f844ef7027965 Mon Sep 17 00:00:00 2001 From: Jerry Hogsett Date: Tue, 12 Mar 2024 20:31:47 -0700 Subject: [PATCH 022/152] find a fitting zoom percent rather than err out --- video_remixer.py | 49 +++++++++++++++++++++++++++++++++++------------- 1 file changed, 36 insertions(+), 13 deletions(-) diff --git a/video_remixer.py b/video_remixer.py index 62a2f056..a5eb53d6 100644 --- a/video_remixer.py +++ b/video_remixer.py @@ -1831,33 +1831,56 @@ def compute_percent_zoom(self, zoom_percent, main_resize_w, main_resize_h, main_ return resize_w, resize_h, center_x, center_y + MAX_SELF_FIT_ZOOM = 1000 + def compute_combined_zoom(self, quadrant, quadrants, zoom_percent, main_resize_w, main_resize_h, main_offset_x, main_offset_y, main_crop_w, main_crop_h): - percent_resize_w, percent_resize_h, percent_center_x, percent_center_y = self.compute_percent_zoom(zoom_percent, + resize_w, resize_h, _, _ = self.compute_percent_zoom(zoom_percent, main_resize_w, main_resize_h, main_offset_x, main_offset_y, main_crop_w, main_crop_h) - - quadrant_resize_w, quadrant_resize_h, quadrant_center_x, quadrant_center_y = self.compute_quadrant_zoom(quadrant, quadrants, + quadrant_resize_w, _, quadrant_center_x, quadrant_center_y = self.compute_quadrant_zoom(quadrant, quadrants, main_resize_w, main_resize_h, main_offset_x, main_offset_y, main_crop_w, main_crop_h) # scale the quadrant center point to the percent resize - # this seems to work on the left and middle but not on the right - scale = percent_resize_w / quadrant_resize_w + scale = resize_w / quadrant_resize_w center_x = quadrant_center_x * scale center_y = quadrant_center_y * scale + if self.check_crop_bounds(resize_w, resize_h, center_x, center_y, main_crop_w, main_crop_h): + # fit the requested zoom percent to be in bounds + fit_zoom_percent = zoom_percent + while fit_zoom_percent < self.MAX_SELF_FIT_ZOOM and \ + self.check_crop_bounds(resize_w, resize_h, center_x, center_y, main_crop_w, main_crop_h): + fit_zoom_percent += 1 + resize_w, resize_h, _, _ = self.compute_percent_zoom(fit_zoom_percent, + main_resize_w, main_resize_h, + main_offset_x, main_offset_y, + main_crop_w, main_crop_h) + quadrant_resize_w, _, quadrant_center_x, quadrant_center_y = self.compute_quadrant_zoom(quadrant, quadrants, + main_resize_w, main_resize_h, + main_offset_x, main_offset_y, + main_crop_w, main_crop_h) + + # scale the quadrant center point to the percent resize + # this seems to work on 
the left and middle but not on the right + scale = resize_w / quadrant_resize_w + center_x = quadrant_center_x * scale + center_y = quadrant_center_y * scale + + # if still out of bounds, restore to quadrant zoom + if self.check_crop_bounds(resize_w, resize_h, center_x, center_y, main_crop_w, main_crop_h): + resize_w, resize_h, center_x, center_y = \ + self.compute_quadrant_zoom(quadrant, quadrants, main_resize_w, main_resize_h, main_offset_x, main_offset_y, main_crop_w, main_crop_h) + + return resize_w, resize_h, center_x, center_y + + def check_crop_bounds(self, resize_w, resize_h, center_x, center_y, main_crop_w, main_crop_h): crop_offset_x = center_x - (main_crop_w / 2.0) crop_offset_y = center_y - (main_crop_h / 2.0) - if crop_offset_x < 0 or crop_offset_x + main_crop_w > percent_resize_w \ - or crop_offset_y < 0 or crop_offset_y + main_crop_h > percent_resize_h: - # if out of bounds, resort to a quadrant zoom - resize_w, resize_h, center_x, center_y = \ - self.compute_quadrant_zoom(quadrant, quadrants, main_resize_w, main_resize_h, main_offset_x, main_offset_y, main_crop_w, main_crop_h) - return resize_w, resize_h, center_x, center_y - - return percent_resize_w, percent_resize_h, center_x, center_y + return crop_offset_x < 0 or crop_offset_x + main_crop_w > resize_w \ + or crop_offset_y < 0 or crop_offset_y + main_crop_h > resize_h def compute_animated_zoom(self, num_frames, from_type, from_param1, from_param2, from_param3, to_type, to_param1, to_param2, to_param3, From f9020dc4692364dd8671439d69a67c8a9ac25624 Mon Sep 17 00:00:00 2001 From: Jerry Hogsett Date: Tue, 12 Mar 2024 21:48:58 -0700 Subject: [PATCH 023/152] add go to frame control, support negative values --- tabs/video_remixer_ui.py | 30 ++++++++++++++++++++++++++++-- video_remixer.py | 21 +++++++++++++++------ 2 files changed, 43 insertions(+), 8 deletions(-) diff --git a/tabs/video_remixer_ui.py b/tabs/video_remixer_ui.py index 08dfb22d..18f4e9a2 100644 --- a/tabs/video_remixer_ui.py +++ b/tabs/video_remixer_ui.py @@ -582,16 +582,24 @@ def render_tab(self): goto_100_702 = gr.Button(value="Last >", scale=1, min_width=90, size="sm") next_minute_702 = gr.Button(value="Minute >", scale=1, min_width=90, size="sm") with gr.Row(): + with gr.Column(scale=1): + with gr.Row(equal_height=True, variant="panel", elem_id="highlightbutton"): + go_to_f_button702 = gr.Button(value="Go to Frame", + variant="secondary", + size="sm", min_width=120) + go_to_f_702 = gr.Number(value=0, show_label=False, + info=None, + precision=0, container=False, + min_width=120) with gr.Column(scale=1): with gr.Row(equal_height=True, variant="panel", elem_id="highlightbutton"): go_to_s_button702 = gr.Button(value="Go to Second", variant="secondary", size="sm", min_width=120) go_to_s_702 = gr.Number(value=0, show_label=False, - info=None, minimum=0, + info=None, precision=0, container=False, min_width=120) - gr.Column(scale=7) with gr.Column(): preview_image702 = gr.Image(type="filepath", label="Split Frame Preview", tool=None, height=max_thumb_size) @@ -1188,6 +1196,14 @@ def render_tab(self): inputs=[scene_id_702, split_percent_702, go_to_s_702], outputs=split_percent_702, show_progress=False) + go_to_f_button702.click(self.go_to_f_click702, + inputs=[scene_id_702, split_percent_702, go_to_f_702], + outputs=split_percent_702, show_progress=False) + + go_to_f_702.submit(self.go_to_f_submit702, + inputs=[scene_id_702, split_percent_702, go_to_f_702], + outputs=split_percent_702, show_progress=False) + split_button702.click(self.split_button702, 
inputs=[scene_id_702, split_percent_702], outputs=[tabs_video_remixer, message_box702, scene_index, scene_name, scene_image, scene_state, scene_info, set_scene_label]) @@ -2542,6 +2558,16 @@ def go_to_s_click702(self, scene_index, split_percent, go_to_second): def go_to_s_submit702(self, scene_index, split_percent, go_to_second): return self.go_to_s_button702(scene_index, split_percent, go_to_second) + def go_to_f_button702(self, scene_index, split_percent, go_to_frame): + return self.state.compute_advance_702(scene_index, split_percent, False, by_exact_frame=True, + exact_frame=go_to_frame) + + def go_to_f_click702(self, scene_index, split_percent, go_to_frame): + return self.go_to_f_button702(scene_index, split_percent, go_to_frame) + + def go_to_f_submit702(self, scene_index, split_percent, go_to_frame): + return self.go_to_f_button702(scene_index, split_percent, go_to_frame) + def export_project_703(self, new_project_path : str, new_project_name : str): empty_args = dummy_args(2, lambda : gr.update(visible=True)) if not new_project_path: diff --git a/video_remixer.py b/video_remixer.py index 79712d9b..08b0bc06 100644 --- a/video_remixer.py +++ b/video_remixer.py @@ -951,7 +951,9 @@ def compute_advance_702(self, by_minute=False, by_second=False, by_exact_second=False, - exact_second=0): + exact_second=0, + by_exact_frame=False, + exact_frame=0): if not isinstance(scene_index, (int, float)): return None @@ -960,16 +962,23 @@ def compute_advance_702(self, first_frame, last_frame, _ = details_from_group_name(scene_name) num_frames = (last_frame - first_frame) + 1 split_percent_frame = num_frames * split_percent / 100.0 + frames_1s = self.project_fps + frames_60s = frames_1s * 60 - if by_exact_second: - frames_1s = self.project_fps - new_split_frame = frames_1s * exact_second + if by_exact_frame: + if exact_frame < 0: + new_split_frame = (num_frames + exact_frame) #- 1 + else: + new_split_frame = exact_frame + elif by_exact_second: + if exact_second < 0: + new_split_frame = (num_frames + (frames_1s * exact_second)) #- 1 + else: + new_split_frame = frames_1s * exact_second elif by_minute: - frames_60s = self.project_fps * 60 new_split_frame = \ split_percent_frame + frames_60s if by_next else split_percent_frame - frames_60s elif by_second: - frames_1s = self.project_fps new_split_frame = \ split_percent_frame + frames_1s if by_next else split_percent_frame - frames_1s else: # by frame From 08cd7cb075feddc59654f940f26a15e43a5b9097 Mon Sep 17 00:00:00 2001 From: Jerry Hogsett Date: Wed, 13 Mar 2024 07:35:04 -0700 Subject: [PATCH 024/152] fixes XxY grid math --- video_remixer.py | 12 +++++++++--- 1 file changed, 9 insertions(+), 3 deletions(-) diff --git a/video_remixer.py b/video_remixer.py index fc24f7b1..fdd26f1f 100644 --- a/video_remixer.py +++ b/video_remixer.py @@ -1756,9 +1756,15 @@ def compute_quadrant_zoom(self, quadrant, quadrants, main_resize_w, main_resize_ grid_y = int(parts[1]) magnitude_x = grid_x magnitude_y = grid_y - magnitude = max(magnitude_x, magnitude_y) - row = int(quadrant / magnitude) - column = quadrant % magnitude + + if magnitude_x >= magnitude_y: + magnitude = magnitude_x + row = int(quadrant / magnitude_x) + column = quadrant % magnitude_x + else: + magnitude = magnitude_y + row = int(quadrant / magnitude_x) + column = quadrant % magnitude_x else: magnitude = 1 magnitude_x = magnitude From f94fcaa85c7c3c7c8c7d4ed9c190d629e778d3cf Mon Sep 17 00:00:00 2001 From: Jerry Hogsett Date: Wed, 13 Mar 2024 08:37:33 -0700 Subject: [PATCH 025/152] log the found fitted zoom 
percentage --- resize_frames.py | 2 -- slice_video.py | 2 -- video_remixer.py | 46 +++++++++++++++++++++++++++------------------- 3 files changed, 27 insertions(+), 23 deletions(-) diff --git a/resize_frames.py b/resize_frames.py index 62dc7892..4f45101c 100644 --- a/resize_frames.py +++ b/resize_frames.py @@ -119,7 +119,6 @@ def resize(self, type : str="png", params_fn : Callable | None=None, params_cont image = cv2.imread(file) if params_fn: - self.log(f"calling 'param_fn' to get parameters") scale_width, \ scale_height, \ crop_offset_x, \ @@ -151,7 +150,6 @@ def resize(self, type : str="png", params_fn : Callable | None=None, params_cont max_x = int(min_x + crop_width) max_y = int(min_y + crop_height) - self.log(f"cropping {file} with [{min_y}:{max_y}, {min_x}:{max_x}]") image = image[min_y:max_y, min_x:max_x] _, filename, ext = split_filepath(file) diff --git a/slice_video.py b/slice_video.py index a892f34f..d47f2a7e 100644 --- a/slice_video.py +++ b/slice_video.py @@ -197,7 +197,6 @@ def slice(self, ignore_errors=False): self.log(f"Creating output path {self.output_path}") create_directory(self.output_path) - self.log("using slice_video (may cause long delay while processing request)") pbar_desc = f"Slice {self.type}" errors = [] with Mtqdm().open_bar(total=len(group_names), desc=pbar_desc) as bar: @@ -216,7 +215,6 @@ def slice_group(self, group_name, ignore_errors=False): self.log(f"Creating output path {self.output_path}") create_directory(self.output_path) - self.log("using slice_video (may cause long delay while processing request)") pbar_desc = f"Slice {self.type}" errors = [] with Mtqdm().open_bar(total=1, desc=pbar_desc) as bar: diff --git a/video_remixer.py b/video_remixer.py index fdd26f1f..a237a72a 100644 --- a/video_remixer.py +++ b/video_remixer.py @@ -1723,30 +1723,32 @@ def get_animated_zoom(self, hint): return from_type, from_param1, from_param2, from_param3, to_type, to_param1, to_param2, to_param3 return None, None, None, None, None, None, None, None - def compute_zoom_type(self, type, param1, param2, param3, main_resize_w, main_resize_h, main_offset_x, main_offset_y, main_crop_w, main_crop_h): + def compute_zoom_type(self, type, param1, param2, param3, main_resize_w, main_resize_h, + main_offset_x, main_offset_y, main_crop_w, main_crop_h, log_fn): if type == self.COMBINED_ZOOM_HINT: quadrant, quadrants, zoom_percent = param1, param2, param3 if quadrant and quadrants and zoom_percent: return self.compute_combined_zoom(quadrant, quadrants, zoom_percent, main_resize_w, main_resize_h, main_offset_x, main_offset_y, - main_crop_w, main_crop_h) + main_crop_w, main_crop_h, log_fn=log_fn) elif type == self.QUADRANT_ZOOM_HINT: quadrant, quadrants = param1, param2 if quadrant and quadrants: return self.compute_quadrant_zoom(quadrant, quadrants, main_resize_w, main_resize_h, main_offset_x, main_offset_y, - main_crop_w, main_crop_h) + main_crop_w, main_crop_h, log_fn=log_fn) elif type == self.PERCENT_ZOOM_HINT: zoom_percent = param3 if zoom_percent: return self.compute_percent_zoom(zoom_percent, main_resize_w, main_resize_h, main_offset_x, main_offset_y, - main_crop_w, main_crop_h) + main_crop_w, main_crop_h, log_fn=log_fn) - def compute_quadrant_zoom(self, quadrant, quadrants, main_resize_w, main_resize_h, main_offset_x, main_offset_y, main_crop_w, main_crop_h): + def compute_quadrant_zoom(self, quadrant, quadrants, main_resize_w, main_resize_h, + main_offset_x, main_offset_y, main_crop_w, main_crop_h, log_fn): quadrant = int(quadrant) - 1 if self.QUADRANT_GRID_CHAR in 
quadrants: @@ -1817,7 +1819,8 @@ def compute_quadrant_zoom(self, quadrant, quadrants, main_resize_w, main_resize_ return resize_w, resize_h, center_x, center_y - def compute_percent_zoom(self, zoom_percent, main_resize_w, main_resize_h, main_offset_x, main_offset_y, main_crop_w, main_crop_h): + def compute_percent_zoom(self, zoom_percent, main_resize_w, main_resize_h, main_offset_x, + main_offset_y, main_crop_w, main_crop_h, log_fn): magnitude = zoom_percent / 100.0 # compute frame scaling @@ -1848,15 +1851,16 @@ def compute_percent_zoom(self, zoom_percent, main_resize_w, main_resize_h, main_ MAX_SELF_FIT_ZOOM = 1000 - def compute_combined_zoom(self, quadrant, quadrants, zoom_percent, main_resize_w, main_resize_h, main_offset_x, main_offset_y, main_crop_w, main_crop_h): + def compute_combined_zoom(self, quadrant, quadrants, zoom_percent, main_resize_w, main_resize_h, + main_offset_x, main_offset_y, main_crop_w, main_crop_h, log_fn): resize_w, resize_h, _, _ = self.compute_percent_zoom(zoom_percent, main_resize_w, main_resize_h, main_offset_x, main_offset_y, - main_crop_w, main_crop_h) + main_crop_w, main_crop_h, log_fn) quadrant_resize_w, _, quadrant_center_x, quadrant_center_y = self.compute_quadrant_zoom(quadrant, quadrants, main_resize_w, main_resize_h, main_offset_x, main_offset_y, - main_crop_w, main_crop_h) + main_crop_w, main_crop_h, log_fn) # scale the quadrant center point to the percent resize scale = resize_w / quadrant_resize_w @@ -1872,11 +1876,11 @@ def compute_combined_zoom(self, quadrant, quadrants, zoom_percent, main_resize_w resize_w, resize_h, _, _ = self.compute_percent_zoom(fit_zoom_percent, main_resize_w, main_resize_h, main_offset_x, main_offset_y, - main_crop_w, main_crop_h) + main_crop_w, main_crop_h, log_fn) quadrant_resize_w, _, quadrant_center_x, quadrant_center_y = self.compute_quadrant_zoom(quadrant, quadrants, main_resize_w, main_resize_h, main_offset_x, main_offset_y, - main_crop_w, main_crop_h) + main_crop_w, main_crop_h, log_fn) # scale the quadrant center point to the percent resize # this seems to work on the left and middle but not on the right @@ -1886,8 +1890,12 @@ def compute_combined_zoom(self, quadrant, quadrants, zoom_percent, main_resize_w # if still out of bounds, restore to quadrant zoom if self.check_crop_bounds(resize_w, resize_h, center_x, center_y, main_crop_w, main_crop_h): + log_fn("Can't find fitting zoom percentage; ignoring percent part.") resize_w, resize_h, center_x, center_y = \ - self.compute_quadrant_zoom(quadrant, quadrants, main_resize_w, main_resize_h, main_offset_x, main_offset_y, main_crop_w, main_crop_h) + self.compute_quadrant_zoom(quadrant, quadrants, main_resize_w, main_resize_h, + main_offset_x, main_offset_y, main_crop_w, main_crop_h, log_fn) + else: + log_fn(f"Found fitting zoom percentage: {fit_zoom_percent}%.") return resize_w, resize_h, center_x, center_y @@ -1900,19 +1908,19 @@ def check_crop_bounds(self, resize_w, resize_h, center_x, center_y, main_crop_w, def compute_animated_zoom(self, num_frames, from_type, from_param1, from_param2, from_param3, to_type, to_param1, to_param2, to_param3, main_resize_w, main_resize_h, main_offset_x, main_offset_y, - main_crop_w, main_crop_h): + main_crop_w, main_crop_h, log_fn): from_resize_w, from_resize_h, from_center_x, from_center_y = \ self.compute_zoom_type(from_type, from_param1, from_param2, from_param3, main_resize_w, main_resize_h, main_offset_x, main_offset_y, - main_crop_w, main_crop_h) + main_crop_w, main_crop_h, log_fn=log_fn) to_resize_w, to_resize_h, to_center_x, 
to_center_y = \ self.compute_zoom_type(to_type, to_param1, to_param2, to_param3, main_resize_w, main_resize_h, main_offset_x, main_offset_y, - main_crop_w, main_crop_h) + main_crop_w, main_crop_h, log_fn=log_fn) diff_resize_w = to_resize_w - from_resize_w diff_resize_h = to_resize_h - from_resize_h @@ -1995,7 +2003,7 @@ def resize_scenes(self, log_fn, kept_scenes, remixer_settings): from_type, from_param1, from_param2, from_param3, to_type, to_param1, to_param2, to_param3, main_resize_w, main_resize_h, main_offset_x, main_offset_y, - main_crop_w, main_crop_h) + main_crop_w, main_crop_h, log_fn) scale_type = remixer_settings["scale_type_up"] self.resize_scene(log_fn, @@ -2020,7 +2028,7 @@ def resize_scenes(self, log_fn, kept_scenes, remixer_settings): self.compute_combined_zoom(quadrant, quadrants, zoom_percent, main_resize_w, main_resize_h, main_offset_x, main_offset_y, - main_crop_w, main_crop_h) + main_crop_w, main_crop_h, log_fn) crop_offset_x = center_x - (main_crop_w / 2.0) crop_offset_y = center_y - (main_crop_h / 2.0) @@ -2049,7 +2057,7 @@ def resize_scenes(self, log_fn, kept_scenes, remixer_settings): self.compute_quadrant_zoom(quadrant, quadrants, main_resize_w, main_resize_h, main_offset_x, main_offset_y, - main_crop_w, main_crop_h) + main_crop_w, main_crop_h, log_fn) scale_type = remixer_settings["scale_type_up"] crop_offset_x = center_x - (main_crop_w / 2.0) @@ -2075,7 +2083,7 @@ def resize_scenes(self, log_fn, kept_scenes, remixer_settings): self.compute_percent_zoom(zoom_percent, main_resize_w, main_resize_h, main_offset_x, main_offset_y, - main_crop_w, main_crop_h) + main_crop_w, main_crop_h, log_fn) scale_type = remixer_settings["scale_type_up"] crop_offset_x = center_x - (main_crop_w / 2.0) crop_offset_y = center_y - (main_crop_h / 2.0) From cbc6cb95f67f11d280dfa44e62417b24b15a4257 Mon Sep 17 00:00:00 2001 From: Jerry Hogsett Date: Wed, 13 Mar 2024 13:15:28 -0700 Subject: [PATCH 026/152] wip --- tabs/video_remixer_ui.py | 17 +- video_remixer.py | 3661 ++++++++++++++++++------------------ video_remixer_processor.py | 1844 ++++++++++++++++++ 3 files changed, 3698 insertions(+), 1824 deletions(-) create mode 100644 video_remixer_processor.py diff --git a/tabs/video_remixer_ui.py b/tabs/video_remixer_ui.py index 18f4e9a2..04145117 100644 --- a/tabs/video_remixer_ui.py +++ b/tabs/video_remixer_ui.py @@ -2150,12 +2150,17 @@ def next_button5(self, inflate_option_changed, upscale_option_changed) - remix_report = self.state.process_remix(self.log, - kept_scenes, - self.config.remixer_settings, - self.engine, - self.config.engine_settings, - self.config.realesrgan_settings) + self.state.process_remix(self.log, + kept_scenes, + self.config.remixer_settings, + self.engine, + self.config.engine_settings, + self.config.realesrgan_settings) + + remix_report = self.generate_remix_report(self.processed_content_complete(self.RESIZE_STEP), + self.processed_content_complete(self.RESYNTH_STEP), + self.processed_content_complete(self.INFLATE_STEP), + self.processed_content_complete(self.UPSCALE_STEP)) styled_report = style_report("Content Ready for Remix Video:", remix_report, color="info") self.state.summary_info6 = styled_report diff --git a/video_remixer.py b/video_remixer.py index a237a72a..87fc7d25 100644 --- a/video_remixer.py +++ b/video_remixer.py @@ -741,6 +741,11 @@ def get_hint(self, scene_label, hint_type): return hints.get(hint_type) return None + RESIZE_HINT = "R" + RESYNTHESIS_HINT = "Y" + INFLATION_HINT = "I" + UPSCALE_HINT = "U" + def hint_present(self, hint_type): """return 
True if any kept scene has the passed hint type""" kept_scenes = self.kept_scenes() @@ -750,6 +755,18 @@ def hint_present(self, hint_type): return True return False + def resize_chosen(self): + return self.resize or self.hint_present(self.RESIZE_HINT) + + def resynthesize_chosen(self): + return self.resynthesize or self.hint_present(self.RESYNTHESIS_HINT) + + def inflate_chosen(self): + return self.inflate or self.hint_present(self.INFLATION_HINT) + + def upscale_chosen(self): + return self.upscale or self.hint_present(self.state.UPSCALE_HINT) + def set_scene_label(self, scene_index, scene_label): if scene_label: this_scene_name = self.scene_names[scene_index] @@ -1182,1327 +1199,1333 @@ def split_scene(self, log_fn, scene_index, split_percent, remixer_settings, glob return f"Scene split into new scenes {new_lower_scene_name} and {new_upper_scene_name}" - ## Main Processing ## - - RESIZE_STEP = "resize" - RESYNTH_STEP = "resynth" - INFLATE_STEP = "inflate" - UPSCALE_STEP = "upscale" - AUDIO_STEP = "audio" - VIDEO_STEP = "video" - - PURGED_CONTENT = "purged_content" - PURGED_DIR = "purged" - - def prepare_process_remix(self, redo_resynth, redo_inflate, redo_upscale): - self.setup_processing_paths() - - self.recompile_scenes() - - if self.processed_content_invalid: - self.purge_processed_content(purge_from=self.RESIZE_STEP) - self.processed_content_invalid = False - else: - self.purge_stale_processed_content(redo_resynth, redo_inflate, redo_upscale) - self.purge_incomplete_processed_content() - self.save() - - def process_remix(self, log_fn, kept_scenes, remixer_settings, engine, engine_settings, - realesrgan_settings): - if self.resize_needed(): - self.resize_scenes(log_fn, - kept_scenes, - remixer_settings) - - if self.resynthesize_needed(): - self.resynthesize_scenes(log_fn, - kept_scenes, - engine, - engine_settings, - self.resynth_option) - - if self.inflate_needed(): - self.inflate_scenes(log_fn, - kept_scenes, - engine, - engine_settings) - - if self.upscale_needed(): - self.upscale_scenes(log_fn, - kept_scenes, - realesrgan_settings, - remixer_settings) - - return self.generate_remix_report(self.processed_content_complete(self.RESIZE_STEP), - self.processed_content_complete(self.RESYNTH_STEP), - self.processed_content_complete(self.INFLATE_STEP), - self.processed_content_complete(self.UPSCALE_STEP)) - - def resize_chosen(self): - return self.resize or self.hint_present("R") - - def resize_needed(self): - return (self.resize and not self.processed_content_complete(self.RESIZE_STEP)) \ - or self.resize_chosen() - - def resynthesize_chosen(self): - return self.resynthesize or self.hint_present("Y") - - def resynthesize_needed(self): - return self.resynthesize_chosen() and not self.processed_content_complete(self.RESYNTH_STEP) - - def inflate_chosen(self): - return self.inflate or self.hint_present("I") - - def inflate_needed(self): - if self.inflate_chosen() and not self.processed_content_complete(self.INFLATE_STEP): - return True - - def upscale_chosen(self): - return self.upscale or self.hint_present("U") - - def upscale_needed(self): - return self.upscale_chosen() and not self.processed_content_complete(self.UPSCALE_STEP) - - def purge_paths(self, path_list : list, keep_original=False, purged_path=None, skip_empty_paths=False, additional_path=""): - """Purge a list of paths to the purged content directory - keep_original: True=don't remove original content when purging - purged_path: Used if calling multiple times to store purged content in the same purge directory - 
skip_empty_paths: True=don't purge directories that have no files inside - additional_path: If set, adds an additional segment onto the storage path (not returned) - Returns: Path to the purged content directory (not incl. additional_path) - """ - paths_to_purge = [] - for path in path_list: - if path and os.path.exists(path): - if not skip_empty_paths or directory_populated(path, files_only=True): - paths_to_purge.append(path) - if not paths_to_purge: - return None - - purged_root_path = os.path.join(self.project_path, self.PURGED_CONTENT) - create_directory(purged_root_path) - - if not purged_path: - purged_path, _ = AutoIncrementDirectory(purged_root_path).next_directory(self.PURGED_DIR) - - for path in paths_to_purge: - use_purged_path = os.path.join(purged_path, additional_path) - if keep_original: - _, last_path, _ = split_filepath(path) - copy_path = os.path.join(use_purged_path, last_path) - copy_files(path, copy_path) - else: - shutil.move(path, use_purged_path) - return purged_path - - def delete_purged_content(self): - purged_root_path = os.path.join(self.project_path, self.PURGED_CONTENT) - if os.path.exists(purged_root_path): - with Mtqdm().open_bar(total=1, desc="Deleting") as bar: - Mtqdm().message(bar, "Removing purged content - No ETA") - shutil.rmtree(purged_root_path) - Mtqdm().update_bar(bar) - return purged_root_path - else: - return None - - def delete_path(self, path): - if path and os.path.exists(path): - with Mtqdm().open_bar(total=1, desc="Deleting") as bar: - Mtqdm().message(bar, "Removing project content - No ETA") - shutil.rmtree(path) - Mtqdm().update_bar(bar) - return path - else: - return None - - def purge_processed_content(self, purge_from=RESIZE_STEP): - purge_paths = [self.resize_path, - self.resynthesis_path, - self.inflation_path, - self.upscale_path] - - if purge_from == self.RESIZE_STEP: - purge_paths = purge_paths[0:] - elif purge_from == self.RESYNTH_STEP: - purge_paths = purge_paths[1:] - elif purge_from == self.INFLATE_STEP: - purge_paths = purge_paths[2:] - elif purge_from == self.UPSCALE_STEP: - purge_paths = purge_paths[3:] - else: - raise RuntimeError(f"Unrecognized value {purge_from} passed to purge_processed_content()") - - purge_root = self.purge_paths(purge_paths) - self.clean_remix_content(purge_from="audio_clips", purge_root=purge_root) - return purge_root - - def clean_remix_content(self, purge_from, purge_root=None): - clean_paths = [self.audio_clips_path, - self.video_clips_path, - self.clips_path] - - # purge all of the paths, keeping the originals, for safekeeping ahead of reprocessing - purge_root = self.purge_paths(clean_paths, keep_original=True, purged_path=purge_root, - skip_empty_paths=True) - if purge_root: - self.copy_project_file(purge_root) - - if purge_from == "audio_clips": - clean_paths = clean_paths[0:] - self.audio_clips = [] - self.video_clips = [] - self.clips = [] - elif purge_from == "video_clips": - clean_paths = clean_paths[1:] - self.video_clips = [] - self.clips = [] - elif purge_from == "remix_clips": - clean_paths = clean_paths[2:] - self.clips = [] - - # clean directories as needed by purge_from - # audio wav files can be slow to extract, so they are carefully not cleaned unless needed - clean_directories(clean_paths) - return purge_root - - def clean_remix_audio(self): - clean_directories([self.audio_clips_path]) - - RESIZE_PATH = "SCENES-RC" - RESYNTH_PATH = "SCENES-RE" - INFLATE_PATH = "SCENES-IN" - UPSCALE_PATH = "SCENES-UP" - - def setup_processing_paths(self): - self.resize_path = 
os.path.join(self.project_path, self.RESIZE_PATH) - self.resynthesis_path = os.path.join(self.project_path, self.RESYNTH_PATH) - self.inflation_path = os.path.join(self.project_path, self.INFLATE_PATH) - self.upscale_path = os.path.join(self.project_path, self.UPSCALE_PATH) - - def _processed_content_complete(self, path, expected_dirs = 0, expected_files = 0): - if not path or not os.path.exists(path): - return False - if expected_dirs: - return len(get_directories(path)) == expected_dirs - if expected_files: - return len(get_files(path)) == expected_files - return True - - def processed_content_complete(self, processing_step): - expected_items = len(self.kept_scenes()) - if processing_step == self.RESIZE_STEP: - return self._processed_content_complete(self.resize_path, expected_dirs=expected_items) - elif processing_step == self.RESYNTH_STEP: - return self._processed_content_complete(self.resynthesis_path, expected_dirs=expected_items) - elif processing_step == self.INFLATE_STEP: - return self._processed_content_complete(self.inflation_path, expected_dirs=expected_items) - elif processing_step == self.UPSCALE_STEP: - return self._processed_content_complete(self.upscale_path, expected_dirs=expected_items) - elif processing_step == self.AUDIO_STEP: - return self._processed_content_complete(self.audio_clips_path, expected_files=expected_items) - elif processing_step == self.VIDEO_STEP: - return self._processed_content_complete(self.video_clips_path, expected_files=expected_items) - else: - raise RuntimeError(f"'processing_step' {processing_step} is unrecognized") - - # processed content is stale if it is not selected and exists - def processed_content_stale(self, selected : bool, path : str): - if selected: - return False - if not os.path.exists(path): - return False - contents = get_directories(path) - content_present = len(contents) > 0 - return content_present - - # content is stale if it is present on disk but not currently selected - # stale content and its derivative content should be purged - def purge_stale_processed_content(self, purge_resynth, purge_inflation, purge_upscale): - if self.processed_content_stale(self.resize_chosen(), self.resize_path): - self.purge_processed_content(purge_from=self.RESIZE_STEP) - - if self.processed_content_stale(self.resynthesize_chosen(), self.resynthesis_path) or purge_resynth: - self.purge_processed_content(purge_from=self.RESYNTH_STEP) - - if self.processed_content_stale(self.inflate_chosen(), self.inflation_path) or purge_inflation: - self.purge_processed_content(purge_from=self.INFLATE_STEP) - - if self.processed_content_stale(self.upscale_chosen(), self.upscale_path) or purge_upscale: - self.purge_processed_content(purge_from=self.UPSCALE_STEP) - - def purge_incomplete_processed_content(self): - # content is incomplete if the wrong number of scene directories are present - # if it is currently selected and incomplete, it should be purged - if self.resize_chosen() and not self.processed_content_complete(self.RESIZE_STEP): - self.purge_processed_content(purge_from=self.RESIZE_STEP) - - if self.resynthesize_chosen() and not self.processed_content_complete(self.RESYNTH_STEP): - self.purge_processed_content(purge_from=self.RESYNTH_STEP) - - if self.inflate_chosen() and not self.processed_content_complete(self.INFLATE_STEP): - self.purge_processed_content(purge_from=self.INFLATE_STEP) - - if self.upscale_chosen() and not self.processed_content_complete(self.UPSCALE_STEP): - self.purge_processed_content(purge_from=self.UPSCALE_STEP) - - def 
scenes_source_path(self, processing_step): - processing_path = self.scenes_path - - if processing_step == self.RESIZE_STEP: - # resize is the first processing step and always draws from the scenes path - pass - - elif processing_step == self.RESYNTH_STEP: - # resynthesis is the second processing step - if self.resize_chosen(): - # if resize is enabled, draw from the resized scenes path - processing_path = self.resize_path - - elif processing_step == self.INFLATE_STEP: - # inflation is the third processing step - if self.resynthesize_chosen(): - # if resynthesis is enabled, draw from the resyntheized scenes path - processing_path = self.resynthesis_path - elif self.resize_chosen(): - # if resize is enabled, draw from the resized scenes path - processing_path = self.resize_path - - elif processing_step == self.UPSCALE_STEP: - # upscaling is the fourth processing step - if self.inflate_chosen(): - # if inflation is enabled, draw from the inflation path - processing_path = self.inflation_path - elif self.resynthesize_chosen(): - # if resynthesis is enabled, draw from the resyntheized scenes path - processing_path = self.resynthesis_path - elif self.resize_chosen(): - # if resize is enabled, draw from the resized scenes path - processing_path = self.resize_path - - return processing_path - - def get_resize_params(self, resize_w, resize_h, crop_w, crop_h, content_width, content_height, remixer_settings): - if resize_w == content_width and resize_h == content_height: - scale_type = "none" - else: - if resize_w <= content_width and resize_h <= content_height: - # use the down scaling type if there are only reductions - # the default "area" type preserves details better on reducing - scale_type = remixer_settings["scale_type_down"] - else: - # otherwise use the upscaling type - # the default "lanczos" type preserves details better on enlarging - scale_type = remixer_settings["scale_type_up"] - - if crop_w == resize_w and crop_h == resize_h: - # disable cropping if none to do - crop_type = "none" - elif crop_w > resize_w or crop_h > resize_h: - # disable cropping if it will wrap/is invalid - # TODO put bounds on the crop parameters instead of disabling - crop_type = "none" - else: - crop_type = "crop" - return scale_type, crop_type - - def prepare_save_remix(self, log_fn, global_options, remixer_settings, output_filepath : str, - invalidate_video_clips=True): - if not output_filepath: - raise ValueError("Enter a path for the remixed video to proceed") - - self.recompile_scenes() - - kept_scenes = self.kept_scenes() - if not kept_scenes: - raise ValueError("No kept scenes were found") - - self.drop_empty_processed_scenes(kept_scenes) - self.save() - - # get this again in case scenes have been auto-dropped - kept_scenes = self.kept_scenes() - if not kept_scenes: - raise ValueError("No kept scenes after removing empties") - - # create audio clips only if they do not already exist - # this depends on the audio clips being purged at the time the scene selection are compiled - if self.video_details["has_audio"] and not self.processed_content_complete( - self.AUDIO_STEP): - audio_format = remixer_settings["audio_format"] - self.create_audio_clips(log_fn, global_options, audio_format=audio_format) - self.save() - - # leave video clips if they are complete since we may be only making audio changes - if invalidate_video_clips or not self.processed_content_complete(self.VIDEO_STEP): - self.clean_remix_content(purge_from="video_clips") - else: - # always recreate remix clips - 
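The scenes_source_path() logic above chains the processing steps: each stage reads from the output of the nearest earlier stage that is enabled, falling back to the original scenes path when none are. A condensed sketch of that fallback order, with a hypothetical helper name and a placeholder for the scenes path:

    def pick_source(enabled_outputs, scenes_path):
        # enabled_outputs: ordered (enabled, path) pairs for the earlier stages
        for enabled, path in reversed(enabled_outputs):
            if enabled:
                return path
        return scenes_path

    # upscale draws from inflation if enabled, else resynthesis, else resize, else scenes
    pick_source([(True, "SCENES-RC"), (False, "SCENES-RE"), (True, "SCENES-IN")],
                "SCENES-SOURCE")         # -> "SCENES-IN"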
self.clean_remix_content(purge_from="remix_clips") - - return kept_scenes - - def save_remix(self, log_fn, global_options, kept_scenes): - # leave video clips if they are complete since we may be only making audio changes - if not self.processed_content_complete(self.VIDEO_STEP): - self.create_video_clips(log_fn, kept_scenes, global_options) - self.save() - - self.create_scene_clips(log_fn, kept_scenes, global_options) - self.save() - - if not self.clips: - raise ValueError("No processed video clips were found") - - ffcmd = self.create_remix_video(log_fn, global_options, self.output_filepath) - log_fn(f"FFmpeg command: {ffcmd}") - self.save() - - def save_custom_remix(self, - log_fn, - output_filepath, - global_options, - kept_scenes, - custom_video_options, - custom_audio_options, - draw_text_options=None, - use_scene_sorting=True): - _, _, output_ext = split_filepath(output_filepath) - output_ext = output_ext[1:] - - # leave video clips if they are complete since we may be only making audio changes - if not self.processed_content_complete(self.VIDEO_STEP): - self.create_custom_video_clips(log_fn, kept_scenes, global_options, - custom_video_options=custom_video_options, - custom_ext=output_ext, - draw_text_options=draw_text_options) - self.save() - - self.create_custom_scene_clips(kept_scenes, global_options, - custom_audio_options=custom_audio_options, - custom_ext=output_ext) - self.save() - - if not self.clips: - raise ValueError("No processed video clips were found") - - ffcmd = self.create_remix_video(log_fn, global_options, output_filepath, - use_scene_sorting=use_scene_sorting) - log_fn(f"FFmpeg command: {ffcmd}") - self.save() - - def resize_scene(self, - log_fn, - scene_input_path, - scene_output_path, - resize_w, - resize_h, - crop_w, - crop_h, - crop_offset_x, - crop_offset_y, - scale_type, - crop_type, - params_fn : Callable | None = None, - params_context : any=None): - - ResizeFrames(scene_input_path, - scene_output_path, - resize_w, - resize_h, - scale_type, - log_fn, - crop_type=crop_type, - crop_width=crop_w, - crop_height=crop_h, - crop_offset_x=crop_offset_x, - crop_offset_y=crop_offset_y).resize(type=self.frame_format, params_fn=params_fn, - params_context=params_context) - - def setup_resize_hint(self, content_width, content_height): - # use the main resize/crop settings if resizing, or the content native - # dimensions if not, as a foundation for handling resize hints - if self.resize: - main_resize_w = self.resize_w - main_resize_h = self.resize_h - main_crop_w = self.crop_w - main_crop_h = self.crop_h - if self.crop_offset_x < 0: - main_offset_x = (main_resize_w - main_crop_w) / 2.0 - else: - main_offset_x = self.crop_offset_x - if self.crop_offset_y < 0: - main_offset_y = (main_resize_h - main_crop_h) / 2.0 - else: - main_offset_y = self.crop_offset_y - else: - main_resize_w = content_width - main_resize_h = content_height - main_crop_w = content_width - main_crop_h = content_height - main_offset_x = 0 - main_offset_y = 0 - return main_resize_w, main_resize_h, main_crop_w, main_crop_h, main_offset_x, main_offset_y - - QUADRANT_ZOOM_HINT = "/" - QUADRANT_GRID_CHAR = "X" - PERCENT_ZOOM_HINT = "%" - COMBINED_ZOOM_HINT = "@" - ANIMATED_ZOOM_HINT = "-" - QUADRANT_ZOOM_MIN_LEN = 3 # 1/3 - PERCENT_ZOOM_MIN_LEN = 4 # 123% - COMBINED_ZOOM_MIN_LEN = 8 # 1/1@100% - ANIMATED_ZOOM_MIN_LEN = 7 # 1/3-5/7 - - def get_quadrant_zoom(self, hint): - if self.QUADRANT_ZOOM_HINT in hint: - if len(hint) >= self.QUADRANT_ZOOM_MIN_LEN: - split_pos = hint.index(self.QUADRANT_ZOOM_HINT) - 
quadrant = hint[:split_pos] - quadrants = hint[split_pos+1:] - else: - quadrant, quadrants = 1, 1 - - return quadrant, quadrants - else: - return None, None - - def get_percent_zoom(self, hint): - if self.PERCENT_ZOOM_HINT in hint: - if len(hint) >= self.PERCENT_ZOOM_MIN_LEN: - zoom_percent = int(hint.replace(self.PERCENT_ZOOM_HINT, "")) - if zoom_percent >= 100: - return zoom_percent - return 100 - else: - return None - - def get_zoom_part(self, hint): - if self.COMBINED_ZOOM_HINT in hint and len(hint) >= self.COMBINED_ZOOM_MIN_LEN: - type = self.COMBINED_ZOOM_HINT - quadrant, quadrants, zoom_percent = self.get_combined_zoom(hint) - return type, quadrant, quadrants, zoom_percent - if self.QUADRANT_ZOOM_HINT in hint and len(hint) >= self.QUADRANT_ZOOM_MIN_LEN: - type = self.QUADRANT_ZOOM_HINT - quadrant, quadrants = self.get_quadrant_zoom(hint) - return type, quadrant, quadrants, None - elif self.PERCENT_ZOOM_HINT in hint and len(hint) >= self.PERCENT_ZOOM_MIN_LEN: - type = self.PERCENT_ZOOM_HINT - self.get_percent_zoom(hint) - zoom_percent = self.get_percent_zoom(hint) - return type, None, None, zoom_percent - return None, None, None, None - - def get_combined_zoom(self, hint): - if self.COMBINED_ZOOM_HINT in hint: - if len(hint) >= self.COMBINED_ZOOM_MIN_LEN: - split_pos = hint.index(self.COMBINED_ZOOM_HINT) - hint_a = hint[:split_pos] - hint_b = hint[split_pos+1:] - a_type, a_quadrant, a_quadrants, a_zoom_percent = self.get_zoom_part(hint_a) - b_type, b_quadrant, b_quadrants, b_zoom_percent = self.get_zoom_part(hint_b) - if a_type == self.PERCENT_ZOOM_HINT and b_type == self.QUADRANT_ZOOM_HINT: - zoom_percent = a_zoom_percent - quadrant, quadrants = b_quadrant, b_quadrants - elif a_type == self.QUADRANT_ZOOM_HINT and b_type == self.PERCENT_ZOOM_HINT: - zoom_percent = b_zoom_percent - quadrant, quadrants = a_quadrant, a_quadrants - return quadrant, quadrants, zoom_percent - return None, None, None - - def get_animated_zoom(self, hint): - if self.ANIMATED_ZOOM_HINT in hint: - if len(hint) >= self.ANIMATED_ZOOM_MIN_LEN: - split_pos = hint.index(self.ANIMATED_ZOOM_HINT) - hint_from = hint[:split_pos] - hint_to = hint[split_pos+1:] - from_type, from_param1, from_param2, from_param3 = self.get_zoom_part(hint_from) - to_type, to_param1, to_param2, to_param3 = self.get_zoom_part(hint_to) - if from_type and to_type: - return from_type, from_param1, from_param2, from_param3, to_type, to_param1, to_param2, to_param3 - return None, None, None, None, None, None, None, None - - def compute_zoom_type(self, type, param1, param2, param3, main_resize_w, main_resize_h, - main_offset_x, main_offset_y, main_crop_w, main_crop_h, log_fn): - if type == self.COMBINED_ZOOM_HINT: - quadrant, quadrants, zoom_percent = param1, param2, param3 - if quadrant and quadrants and zoom_percent: - return self.compute_combined_zoom(quadrant, quadrants, zoom_percent, - main_resize_w, main_resize_h, - main_offset_x, main_offset_y, - main_crop_w, main_crop_h, log_fn=log_fn) - elif type == self.QUADRANT_ZOOM_HINT: - quadrant, quadrants = param1, param2 - if quadrant and quadrants: - return self.compute_quadrant_zoom(quadrant, quadrants, - main_resize_w, main_resize_h, - main_offset_x, main_offset_y, - main_crop_w, main_crop_h, log_fn=log_fn) - elif type == self.PERCENT_ZOOM_HINT: - zoom_percent = param3 - if zoom_percent: - return self.compute_percent_zoom(zoom_percent, - main_resize_w, main_resize_h, - main_offset_x, main_offset_y, - main_crop_w, main_crop_h, log_fn=log_fn) - - def compute_quadrant_zoom(self, quadrant, 
quadrants, main_resize_w, main_resize_h, - main_offset_x, main_offset_y, main_crop_w, main_crop_h, log_fn): - quadrant = int(quadrant) - 1 - - if self.QUADRANT_GRID_CHAR in quadrants: - parts = quadrants.split(self.QUADRANT_GRID_CHAR) - if len(parts) == 2: - grid_x = int(parts[0]) - grid_y = int(parts[1]) - magnitude_x = grid_x - magnitude_y = grid_y - - if magnitude_x >= magnitude_y: - magnitude = magnitude_x - row = int(quadrant / magnitude_x) - column = quadrant % magnitude_x - else: - magnitude = magnitude_y - row = int(quadrant / magnitude_x) - column = quadrant % magnitude_x - else: - magnitude = 1 - magnitude_x = magnitude - magnitude_y = magnitude - row = 0 - column = 0 - else: - magnitude = int(math.sqrt(int(quadrants))) - magnitude_x = magnitude - magnitude_y = magnitude - row = int(quadrant / magnitude) - column = quadrant % magnitude - - # compute frame scaling - resize_w = main_resize_w * magnitude - resize_h = main_resize_h * magnitude - - # compute crop area scaling - crop_w = main_crop_w * magnitude - crop_h = main_crop_h * magnitude - - # if the main crop offset is negative, auto-center it within the frame - # otherwise scale up the specific offset - offset_x, offset_y = 0, 0 - if main_offset_x < 0: - offset_x = (resize_w - crop_w) / 2.0 - else: - offset_x = main_offset_x * magnitude - if main_offset_y < 0: - offset_y = (resize_h - crop_h) / 2.0 - else: - offset_y = main_offset_y * magnitude - - # compute the dimensions of one grid cell given the crop and magnitude(s) - cell_width = crop_w / magnitude_x - cell_height = crop_h / magnitude_y - - # compute the upper left corner of the grid cell given the cell dimensions, - # and row, column; unadjusted for main crop offset - cell_offset_x = column * cell_width - cell_offset_y = row * cell_height - - # add the main offset - cell_offset_x += offset_x - cell_offset_y += offset_y - - # compute the center point - center_x = cell_offset_x + (cell_width / 2.0) - center_y = cell_offset_y + (cell_height / 2.0) - - return resize_w, resize_h, center_x, center_y - - def compute_percent_zoom(self, zoom_percent, main_resize_w, main_resize_h, main_offset_x, - main_offset_y, main_crop_w, main_crop_h, log_fn): - magnitude = zoom_percent / 100.0 - - # compute frame scaling - resize_w = main_resize_w * magnitude - resize_h = main_resize_h * magnitude - - # compute crop area scaling - crop_w = main_crop_w * magnitude - crop_h = main_crop_h * magnitude - - # if the main crop offset is negative, auto-center it within the frame - # otherwise scale up the specific offset - offset_x, offset_y = 0, 0 - if main_offset_x < 0: - offset_x = (resize_w - crop_w) / 2.0 - else: - offset_x = main_offset_x * magnitude - if main_offset_y < 0: - offset_y = (resize_h - crop_h) / 2.0 - else: - offset_y = main_offset_y * magnitude - - # compute the centerpoint of the scaled crop area - center_x = (crop_w / 2.0) + offset_x - center_y = (crop_h / 2.0) + offset_y - - return resize_w, resize_h, center_x, center_y - - MAX_SELF_FIT_ZOOM = 1000 - - def compute_combined_zoom(self, quadrant, quadrants, zoom_percent, main_resize_w, main_resize_h, - main_offset_x, main_offset_y, main_crop_w, main_crop_h, log_fn): - resize_w, resize_h, _, _ = self.compute_percent_zoom(zoom_percent, - main_resize_w, main_resize_h, - main_offset_x, main_offset_y, - main_crop_w, main_crop_h, log_fn) - quadrant_resize_w, _, quadrant_center_x, quadrant_center_y = self.compute_quadrant_zoom(quadrant, quadrants, - main_resize_w, main_resize_h, - main_offset_x, main_offset_y, - main_crop_w, 
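A worked example of the grid arithmetic above, for a hint like '5/3X2' (quadrant 5 of a 3-wide by 2-high grid), assuming an illustrative 1920x1080 main crop:

    quadrant = 5 - 1                     # zero-based cell index -> 4
    grid_x, grid_y = 3, 2
    magnitude = max(grid_x, grid_y)      # 3: frame and crop area are scaled 3x
    row = quadrant // grid_x             # 4 // 3 = 1, second row
    column = quadrant % grid_x           # 4 % 3 = 1, second column

    crop_w, crop_h = 1920 * magnitude, 1080 * magnitude    # 5760 x 3240
    cell_width = crop_w / grid_x         # 1920.0
    cell_height = crop_h / grid_y        # 1620.0
    # center of the chosen cell, before adding the scaled main crop offset
    center_x = column * cell_width + cell_width / 2.0      # 2880.0
    center_y = row * cell_height + cell_height / 2.0       # 2430.0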
main_crop_h, log_fn) - - # scale the quadrant center point to the percent resize - scale = resize_w / quadrant_resize_w - center_x = quadrant_center_x * scale - center_y = quadrant_center_y * scale - - if self.check_crop_bounds(resize_w, resize_h, center_x, center_y, main_crop_w, main_crop_h): - # fit the requested zoom percent to be in bounds - fit_zoom_percent = zoom_percent - while fit_zoom_percent < self.MAX_SELF_FIT_ZOOM and \ - self.check_crop_bounds(resize_w, resize_h, center_x, center_y, main_crop_w, main_crop_h): - fit_zoom_percent += 1 - resize_w, resize_h, _, _ = self.compute_percent_zoom(fit_zoom_percent, - main_resize_w, main_resize_h, - main_offset_x, main_offset_y, - main_crop_w, main_crop_h, log_fn) - quadrant_resize_w, _, quadrant_center_x, quadrant_center_y = self.compute_quadrant_zoom(quadrant, quadrants, - main_resize_w, main_resize_h, - main_offset_x, main_offset_y, - main_crop_w, main_crop_h, log_fn) - - # scale the quadrant center point to the percent resize - # this seems to work on the left and middle but not on the right - scale = resize_w / quadrant_resize_w - center_x = quadrant_center_x * scale - center_y = quadrant_center_y * scale - - # if still out of bounds, restore to quadrant zoom - if self.check_crop_bounds(resize_w, resize_h, center_x, center_y, main_crop_w, main_crop_h): - log_fn("Can't find fitting zoom percentage; ignoring percent part.") - resize_w, resize_h, center_x, center_y = \ - self.compute_quadrant_zoom(quadrant, quadrants, main_resize_w, main_resize_h, - main_offset_x, main_offset_y, main_crop_w, main_crop_h, log_fn) - else: - log_fn(f"Found fitting zoom percentage: {fit_zoom_percent}%.") - - return resize_w, resize_h, center_x, center_y - - def check_crop_bounds(self, resize_w, resize_h, center_x, center_y, main_crop_w, main_crop_h): - crop_offset_x = center_x - (main_crop_w / 2.0) - crop_offset_y = center_y - (main_crop_h / 2.0) - return crop_offset_x < 0 or crop_offset_x + main_crop_w > resize_w \ - or crop_offset_y < 0 or crop_offset_y + main_crop_h > resize_h - - def compute_animated_zoom(self, num_frames, from_type, from_param1, from_param2, from_param3, - to_type, to_param1, to_param2, to_param3, - main_resize_w, main_resize_h, main_offset_x, main_offset_y, - main_crop_w, main_crop_h, log_fn): - - from_resize_w, from_resize_h, from_center_x, from_center_y = \ - self.compute_zoom_type(from_type, from_param1, from_param2, from_param3, - main_resize_w, main_resize_h, - main_offset_x, main_offset_y, - main_crop_w, main_crop_h, log_fn=log_fn) - - to_resize_w, to_resize_h, to_center_x, to_center_y = \ - self.compute_zoom_type(to_type, to_param1, to_param2, to_param3, - main_resize_w, main_resize_h, - main_offset_x, main_offset_y, - main_crop_w, main_crop_h, log_fn=log_fn) - - diff_resize_w = to_resize_w - from_resize_w - diff_resize_h = to_resize_h - from_resize_h - diff_center_x = to_center_x - from_center_x - diff_center_y = to_center_y - from_center_y - - # ensure the final transition occurs - num_frames -= 1 - - step_resize_w = diff_resize_w / num_frames - step_resize_h = diff_resize_h / num_frames - step_center_x = diff_center_x / num_frames - step_center_y = diff_center_y / num_frames - - context = {} - context["from_resize_w"] = from_resize_w - context["from_resize_h"] = from_resize_h - context["from_center_x"] = from_center_x - context["from_center_y"] = from_center_y - context["step_resize_w"] = step_resize_w - context["step_resize_h"] = step_resize_h - context["step_center_x"] = step_center_x - context["step_center_y"] = 
step_center_y - context["main_crop_w"] = main_crop_w - context["main_crop_h"] = main_crop_h - return context - - def _resize_frame_param(self, index, context): - from_resize_w = context["from_resize_w"] - from_resize_h = context["from_resize_h"] - from_center_x = context["from_center_x"] - from_center_y = context["from_center_y"] - step_resize_w = context["step_resize_w"] - step_resize_h = context["step_resize_h"] - step_center_x = context["step_center_x"] - step_center_y = context["step_center_y"] - main_crop_w = context["main_crop_w"] - main_crop_h = context["main_crop_h"] - - resize_w = from_resize_w + (index * step_resize_w) - resize_h = from_resize_h + (index * step_resize_h) - center_x = from_center_x + (index * step_center_x) - center_y = from_center_y + (index * step_center_y) - crop_offset_x = center_x - (main_crop_w / 2.0) - crop_offset_y = center_y - (main_crop_h / 2.0) - - return int(resize_w), int(resize_h), int(crop_offset_x), int(crop_offset_y) - - def resize_scenes(self, log_fn, kept_scenes, remixer_settings): - scenes_base_path = self.scenes_source_path(self.RESIZE_STEP) - create_directory(self.resize_path) - - content_width = self.video_details["content_width"] - content_height = self.video_details["content_height"] - scale_type, crop_type= self.get_resize_params(self.resize_w, self.resize_h, self.crop_w, - self.crop_h, content_width, content_height, - remixer_settings) - - with Mtqdm().open_bar(total=len(kept_scenes), desc="Resize") as bar: - for scene_name in kept_scenes: - scene_input_path = os.path.join(scenes_base_path, scene_name) - scene_output_path = os.path.join(self.resize_path, scene_name) - create_directory(scene_output_path) - - resize_handled = False - resize_hint = self.get_hint(self.scene_labels.get(scene_name), "R") - if resize_hint: - main_resize_w, main_resize_h, main_crop_w, main_crop_h, main_offset_x, \ - main_offset_y = self.setup_resize_hint(content_width, content_height) - - try: - if self.ANIMATED_ZOOM_HINT in resize_hint: - # interprent 'any-any' as animating from one to the other zoom factor - from_type, from_param1, from_param2, from_param3, to_type, to_param1, to_param2, to_param3 = \ - self.get_animated_zoom(resize_hint) - if from_type and to_type: - first_frame, last_frame, _ = details_from_group_name(scene_name) - num_frames = (last_frame - first_frame) + 1 - context = self.compute_animated_zoom(num_frames, - from_type, from_param1, from_param2, from_param3, - to_type, to_param1, to_param2, to_param3, - main_resize_w, main_resize_h, main_offset_x, main_offset_y, - main_crop_w, main_crop_h, log_fn) - - scale_type = remixer_settings["scale_type_up"] - self.resize_scene(log_fn, - scene_input_path, - scene_output_path, - None, - None, - main_crop_w, - main_crop_h, - None, - None, - scale_type, - crop_type="crop", - params_fn=self._resize_frame_param, - params_context=context) - resize_handled = True - - elif self.COMBINED_ZOOM_HINT in resize_hint: - quadrant, quadrants, zoom_percent = self.get_combined_zoom(resize_hint) - if quadrant and quadrants and zoom_percent: - resize_w, resize_h, center_x, center_y = \ - self.compute_combined_zoom(quadrant, quadrants, zoom_percent, - main_resize_w, main_resize_h, - main_offset_x, main_offset_y, - main_crop_w, main_crop_h, log_fn) - - crop_offset_x = center_x - (main_crop_w / 2.0) - crop_offset_y = center_y - (main_crop_h / 2.0) - - scale_type = remixer_settings["scale_type_up"] - self.resize_scene(log_fn, - scene_input_path, - scene_output_path, - int(resize_w), - int(resize_h), - int(main_crop_w), - 
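The per-frame parameters above are a straight linear interpolation between the 'from' and 'to' zoom settings; num_frames is reduced by one so the last frame lands exactly on the 'to' values. Illustrative arithmetic for a 91-frame scene animating from a 1920-wide to a 3840-wide resize:

    num_frames = 91 - 1                  # 90 steps; frame 0 is "from", frame 90 is "to"
    from_resize_w, to_resize_w = 1920, 3840
    step_resize_w = (to_resize_w - from_resize_w) / num_frames    # 21.333...

    frame_0_resize_w = from_resize_w + 0 * step_resize_w      # 1920.0
    frame_45_resize_w = from_resize_w + 45 * step_resize_w    # 2880.0, halfway
    frame_90_resize_w = from_resize_w + 90 * step_resize_w    # 3840.0, the "to" width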
int(main_crop_h), - int(crop_offset_x), - int(crop_offset_y), - scale_type, - crop_type="crop") - resize_handled = True - - elif self.QUADRANT_ZOOM_HINT in resize_hint: - # interpret 'x/y' as x: quadrant, y: square-based number of quadrants - # '5/9' and '13/25' would be the center squares of 3x3 and 5x5 grids - # zoomed in at 300% and 500% - quadrant, quadrants = self.get_quadrant_zoom(resize_hint) - if quadrant and quadrants: - resize_w, resize_h, center_x, center_y = \ - self.compute_quadrant_zoom(quadrant, quadrants, - main_resize_w, main_resize_h, - main_offset_x, main_offset_y, - main_crop_w, main_crop_h, log_fn) - - scale_type = remixer_settings["scale_type_up"] - crop_offset_x = center_x - (main_crop_w / 2.0) - crop_offset_y = center_y - (main_crop_h / 2.0) - self.resize_scene(log_fn, - scene_input_path, - scene_output_path, - int(resize_w), - int(resize_h), - int(main_crop_w), - int(main_crop_h), - int(crop_offset_x), - int(crop_offset_y), - scale_type, - crop_type="crop") - resize_handled = True - - elif self.PERCENT_ZOOM_HINT in resize_hint: - # interpret z% as zoom percent to zoom into center - zoom_percent = self.get_percent_zoom(resize_hint) - if zoom_percent: - resize_w, resize_h, center_x, center_y = \ - self.compute_percent_zoom(zoom_percent, - main_resize_w, main_resize_h, - main_offset_x, main_offset_y, - main_crop_w, main_crop_h, log_fn) - scale_type = remixer_settings["scale_type_up"] - crop_offset_x = center_x - (main_crop_w / 2.0) - crop_offset_y = center_y - (main_crop_h / 2.0) - self.resize_scene(log_fn, - scene_input_path, - scene_output_path, - int(resize_w), - int(resize_h), - int(main_crop_w), - int(main_crop_h), - int(crop_offset_x), - int(crop_offset_y), - scale_type, - crop_type="crop") - resize_handled = True - except Exception as error: - # TODO - print(error) - raise - log_fn( -f"Error in resize_scenes() handling processing hint {resize_hint} - skipping processing: {error}") - resize_handled = False - - if not resize_handled: - self.resize_scene(log_fn, - scene_input_path, - scene_output_path, - int(self.resize_w), - int(self.resize_h), - int(self.crop_w), - int(self.crop_h), - int(self.crop_offset_x), - int(self.crop_offset_y), - scale_type, - crop_type) - - Mtqdm().update_bar(bar) - - # TODO dry up this code with same in resynthesize_video_ui - maybe a specific resynth script - def one_pass_resynthesis(self, log_fn, input_path, output_path, output_basename, - engine : InterpolateSeries): - file_list = sorted(get_files(input_path, extension=self.frame_format)) - log_fn(f"beginning series of frame recreations at {output_path}") - engine.interpolate_series(file_list, output_path, 1, "interframe", offset=2, - type=self.frame_format) - - log_fn(f"auto-resequencing recreated frames at {output_path}") - ResequenceFiles(output_path, - self.frame_format, - "resynthesized_frame", - 1, 1, # start, step - 1, 0, # stride, offset - -1, # auto-zero fill - True, # rename - log_fn).resequence() - - def two_pass_resynth_pass(self, log_fn, input_path, output_path, output_basename, - engine : InterpolateSeries): - file_list = sorted(get_files(input_path, extension=self.frame_format)) - - inflated_frames = os.path.join(output_path, "inflated_frames") - log_fn(f"beginning series of interframe recreations at {inflated_frames}") - create_directory(inflated_frames) - engine.interpolate_series(file_list, inflated_frames, 1, "interframe", - type=self.frame_format) - - log_fn(f"selecting odd interframes only at {inflated_frames}") - ResequenceFiles(inflated_frames, - 
self.frame_format, - output_basename, - 1, 1, # start, step - 2, 1, # stride, offset - -1, # auto-zero fill - False, # rename - log_fn, - output_path=output_path).resequence() - remove_directories([inflated_frames]) - - def two_pass_resynthesis(self, log_fn, input_path, output_path, output_basename, engine, one_pass_only=False): - passes = 1 if one_pass_only else 2 - with Mtqdm().open_bar(total=passes, desc="Two-Pass Resynthesis") as bar: - if not one_pass_only: - interframes = os.path.join(output_path, "interframes") - create_directory(interframes) - self.two_pass_resynth_pass(log_fn, input_path, interframes, "odd_interframe", engine) - input_path = interframes - - self.two_pass_resynth_pass(log_fn, input_path, output_path, output_basename, engine) - - if not one_pass_only: - remove_directories([interframes]) - - def resynthesize_scenes(self, log_fn, kept_scenes, engine, engine_settings, resynth_option): - interpolater = Interpolate(engine.model, log_fn) - use_time_step = engine_settings["use_time_step"] - deep_interpolater = DeepInterpolate(interpolater, use_time_step, log_fn) - series_interpolater = InterpolateSeries(deep_interpolater, log_fn) - output_basename = "resynthesized_frames" - - scenes_base_path = self.scenes_source_path(self.RESYNTH_STEP) - create_directory(self.resynthesis_path) - - with Mtqdm().open_bar(total=len(kept_scenes), desc="Resynthesize") as bar: - for scene_name in kept_scenes: - scene_input_path = os.path.join(scenes_base_path, scene_name) - scene_output_path = os.path.join(self.resynthesis_path, scene_name) - create_directory(scene_output_path) - - resynth_type = resynth_option if self.resynthesize else None - resynth_hint = self.get_hint(self.scene_labels.get(scene_name), "Y") - if resynth_hint: - if "C" in resynth_hint: - resynth_type = "Clean" - elif "S" in resynth_hint: - resynth_type = "Scrub" - elif "R" in resynth_hint: - resynth_type = "Replace" - elif "N" in resynth_hint: - resynth_type = None - - if resynth_type == "Replace": - self.one_pass_resynthesis(log_fn, scene_input_path, scene_output_path, - output_basename, series_interpolater) - elif resynth_type == "Clean" or resynth_type == "Scrub": - one_pass_only = resynth_type == "Clean" - self.two_pass_resynthesis(log_fn, scene_input_path, scene_output_path, - output_basename, series_interpolater, - one_pass_only=one_pass_only) - else: - # no need to resynthesize so just copy the files using the resequencer - ResequenceFiles(scene_input_path, - self.frame_format, - "resynthesized_frame", - 1, 1, - 1, 0, - -1, - False, - log_fn, - output_path=scene_output_path).resequence() - - Mtqdm().update_bar(bar) - - def inflate_scenes(self, log_fn, kept_scenes, engine, engine_settings): - interpolater = Interpolate(engine.model, log_fn) - use_time_step = engine_settings["use_time_step"] - deep_interpolater = DeepInterpolate(interpolater, use_time_step, log_fn) - series_interpolater = InterpolateSeries(deep_interpolater, log_fn) - - scenes_base_path = self.scenes_source_path(self.INFLATE_STEP) - create_directory(self.inflation_path) - - with Mtqdm().open_bar(total=len(kept_scenes), desc="Inflate") as bar: - for scene_name in kept_scenes: - scene_input_path = os.path.join(scenes_base_path, scene_name) - scene_output_path = os.path.join(self.inflation_path, scene_name) - create_directory(scene_output_path) - - num_splits = 0 - disable_inflation = False - - project_splits = 0 - if self.inflate: - if self.inflate_by_option == "1X": - project_splits = 0 - if self.inflate_by_option == "2X": - project_splits = 1 - elif 
self.inflate_by_option == "4X": - project_splits = 2 - elif self.inflate_by_option == "8X": - project_splits = 3 - elif self.inflate_by_option == "16X": - project_splits = 4 - - # if it's for slow motion, the split should be relative to the - # project inflation rate - - hinted_splits = 0 - force_inflation, force_audio, force_inflate_by, force_silent =\ - self.compute_forced_inflation(scene_name) - if force_inflation: - if force_inflate_by == "1X": - disable_inflation = True - elif force_inflate_by == "2X": - hinted_splits = 1 - elif force_inflate_by == "4X": - hinted_splits = 2 - elif force_inflate_by == "8X": - hinted_splits = 3 - elif force_inflate_by == "16X": - hinted_splits = 4 - - if hinted_splits: - if force_audio or force_silent: - # the figures for audio slow motion are relative to the project split rate - # splits are really exponents of 2^n - num_splits = project_splits + hinted_splits - else: - # if not for slow motion, force an exact split - num_splits = hinted_splits - else: - num_splits = 0 if disable_inflation else project_splits - - if num_splits: - # the scene needs inflating - output_basename = "interpolated_frames" - file_list = sorted(get_files(scene_input_path, extension=self.frame_format)) - series_interpolater.interpolate_series(file_list, - scene_output_path, - num_splits, - output_basename, - type=self.frame_format) - ResequenceFiles(scene_output_path, - self.frame_format, - "inflated_frame", - 1, 1, - 1, 0, - -1, - True, - log_fn).resequence() - else: - # no need to inflate so just copy the files using the resequencer - ResequenceFiles(scene_input_path, - self.frame_format, - "inflated_frame", - 1, 1, - 1, 0, - -1, - False, - log_fn, - output_path=scene_output_path).resequence() - - Mtqdm().update_bar(bar) - - def get_upscaler(self, log_fn, realesrgan_settings, remixer_settings): - model_name = realesrgan_settings["model_name"] - gpu_ids = realesrgan_settings["gpu_ids"] - fp32 = realesrgan_settings["fp32"] - - # determine if cropped image size is above memory threshold requiring tiling - use_tiling_over = remixer_settings["use_tiling_over"] - size = self.crop_w * self.crop_h - - if size > use_tiling_over: - tiling = realesrgan_settings["tiling"] - tile_pad = realesrgan_settings["tile_pad"] - else: - tiling = 0 - tile_pad = 0 - return UpscaleSeries(model_name, gpu_ids, fp32, tiling, tile_pad, log_fn) - - FIXED_UPSCALE_FACTOR = 4.0 - TEMP_UPSCALE_PATH = "upscaled_frames" - DEFAULT_DOWNSCALE_TYPE = "area" - - def upscale_scene(self, - log_fn, - upscaler, - scene_input_path, - scene_output_path, - upscale_factor, - downscale_type=DEFAULT_DOWNSCALE_TYPE): - log_fn(f"creating scene output path {scene_output_path}") - create_directory(scene_output_path) - - working_path = os.path.join(scene_output_path, self.TEMP_UPSCALE_PATH) - log_fn(f"about to create working path {working_path}") - create_directory(working_path) - - # TODO make this logic general - - # upscale first at the engine's native scale - file_list = sorted(get_files(scene_input_path)) - output_basename = "upscaled_frames" - log_fn(f"about to upscale images to {working_path}") - upscaler.upscale_series(file_list, working_path, self.FIXED_UPSCALE_FACTOR, output_basename, - self.frame_format) - - # get size of upscaled frames - upscaled_files = sorted(get_files(working_path)) - width, height = image_size(upscaled_files[0]) - log_fn(f"size of upscaled images: {width} x {height}") - - # compute downscale factor - downscale_factor = self.FIXED_UPSCALE_FACTOR / upscale_factor - log_fn(f"downscale factor is 
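The split counts above are exponents: n splits roughly multiply a scene's frame count by 2**n. For audio or silent slow motion the hinted exponent stacks on top of the project's inflation, keeping the hinted speed relative to the project rate; otherwise the hint is taken as an exact split count. Illustrative arithmetic, assuming a project-wide 2X inflation and a scene hinted to 4X:

    project_splits = 1                   # project inflate-by 2X
    hinted_splits = 2                    # scene hint forces 4X

    # slow-motion hint (audio or silent): exponents add
    num_splits = project_splits + hinted_splits      # 3, about 2**3 = 8x the frames
    # plain inflation hint: use the hinted split count as-is
    num_splits = hinted_splits                       # 2, about 2**2 = 4x the frames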
{downscale_factor}") - - downscaled_width = int(width / downscale_factor) - downscaled_height = int(height / downscale_factor) - log_fn(f"size of downscaled images: {downscaled_width} x {downscaled_height}") - - if downscaled_width != width or downscaled_height != height: - # downsample to final size - log_fn(f"about to downscale images in {working_path} to {scene_output_path}") - ResizeFrames(scene_input_path, - scene_output_path, - downscaled_width, - downscaled_height, - downscale_type, - log_fn).resize(type=self.frame_format) - else: - log_fn("copying instead of unneeded downscaling") - copy_files(working_path, scene_output_path) - - try: - log_fn(f"about to delete working path {working_path}") - shutil.rmtree(working_path) - except OSError as error: - log_fn(f"ignoring error deleting working path: {error}") - - def upscale_factor_from_options(self) -> float: - upscale_factor = 1.0 - if self.upscale: - if self.upscale_option == "2X": - upscale_factor = 2.0 - elif self.upscale_option == "3X": - upscale_factor = 3.0 - elif self.upscale_option == "4X": - upscale_factor = 4.0 - return upscale_factor - - def upscale_scenes(self, log_fn, kept_scenes, realesrgan_settings, remixer_settings): - upscaler = self.get_upscaler(log_fn, realesrgan_settings, remixer_settings) - scenes_base_path = self.scenes_source_path(self.UPSCALE_STEP) - downscale_type = remixer_settings["scale_type_down"] - create_directory(self.upscale_path) - - upscale_factor = self.upscale_factor_from_options() - - with Mtqdm().open_bar(total=len(kept_scenes), desc="Upscale") as bar: - for scene_name in kept_scenes: - scene_input_path = os.path.join(scenes_base_path, scene_name) - scene_output_path = os.path.join(self.upscale_path, scene_name) - create_directory(scene_output_path) - - upscale_handled = False - upscale_hint = self.get_hint(self.scene_labels.get(scene_name), "U") - - if upscale_hint and not self.upscale: - # only apply the hint if not already upscaling, otherwise the - # frames may have mismatched sizes - try: - # for now ignore the hint value and upscale just at 1X, to clean up zooming - self.upscale_scene(log_fn, - upscaler, - scene_input_path, - scene_output_path, - 1.0, - downscale_type=downscale_type) - upscale_handled = True - - except Exception as error: - log_fn( -f"Error in upscale_scenes() handling processing hint {upscale_hint} - skipping processing: {error}") - upscale_handled = False - - if not upscale_handled: - if self.upscale: - self.upscale_scene(log_fn, - upscaler, - scene_input_path, - scene_output_path, - upscale_factor, - downscale_type=downscale_type) - else: - # no need to upscale so just copy the files using the resequencer - ResequenceFiles(scene_input_path, - self.frame_format, - "upscaled_frames", - 1, 1, - 1, 0, - -1, - False, - log_fn, - output_path=scene_output_path).resequence() - Mtqdm().update_bar(bar) - - def remix_filename_suffix(self, extra_suffix): - label = "remix" - - if self.resize_chosen(): - label += "-rc" if self.resize else "-rcH" - else: - label += "-or" - - if self.resynthesize_chosen(): - if self.resynthesize: - label += "-re" - if self.resynth_option == "Clean": - label += "C" - elif self.resynth_option == "Scrub": - label += "S" - elif self.resynth_option == "Replace": - label += "R" - else: - label += "-reH" - - if self.inflate_chosen(): - if self.inflate: - label += "-in" + self.inflate_by_option[0] - if self.inflate_slow_option == "Audio": - label += "SA" - elif self.inflate_slow_option == "Silent": - label += "SM" - else: - label += "-inH" - - if 
self.upscale_chosen(): - if self.upscale: - label += "-up" + self.upscale_option[0] - else: - label += "-upH" - - label += "-" + extra_suffix if extra_suffix else "" - return label - - def default_remix_filepath(self, extra_suffix=""): - _, filename, _ = split_filepath(self.source_video) - suffix = self.remix_filename_suffix(extra_suffix) - return os.path.join(self.project_path, f"{filename}-{suffix}.mp4") +# ## Main Processing ## + +# RESIZE_STEP = "resize" +# RESYNTH_STEP = "resynth" +# INFLATE_STEP = "inflate" +# UPSCALE_STEP = "upscale" +# AUDIO_STEP = "audio" +# VIDEO_STEP = "video" + +# PURGED_CONTENT = "purged_content" +# PURGED_DIR = "purged" + +# def prepare_process_remix(self, redo_resynth, redo_inflate, redo_upscale): +# self.setup_processing_paths() + +# self.recompile_scenes() + +# if self.processed_content_invalid: +# self.purge_processed_content(purge_from=self.RESIZE_STEP) +# self.processed_content_invalid = False +# else: +# self.purge_stale_processed_content(redo_resynth, redo_inflate, redo_upscale) +# self.purge_incomplete_processed_content() +# self.save() + +# def process_remix(self, log_fn, kept_scenes, remixer_settings, engine, engine_settings, +# realesrgan_settings): +# if self.resize_needed(): +# self.resize_scenes(log_fn, +# kept_scenes, +# remixer_settings) + +# if self.resynthesize_needed(): +# self.resynthesize_scenes(log_fn, +# kept_scenes, +# engine, +# engine_settings, +# self.resynth_option) + +# if self.inflate_needed(): +# self.inflate_scenes(log_fn, +# kept_scenes, +# engine, +# engine_settings) + +# if self.upscale_needed(): +# self.upscale_scenes(log_fn, +# kept_scenes, +# realesrgan_settings, +# remixer_settings) + +# return self.generate_remix_report(self.processed_content_complete(self.RESIZE_STEP), +# self.processed_content_complete(self.RESYNTH_STEP), +# self.processed_content_complete(self.INFLATE_STEP), +# self.processed_content_complete(self.UPSCALE_STEP)) + +# def resize_chosen(self): +# return self.resize or self.hint_present("R") + +# def resize_needed(self): +# return (self.resize and not self.processed_content_complete(self.RESIZE_STEP)) \ +# or self.resize_chosen() + +# def resynthesize_chosen(self): +# return self.resynthesize or self.hint_present("Y") + +# def resynthesize_needed(self): +# return self.resynthesize_chosen() and not self.processed_content_complete(self.RESYNTH_STEP) + +# def inflate_chosen(self): +# return self.inflate or self.hint_present("I") + +# def inflate_needed(self): +# if self.inflate_chosen() and not self.processed_content_complete(self.INFLATE_STEP): +# return True + +# def upscale_chosen(self): +# return self.upscale or self.hint_present("U") + +# def upscale_needed(self): +# return self.upscale_chosen() and not self.processed_content_complete(self.UPSCALE_STEP) + +# ## Purging ## + +# def purge_paths(self, path_list : list, keep_original=False, purged_path=None, skip_empty_paths=False, additional_path=""): +# """Purge a list of paths to the purged content directory +# keep_original: True=don't remove original content when purging +# purged_path: Used if calling multiple times to store purged content in the same purge directory +# skip_empty_paths: True=don't purge directories that have no files inside +# additional_path: If set, adds an additional segment onto the storage path (not returned) +# Returns: Path to the purged content directory (not incl. 
additional_path) +# """ +# paths_to_purge = [] +# for path in path_list: +# if path and os.path.exists(path): +# if not skip_empty_paths or directory_populated(path, files_only=True): +# paths_to_purge.append(path) +# if not paths_to_purge: +# return None + +# purged_root_path = os.path.join(self.project_path, self.PURGED_CONTENT) +# create_directory(purged_root_path) + +# if not purged_path: +# purged_path, _ = AutoIncrementDirectory(purged_root_path).next_directory(self.PURGED_DIR) + +# for path in paths_to_purge: +# use_purged_path = os.path.join(purged_path, additional_path) +# if keep_original: +# _, last_path, _ = split_filepath(path) +# copy_path = os.path.join(use_purged_path, last_path) +# copy_files(path, copy_path) +# else: +# shutil.move(path, use_purged_path) +# return purged_path + +# def delete_purged_content(self): +# purged_root_path = os.path.join(self.project_path, self.PURGED_CONTENT) +# if os.path.exists(purged_root_path): +# with Mtqdm().open_bar(total=1, desc="Deleting") as bar: +# Mtqdm().message(bar, "Removing purged content - No ETA") +# shutil.rmtree(purged_root_path) +# Mtqdm().update_bar(bar) +# return purged_root_path +# else: +# return None + +# def delete_path(self, path): +# if path and os.path.exists(path): +# with Mtqdm().open_bar(total=1, desc="Deleting") as bar: +# Mtqdm().message(bar, "Removing project content - No ETA") +# shutil.rmtree(path) +# Mtqdm().update_bar(bar) +# return path +# else: +# return None + +# def purge_processed_content(self, purge_from=RESIZE_STEP): +# purge_paths = [self.resize_path, +# self.resynthesis_path, +# self.inflation_path, +# self.upscale_path] + +# if purge_from == self.RESIZE_STEP: +# purge_paths = purge_paths[0:] +# elif purge_from == self.RESYNTH_STEP: +# purge_paths = purge_paths[1:] +# elif purge_from == self.INFLATE_STEP: +# purge_paths = purge_paths[2:] +# elif purge_from == self.UPSCALE_STEP: +# purge_paths = purge_paths[3:] +# else: +# raise RuntimeError(f"Unrecognized value {purge_from} passed to purge_processed_content()") + +# purge_root = self.purge_paths(purge_paths) +# self.clean_remix_content(purge_from="audio_clips", purge_root=purge_root) +# return purge_root + +# def clean_remix_content(self, purge_from, purge_root=None): +# clean_paths = [self.audio_clips_path, +# self.video_clips_path, +# self.clips_path] + +# # purge all of the paths, keeping the originals, for safekeeping ahead of reprocessing +# purge_root = self.purge_paths(clean_paths, keep_original=True, purged_path=purge_root, +# skip_empty_paths=True) +# if purge_root: +# self.copy_project_file(purge_root) + +# if purge_from == "audio_clips": +# clean_paths = clean_paths[0:] +# self.audio_clips = [] +# self.video_clips = [] +# self.clips = [] +# elif purge_from == "video_clips": +# clean_paths = clean_paths[1:] +# self.video_clips = [] +# self.clips = [] +# elif purge_from == "remix_clips": +# clean_paths = clean_paths[2:] +# self.clips = [] + +# # clean directories as needed by purge_from +# # audio wav files can be slow to extract, so they are carefully not cleaned unless needed +# clean_directories(clean_paths) +# return purge_root + +# def clean_remix_audio(self): +# clean_directories([self.audio_clips_path]) + +# RESIZE_PATH = "SCENES-RC" +# RESYNTH_PATH = "SCENES-RE" +# INFLATE_PATH = "SCENES-IN" +# UPSCALE_PATH = "SCENES-UP" + +# def setup_processing_paths(self): +# self.resize_path = os.path.join(self.project_path, self.RESIZE_PATH) +# self.resynthesis_path = os.path.join(self.project_path, self.RESYNTH_PATH) +# self.inflation_path 
= os.path.join(self.project_path, self.INFLATE_PATH) +# self.upscale_path = os.path.join(self.project_path, self.UPSCALE_PATH) + +# def _processed_content_complete(self, path, expected_dirs = 0, expected_files = 0): +# if not path or not os.path.exists(path): +# return False +# if expected_dirs: +# return len(get_directories(path)) == expected_dirs +# if expected_files: +# return len(get_files(path)) == expected_files +# return True + +# def processed_content_complete(self, processing_step): +# expected_items = len(self.kept_scenes()) +# if processing_step == self.RESIZE_STEP: +# return self._processed_content_complete(self.resize_path, expected_dirs=expected_items) +# elif processing_step == self.RESYNTH_STEP: +# return self._processed_content_complete(self.resynthesis_path, expected_dirs=expected_items) +# elif processing_step == self.INFLATE_STEP: +# return self._processed_content_complete(self.inflation_path, expected_dirs=expected_items) +# elif processing_step == self.UPSCALE_STEP: +# return self._processed_content_complete(self.upscale_path, expected_dirs=expected_items) +# elif processing_step == self.AUDIO_STEP: +# return self._processed_content_complete(self.audio_clips_path, expected_files=expected_items) +# elif processing_step == self.VIDEO_STEP: +# return self._processed_content_complete(self.video_clips_path, expected_files=expected_items) +# else: +# raise RuntimeError(f"'processing_step' {processing_step} is unrecognized") + +# # processed content is stale if it is not selected and exists +# def processed_content_stale(self, selected : bool, path : str): +# if selected: +# return False +# if not os.path.exists(path): +# return False +# contents = get_directories(path) +# content_present = len(contents) > 0 +# return content_present + +# # content is stale if it is present on disk but not currently selected +# # stale content and its derivative content should be purged +# def purge_stale_processed_content(self, purge_resynth, purge_inflation, purge_upscale): +# if self.processed_content_stale(self.resize_chosen(), self.resize_path): +# self.purge_processed_content(purge_from=self.RESIZE_STEP) + +# if self.processed_content_stale(self.resynthesize_chosen(), self.resynthesis_path) or purge_resynth: +# self.purge_processed_content(purge_from=self.RESYNTH_STEP) + +# if self.processed_content_stale(self.inflate_chosen(), self.inflation_path) or purge_inflation: +# self.purge_processed_content(purge_from=self.INFLATE_STEP) + +# if self.processed_content_stale(self.upscale_chosen(), self.upscale_path) or purge_upscale: +# self.purge_processed_content(purge_from=self.UPSCALE_STEP) + +# def purge_incomplete_processed_content(self): +# # content is incomplete if the wrong number of scene directories are present +# # if it is currently selected and incomplete, it should be purged +# if self.resize_chosen() and not self.processed_content_complete(self.RESIZE_STEP): +# self.purge_processed_content(purge_from=self.RESIZE_STEP) + +# if self.resynthesize_chosen() and not self.processed_content_complete(self.RESYNTH_STEP): +# self.purge_processed_content(purge_from=self.RESYNTH_STEP) + +# if self.inflate_chosen() and not self.processed_content_complete(self.INFLATE_STEP): +# self.purge_processed_content(purge_from=self.INFLATE_STEP) + +# if self.upscale_chosen() and not self.processed_content_complete(self.UPSCALE_STEP): +# self.purge_processed_content(purge_from=self.UPSCALE_STEP) + +# def scenes_source_path(self, processing_step): +# processing_path = self.scenes_path + +# if 
processing_step == self.RESIZE_STEP: +# # resize is the first processing step and always draws from the scenes path +# pass + +# elif processing_step == self.RESYNTH_STEP: +# # resynthesis is the second processing step +# if self.resize_chosen(): +# # if resize is enabled, draw from the resized scenes path +# processing_path = self.resize_path + +# elif processing_step == self.INFLATE_STEP: +# # inflation is the third processing step +# if self.resynthesize_chosen(): +# # if resynthesis is enabled, draw from the resyntheized scenes path +# processing_path = self.resynthesis_path +# elif self.resize_chosen(): +# # if resize is enabled, draw from the resized scenes path +# processing_path = self.resize_path + +# elif processing_step == self.UPSCALE_STEP: +# # upscaling is the fourth processing step +# if self.inflate_chosen(): +# # if inflation is enabled, draw from the inflation path +# processing_path = self.inflation_path +# elif self.resynthesize_chosen(): +# # if resynthesis is enabled, draw from the resyntheized scenes path +# processing_path = self.resynthesis_path +# elif self.resize_chosen(): +# # if resize is enabled, draw from the resized scenes path +# processing_path = self.resize_path + +# return processing_path + +# def get_resize_params(self, resize_w, resize_h, crop_w, crop_h, content_width, content_height, remixer_settings): +# if resize_w == content_width and resize_h == content_height: +# scale_type = "none" +# else: +# if resize_w <= content_width and resize_h <= content_height: +# # use the down scaling type if there are only reductions +# # the default "area" type preserves details better on reducing +# scale_type = remixer_settings["scale_type_down"] +# else: +# # otherwise use the upscaling type +# # the default "lanczos" type preserves details better on enlarging +# scale_type = remixer_settings["scale_type_up"] + +# if crop_w == resize_w and crop_h == resize_h: +# # disable cropping if none to do +# crop_type = "none" +# elif crop_w > resize_w or crop_h > resize_h: +# # disable cropping if it will wrap/is invalid +# # TODO put bounds on the crop parameters instead of disabling +# crop_type = "none" +# else: +# crop_type = "crop" +# return scale_type, crop_type + +# def prepare_save_remix(self, log_fn, global_options, remixer_settings, output_filepath : str, +# invalidate_video_clips=True): +# if not output_filepath: +# raise ValueError("Enter a path for the remixed video to proceed") + +# self.recompile_scenes() + +# kept_scenes = self.kept_scenes() +# if not kept_scenes: +# raise ValueError("No kept scenes were found") + +# self.drop_empty_processed_scenes(kept_scenes) +# self.save() + +# # get this again in case scenes have been auto-dropped +# kept_scenes = self.kept_scenes() +# if not kept_scenes: +# raise ValueError("No kept scenes after removing empties") + +# # create audio clips only if they do not already exist +# # this depends on the audio clips being purged at the time the scene selection are compiled +# if self.video_details["has_audio"] and not self.processed_content_complete( +# self.AUDIO_STEP): +# audio_format = remixer_settings["audio_format"] +# self.create_audio_clips(log_fn, global_options, audio_format=audio_format) +# self.save() + +# # leave video clips if they are complete since we may be only making audio changes +# if invalidate_video_clips or not self.processed_content_complete(self.VIDEO_STEP): +# self.clean_remix_content(purge_from="video_clips") +# else: +# # always recreate remix clips +# 
self.clean_remix_content(purge_from="remix_clips") + +# return kept_scenes + +# def save_remix(self, log_fn, global_options, kept_scenes): +# # leave video clips if they are complete since we may be only making audio changes +# if not self.processed_content_complete(self.VIDEO_STEP): +# self.create_video_clips(log_fn, kept_scenes, global_options) +# self.save() + +# self.create_scene_clips(log_fn, kept_scenes, global_options) +# self.save() + +# if not self.clips: +# raise ValueError("No processed video clips were found") + +# ffcmd = self.create_remix_video(log_fn, global_options, self.output_filepath) +# log_fn(f"FFmpeg command: {ffcmd}") +# self.save() + +# def save_custom_remix(self, +# log_fn, +# output_filepath, +# global_options, +# kept_scenes, +# custom_video_options, +# custom_audio_options, +# draw_text_options=None, +# use_scene_sorting=True): +# _, _, output_ext = split_filepath(output_filepath) +# output_ext = output_ext[1:] + +# # leave video clips if they are complete since we may be only making audio changes +# if not self.processed_content_complete(self.VIDEO_STEP): +# self.create_custom_video_clips(log_fn, kept_scenes, global_options, +# custom_video_options=custom_video_options, +# custom_ext=output_ext, +# draw_text_options=draw_text_options) +# self.save() + +# self.create_custom_scene_clips(kept_scenes, global_options, +# custom_audio_options=custom_audio_options, +# custom_ext=output_ext) +# self.save() + +# if not self.clips: +# raise ValueError("No processed video clips were found") + +# ffcmd = self.create_remix_video(log_fn, global_options, output_filepath, +# use_scene_sorting=use_scene_sorting) +# log_fn(f"FFmpeg command: {ffcmd}") +# self.save() + +# def resize_scene(self, +# log_fn, +# scene_input_path, +# scene_output_path, +# resize_w, +# resize_h, +# crop_w, +# crop_h, +# crop_offset_x, +# crop_offset_y, +# scale_type, +# crop_type, +# params_fn : Callable | None = None, +# params_context : any=None): + +# ResizeFrames(scene_input_path, +# scene_output_path, +# resize_w, +# resize_h, +# scale_type, +# log_fn, +# crop_type=crop_type, +# crop_width=crop_w, +# crop_height=crop_h, +# crop_offset_x=crop_offset_x, +# crop_offset_y=crop_offset_y).resize(type=self.frame_format, params_fn=params_fn, +# params_context=params_context) + +# def setup_resize_hint(self, content_width, content_height): +# # use the main resize/crop settings if resizing, or the content native +# # dimensions if not, as a foundation for handling resize hints +# if self.resize: +# main_resize_w = self.resize_w +# main_resize_h = self.resize_h +# main_crop_w = self.crop_w +# main_crop_h = self.crop_h +# if self.crop_offset_x < 0: +# main_offset_x = (main_resize_w - main_crop_w) / 2.0 +# else: +# main_offset_x = self.crop_offset_x +# if self.crop_offset_y < 0: +# main_offset_y = (main_resize_h - main_crop_h) / 2.0 +# else: +# main_offset_y = self.crop_offset_y +# else: +# main_resize_w = content_width +# main_resize_h = content_height +# main_crop_w = content_width +# main_crop_h = content_height +# main_offset_x = 0 +# main_offset_y = 0 +# return main_resize_w, main_resize_h, main_crop_w, main_crop_h, main_offset_x, main_offset_y + +# QUADRANT_ZOOM_HINT = "/" +# QUADRANT_GRID_CHAR = "X" +# PERCENT_ZOOM_HINT = "%" +# COMBINED_ZOOM_HINT = "@" +# ANIMATED_ZOOM_HINT = "-" +# QUADRANT_ZOOM_MIN_LEN = 3 # 1/3 +# PERCENT_ZOOM_MIN_LEN = 4 # 123% +# COMBINED_ZOOM_MIN_LEN = 8 # 1/1@100% +# ANIMATED_ZOOM_MIN_LEN = 7 # 1/3-5/7 + +# def get_quadrant_zoom(self, hint): +# if self.QUADRANT_ZOOM_HINT in 
hint: +# if len(hint) >= self.QUADRANT_ZOOM_MIN_LEN: +# split_pos = hint.index(self.QUADRANT_ZOOM_HINT) +# quadrant = hint[:split_pos] +# quadrants = hint[split_pos+1:] +# else: +# quadrant, quadrants = 1, 1 + +# return quadrant, quadrants +# else: +# return None, None + +# def get_percent_zoom(self, hint): +# if self.PERCENT_ZOOM_HINT in hint: +# if len(hint) >= self.PERCENT_ZOOM_MIN_LEN: +# zoom_percent = int(hint.replace(self.PERCENT_ZOOM_HINT, "")) +# if zoom_percent >= 100: +# return zoom_percent +# return 100 +# else: +# return None + +# def get_zoom_part(self, hint): +# if self.COMBINED_ZOOM_HINT in hint and len(hint) >= self.COMBINED_ZOOM_MIN_LEN: +# type = self.COMBINED_ZOOM_HINT +# quadrant, quadrants, zoom_percent = self.get_combined_zoom(hint) +# return type, quadrant, quadrants, zoom_percent +# if self.QUADRANT_ZOOM_HINT in hint and len(hint) >= self.QUADRANT_ZOOM_MIN_LEN: +# type = self.QUADRANT_ZOOM_HINT +# quadrant, quadrants = self.get_quadrant_zoom(hint) +# return type, quadrant, quadrants, None +# elif self.PERCENT_ZOOM_HINT in hint and len(hint) >= self.PERCENT_ZOOM_MIN_LEN: +# type = self.PERCENT_ZOOM_HINT +# self.get_percent_zoom(hint) +# zoom_percent = self.get_percent_zoom(hint) +# return type, None, None, zoom_percent +# return None, None, None, None + +# def get_combined_zoom(self, hint): +# if self.COMBINED_ZOOM_HINT in hint: +# if len(hint) >= self.COMBINED_ZOOM_MIN_LEN: +# split_pos = hint.index(self.COMBINED_ZOOM_HINT) +# hint_a = hint[:split_pos] +# hint_b = hint[split_pos+1:] +# a_type, a_quadrant, a_quadrants, a_zoom_percent = self.get_zoom_part(hint_a) +# b_type, b_quadrant, b_quadrants, b_zoom_percent = self.get_zoom_part(hint_b) +# if a_type == self.PERCENT_ZOOM_HINT and b_type == self.QUADRANT_ZOOM_HINT: +# zoom_percent = a_zoom_percent +# quadrant, quadrants = b_quadrant, b_quadrants +# elif a_type == self.QUADRANT_ZOOM_HINT and b_type == self.PERCENT_ZOOM_HINT: +# zoom_percent = b_zoom_percent +# quadrant, quadrants = a_quadrant, a_quadrants +# return quadrant, quadrants, zoom_percent +# return None, None, None + +# def get_animated_zoom(self, hint): +# if self.ANIMATED_ZOOM_HINT in hint: +# if len(hint) >= self.ANIMATED_ZOOM_MIN_LEN: +# split_pos = hint.index(self.ANIMATED_ZOOM_HINT) +# hint_from = hint[:split_pos] +# hint_to = hint[split_pos+1:] +# from_type, from_param1, from_param2, from_param3 = self.get_zoom_part(hint_from) +# to_type, to_param1, to_param2, to_param3 = self.get_zoom_part(hint_to) +# if from_type and to_type: +# return from_type, from_param1, from_param2, from_param3, to_type, to_param1, to_param2, to_param3 +# return None, None, None, None, None, None, None, None + +# def compute_zoom_type(self, type, param1, param2, param3, main_resize_w, main_resize_h, +# main_offset_x, main_offset_y, main_crop_w, main_crop_h, log_fn): +# if type == self.COMBINED_ZOOM_HINT: +# quadrant, quadrants, zoom_percent = param1, param2, param3 +# if quadrant and quadrants and zoom_percent: +# return self.compute_combined_zoom(quadrant, quadrants, zoom_percent, +# main_resize_w, main_resize_h, +# main_offset_x, main_offset_y, +# main_crop_w, main_crop_h, log_fn=log_fn) +# elif type == self.QUADRANT_ZOOM_HINT: +# quadrant, quadrants = param1, param2 +# if quadrant and quadrants: +# return self.compute_quadrant_zoom(quadrant, quadrants, +# main_resize_w, main_resize_h, +# main_offset_x, main_offset_y, +# main_crop_w, main_crop_h, log_fn=log_fn) +# elif type == self.PERCENT_ZOOM_HINT: +# zoom_percent = param3 +# if zoom_percent: +# return 
self.compute_percent_zoom(zoom_percent, +# main_resize_w, main_resize_h, +# main_offset_x, main_offset_y, +# main_crop_w, main_crop_h, log_fn=log_fn) + +# def compute_quadrant_zoom(self, quadrant, quadrants, main_resize_w, main_resize_h, +# main_offset_x, main_offset_y, main_crop_w, main_crop_h, log_fn): +# quadrant = int(quadrant) - 1 + +# if self.QUADRANT_GRID_CHAR in quadrants: +# parts = quadrants.split(self.QUADRANT_GRID_CHAR) +# if len(parts) == 2: +# grid_x = int(parts[0]) +# grid_y = int(parts[1]) +# magnitude_x = grid_x +# magnitude_y = grid_y + +# if magnitude_x >= magnitude_y: +# magnitude = magnitude_x +# row = int(quadrant / magnitude_x) +# column = quadrant % magnitude_x +# else: +# magnitude = magnitude_y +# row = int(quadrant / magnitude_x) +# column = quadrant % magnitude_x +# else: +# magnitude = 1 +# magnitude_x = magnitude +# magnitude_y = magnitude +# row = 0 +# column = 0 +# else: +# magnitude = int(math.sqrt(int(quadrants))) +# magnitude_x = magnitude +# magnitude_y = magnitude +# row = int(quadrant / magnitude) +# column = quadrant % magnitude + +# # compute frame scaling +# resize_w = main_resize_w * magnitude +# resize_h = main_resize_h * magnitude + +# # compute crop area scaling +# crop_w = main_crop_w * magnitude +# crop_h = main_crop_h * magnitude + +# # if the main crop offset is negative, auto-center it within the frame +# # otherwise scale up the specific offset +# offset_x, offset_y = 0, 0 +# if main_offset_x < 0: +# offset_x = (resize_w - crop_w) / 2.0 +# else: +# offset_x = main_offset_x * magnitude +# if main_offset_y < 0: +# offset_y = (resize_h - crop_h) / 2.0 +# else: +# offset_y = main_offset_y * magnitude + +# # compute the dimensions of one grid cell given the crop and magnitude(s) +# cell_width = crop_w / magnitude_x +# cell_height = crop_h / magnitude_y + +# # compute the upper left corner of the grid cell given the cell dimensions, +# # and row, column; unadjusted for main crop offset +# cell_offset_x = column * cell_width +# cell_offset_y = row * cell_height + +# # add the main offset +# cell_offset_x += offset_x +# cell_offset_y += offset_y + +# # compute the center point +# center_x = cell_offset_x + (cell_width / 2.0) +# center_y = cell_offset_y + (cell_height / 2.0) + +# return resize_w, resize_h, center_x, center_y + +# def compute_percent_zoom(self, zoom_percent, main_resize_w, main_resize_h, main_offset_x, +# main_offset_y, main_crop_w, main_crop_h, log_fn): +# magnitude = zoom_percent / 100.0 + +# # compute frame scaling +# resize_w = main_resize_w * magnitude +# resize_h = main_resize_h * magnitude + +# # compute crop area scaling +# crop_w = main_crop_w * magnitude +# crop_h = main_crop_h * magnitude + +# # if the main crop offset is negative, auto-center it within the frame +# # otherwise scale up the specific offset +# offset_x, offset_y = 0, 0 +# if main_offset_x < 0: +# offset_x = (resize_w - crop_w) / 2.0 +# else: +# offset_x = main_offset_x * magnitude +# if main_offset_y < 0: +# offset_y = (resize_h - crop_h) / 2.0 +# else: +# offset_y = main_offset_y * magnitude + +# # compute the centerpoint of the scaled crop area +# center_x = (crop_w / 2.0) + offset_x +# center_y = (crop_h / 2.0) + offset_y + +# return resize_w, resize_h, center_x, center_y + +# MAX_SELF_FIT_ZOOM = 1000 + +# def compute_combined_zoom(self, quadrant, quadrants, zoom_percent, main_resize_w, main_resize_h, +# main_offset_x, main_offset_y, main_crop_w, main_crop_h, log_fn): +# resize_w, resize_h, _, _ = self.compute_percent_zoom(zoom_percent, +# 
main_resize_w, main_resize_h, +# main_offset_x, main_offset_y, +# main_crop_w, main_crop_h, log_fn) +# quadrant_resize_w, _, quadrant_center_x, quadrant_center_y = self.compute_quadrant_zoom(quadrant, quadrants, +# main_resize_w, main_resize_h, +# main_offset_x, main_offset_y, +# main_crop_w, main_crop_h, log_fn) + +# # scale the quadrant center point to the percent resize +# scale = resize_w / quadrant_resize_w +# center_x = quadrant_center_x * scale +# center_y = quadrant_center_y * scale + +# if self.check_crop_bounds(resize_w, resize_h, center_x, center_y, main_crop_w, main_crop_h): +# # fit the requested zoom percent to be in bounds +# fit_zoom_percent = zoom_percent +# while fit_zoom_percent < self.MAX_SELF_FIT_ZOOM and \ +# self.check_crop_bounds(resize_w, resize_h, center_x, center_y, main_crop_w, main_crop_h): +# fit_zoom_percent += 1 +# resize_w, resize_h, _, _ = self.compute_percent_zoom(fit_zoom_percent, +# main_resize_w, main_resize_h, +# main_offset_x, main_offset_y, +# main_crop_w, main_crop_h, log_fn) +# quadrant_resize_w, _, quadrant_center_x, quadrant_center_y = self.compute_quadrant_zoom(quadrant, quadrants, +# main_resize_w, main_resize_h, +# main_offset_x, main_offset_y, +# main_crop_w, main_crop_h, log_fn) + +# # scale the quadrant center point to the percent resize +# # this seems to work on the left and middle but not on the right +# scale = resize_w / quadrant_resize_w +# center_x = quadrant_center_x * scale +# center_y = quadrant_center_y * scale + +# # if still out of bounds, restore to quadrant zoom +# if self.check_crop_bounds(resize_w, resize_h, center_x, center_y, main_crop_w, main_crop_h): +# log_fn("Can't find fitting zoom percentage; ignoring percent part.") +# resize_w, resize_h, center_x, center_y = \ +# self.compute_quadrant_zoom(quadrant, quadrants, main_resize_w, main_resize_h, +# main_offset_x, main_offset_y, main_crop_w, main_crop_h, log_fn) +# else: +# log_fn(f"Found fitting zoom percentage: {fit_zoom_percent}%.") + +# return resize_w, resize_h, center_x, center_y + +# def check_crop_bounds(self, resize_w, resize_h, center_x, center_y, main_crop_w, main_crop_h): +# crop_offset_x = center_x - (main_crop_w / 2.0) +# crop_offset_y = center_y - (main_crop_h / 2.0) +# return crop_offset_x < 0 or crop_offset_x + main_crop_w > resize_w \ +# or crop_offset_y < 0 or crop_offset_y + main_crop_h > resize_h + +# def compute_animated_zoom(self, num_frames, from_type, from_param1, from_param2, from_param3, +# to_type, to_param1, to_param2, to_param3, +# main_resize_w, main_resize_h, main_offset_x, main_offset_y, +# main_crop_w, main_crop_h, log_fn): + +# from_resize_w, from_resize_h, from_center_x, from_center_y = \ +# self.compute_zoom_type(from_type, from_param1, from_param2, from_param3, +# main_resize_w, main_resize_h, +# main_offset_x, main_offset_y, +# main_crop_w, main_crop_h, log_fn=log_fn) + +# to_resize_w, to_resize_h, to_center_x, to_center_y = \ +# self.compute_zoom_type(to_type, to_param1, to_param2, to_param3, +# main_resize_w, main_resize_h, +# main_offset_x, main_offset_y, +# main_crop_w, main_crop_h, log_fn=log_fn) + +# diff_resize_w = to_resize_w - from_resize_w +# diff_resize_h = to_resize_h - from_resize_h +# diff_center_x = to_center_x - from_center_x +# diff_center_y = to_center_y - from_center_y + +# # TODO why is this needed? 
without it, the last transition doesn't happen +# # - maybe it needs to be the number of transitions between frames not number of frames +# # ensure the final transition occurs +# num_frames -= 1 + +# step_resize_w = diff_resize_w / num_frames +# step_resize_h = diff_resize_h / num_frames +# step_center_x = diff_center_x / num_frames +# step_center_y = diff_center_y / num_frames + +# context = {} +# context["from_resize_w"] = from_resize_w +# context["from_resize_h"] = from_resize_h +# context["from_center_x"] = from_center_x +# context["from_center_y"] = from_center_y +# context["step_resize_w"] = step_resize_w +# context["step_resize_h"] = step_resize_h +# context["step_center_x"] = step_center_x +# context["step_center_y"] = step_center_y +# context["main_crop_w"] = main_crop_w +# context["main_crop_h"] = main_crop_h +# return context + +# def _resize_frame_param(self, index, context): +# from_resize_w = context["from_resize_w"] +# from_resize_h = context["from_resize_h"] +# from_center_x = context["from_center_x"] +# from_center_y = context["from_center_y"] +# step_resize_w = context["step_resize_w"] +# step_resize_h = context["step_resize_h"] +# step_center_x = context["step_center_x"] +# step_center_y = context["step_center_y"] +# main_crop_w = context["main_crop_w"] +# main_crop_h = context["main_crop_h"] + +# resize_w = from_resize_w + (index * step_resize_w) +# resize_h = from_resize_h + (index * step_resize_h) +# center_x = from_center_x + (index * step_center_x) +# center_y = from_center_y + (index * step_center_y) +# crop_offset_x = center_x - (main_crop_w / 2.0) +# crop_offset_y = center_y - (main_crop_h / 2.0) + +# return int(resize_w), int(resize_h), int(crop_offset_x), int(crop_offset_y) + +# def resize_scenes(self, log_fn, kept_scenes, remixer_settings): +# scenes_base_path = self.scenes_source_path(self.RESIZE_STEP) +# create_directory(self.resize_path) + +# content_width = self.video_details["content_width"] +# content_height = self.video_details["content_height"] +# scale_type, crop_type= self.get_resize_params(self.resize_w, self.resize_h, self.crop_w, +# self.crop_h, content_width, content_height, +# remixer_settings) + +# with Mtqdm().open_bar(total=len(kept_scenes), desc="Resize") as bar: +# for scene_name in kept_scenes: +# scene_input_path = os.path.join(scenes_base_path, scene_name) +# scene_output_path = os.path.join(self.resize_path, scene_name) +# create_directory(scene_output_path) + +# resize_handled = False +# resize_hint = self.get_hint(self.scene_labels.get(scene_name), "R") +# if resize_hint: +# main_resize_w, main_resize_h, main_crop_w, main_crop_h, main_offset_x, \ +# main_offset_y = self.setup_resize_hint(content_width, content_height) + +# try: +# if self.ANIMATED_ZOOM_HINT in resize_hint: +# # interprent 'any-any' as animating from one to the other zoom factor +# from_type, from_param1, from_param2, from_param3, to_type, to_param1, to_param2, to_param3 = \ +# self.get_animated_zoom(resize_hint) +# if from_type and to_type: +# first_frame, last_frame, _ = details_from_group_name(scene_name) +# num_frames = (last_frame - first_frame) + 1 +# context = self.compute_animated_zoom(num_frames, +# from_type, from_param1, from_param2, from_param3, +# to_type, to_param1, to_param2, to_param3, +# main_resize_w, main_resize_h, main_offset_x, main_offset_y, +# main_crop_w, main_crop_h, log_fn) + +# scale_type = remixer_settings["scale_type_up"] +# self.resize_scene(log_fn, +# scene_input_path, +# scene_output_path, +# None, +# None, +# main_crop_w, +# 
main_crop_h, +# None, +# None, +# scale_type, +# crop_type="crop", +# params_fn=self._resize_frame_param, +# params_context=context) +# resize_handled = True + +# elif self.COMBINED_ZOOM_HINT in resize_hint: +# quadrant, quadrants, zoom_percent = self.get_combined_zoom(resize_hint) +# if quadrant and quadrants and zoom_percent: +# resize_w, resize_h, center_x, center_y = \ +# self.compute_combined_zoom(quadrant, quadrants, zoom_percent, +# main_resize_w, main_resize_h, +# main_offset_x, main_offset_y, +# main_crop_w, main_crop_h, log_fn) + +# crop_offset_x = center_x - (main_crop_w / 2.0) +# crop_offset_y = center_y - (main_crop_h / 2.0) + +# scale_type = remixer_settings["scale_type_up"] +# self.resize_scene(log_fn, +# scene_input_path, +# scene_output_path, +# int(resize_w), +# int(resize_h), +# int(main_crop_w), +# int(main_crop_h), +# int(crop_offset_x), +# int(crop_offset_y), +# scale_type, +# crop_type="crop") +# resize_handled = True + +# elif self.QUADRANT_ZOOM_HINT in resize_hint: +# # interpret 'x/y' as x: quadrant, y: square-based number of quadrants +# # '5/9' and '13/25' would be the center squares of 3x3 and 5x5 grids +# # zoomed in at 300% and 500% +# quadrant, quadrants = self.get_quadrant_zoom(resize_hint) +# if quadrant and quadrants: +# resize_w, resize_h, center_x, center_y = \ +# self.compute_quadrant_zoom(quadrant, quadrants, +# main_resize_w, main_resize_h, +# main_offset_x, main_offset_y, +# main_crop_w, main_crop_h, log_fn) + +# scale_type = remixer_settings["scale_type_up"] +# crop_offset_x = center_x - (main_crop_w / 2.0) +# crop_offset_y = center_y - (main_crop_h / 2.0) +# self.resize_scene(log_fn, +# scene_input_path, +# scene_output_path, +# int(resize_w), +# int(resize_h), +# int(main_crop_w), +# int(main_crop_h), +# int(crop_offset_x), +# int(crop_offset_y), +# scale_type, +# crop_type="crop") +# resize_handled = True + +# elif self.PERCENT_ZOOM_HINT in resize_hint: +# # interpret z% as zoom percent to zoom into center +# zoom_percent = self.get_percent_zoom(resize_hint) +# if zoom_percent: +# resize_w, resize_h, center_x, center_y = \ +# self.compute_percent_zoom(zoom_percent, +# main_resize_w, main_resize_h, +# main_offset_x, main_offset_y, +# main_crop_w, main_crop_h, log_fn) +# scale_type = remixer_settings["scale_type_up"] +# crop_offset_x = center_x - (main_crop_w / 2.0) +# crop_offset_y = center_y - (main_crop_h / 2.0) +# self.resize_scene(log_fn, +# scene_input_path, +# scene_output_path, +# int(resize_w), +# int(resize_h), +# int(main_crop_w), +# int(main_crop_h), +# int(crop_offset_x), +# int(crop_offset_y), +# scale_type, +# crop_type="crop") +# resize_handled = True +# except Exception as error: +# # TODO +# print(error) +# raise +# log_fn( +# f"Error in resize_scenes() handling processing hint {resize_hint} - skipping processing: {error}") +# resize_handled = False + +# if not resize_handled: +# self.resize_scene(log_fn, +# scene_input_path, +# scene_output_path, +# int(self.resize_w), +# int(self.resize_h), +# int(self.crop_w), +# int(self.crop_h), +# int(self.crop_offset_x), +# int(self.crop_offset_y), +# scale_type, +# crop_type) + +# Mtqdm().update_bar(bar) + +# # TODO dry up this code with same in resynthesize_video_ui - maybe a specific resynth script +# def one_pass_resynthesis(self, log_fn, input_path, output_path, output_basename, +# engine : InterpolateSeries): +# file_list = sorted(get_files(input_path, extension=self.frame_format)) +# log_fn(f"beginning series of frame recreations at {output_path}") +# 
engine.interpolate_series(file_list, output_path, 1, "interframe", offset=2, +# type=self.frame_format) + +# log_fn(f"auto-resequencing recreated frames at {output_path}") +# ResequenceFiles(output_path, +# self.frame_format, +# "resynthesized_frame", +# 1, 1, # start, step +# 1, 0, # stride, offset +# -1, # auto-zero fill +# True, # rename +# log_fn).resequence() + +# def two_pass_resynth_pass(self, log_fn, input_path, output_path, output_basename, +# engine : InterpolateSeries): +# file_list = sorted(get_files(input_path, extension=self.frame_format)) + +# inflated_frames = os.path.join(output_path, "inflated_frames") +# log_fn(f"beginning series of interframe recreations at {inflated_frames}") +# create_directory(inflated_frames) +# engine.interpolate_series(file_list, inflated_frames, 1, "interframe", +# type=self.frame_format) + +# log_fn(f"selecting odd interframes only at {inflated_frames}") +# ResequenceFiles(inflated_frames, +# self.frame_format, +# output_basename, +# 1, 1, # start, step +# 2, 1, # stride, offset +# -1, # auto-zero fill +# False, # rename +# log_fn, +# output_path=output_path).resequence() +# remove_directories([inflated_frames]) + +# def two_pass_resynthesis(self, log_fn, input_path, output_path, output_basename, engine, one_pass_only=False): +# passes = 1 if one_pass_only else 2 +# with Mtqdm().open_bar(total=passes, desc="Two-Pass Resynthesis") as bar: +# if not one_pass_only: +# interframes = os.path.join(output_path, "interframes") +# create_directory(interframes) +# self.two_pass_resynth_pass(log_fn, input_path, interframes, "odd_interframe", engine) +# input_path = interframes + +# self.two_pass_resynth_pass(log_fn, input_path, output_path, output_basename, engine) + +# if not one_pass_only: +# remove_directories([interframes]) + +# def resynthesize_scenes(self, log_fn, kept_scenes, engine, engine_settings, resynth_option): +# interpolater = Interpolate(engine.model, log_fn) +# use_time_step = engine_settings["use_time_step"] +# deep_interpolater = DeepInterpolate(interpolater, use_time_step, log_fn) +# series_interpolater = InterpolateSeries(deep_interpolater, log_fn) +# output_basename = "resynthesized_frames" + +# scenes_base_path = self.scenes_source_path(self.RESYNTH_STEP) +# create_directory(self.resynthesis_path) + +# with Mtqdm().open_bar(total=len(kept_scenes), desc="Resynthesize") as bar: +# for scene_name in kept_scenes: +# scene_input_path = os.path.join(scenes_base_path, scene_name) +# scene_output_path = os.path.join(self.resynthesis_path, scene_name) +# create_directory(scene_output_path) + +# resynth_type = resynth_option if self.resynthesize else None +# resynth_hint = self.get_hint(self.scene_labels.get(scene_name), "Y") +# if resynth_hint: +# if "C" in resynth_hint: +# resynth_type = "Clean" +# elif "S" in resynth_hint: +# resynth_type = "Scrub" +# elif "R" in resynth_hint: +# resynth_type = "Replace" +# elif "N" in resynth_hint: +# resynth_type = None + +# if resynth_type == "Replace": +# self.one_pass_resynthesis(log_fn, scene_input_path, scene_output_path, +# output_basename, series_interpolater) +# elif resynth_type == "Clean" or resynth_type == "Scrub": +# one_pass_only = resynth_type == "Clean" +# self.two_pass_resynthesis(log_fn, scene_input_path, scene_output_path, +# output_basename, series_interpolater, +# one_pass_only=one_pass_only) +# else: +# # no need to resynthesize so just copy the files using the resequencer +# ResequenceFiles(scene_input_path, +# self.frame_format, +# "resynthesized_frame", +# 1, 1, +# 1, 0, +# -1, +# 
False, +# log_fn, +# output_path=scene_output_path).resequence() + +# Mtqdm().update_bar(bar) + +# def inflate_scenes(self, log_fn, kept_scenes, engine, engine_settings): +# interpolater = Interpolate(engine.model, log_fn) +# use_time_step = engine_settings["use_time_step"] +# deep_interpolater = DeepInterpolate(interpolater, use_time_step, log_fn) +# series_interpolater = InterpolateSeries(deep_interpolater, log_fn) + +# scenes_base_path = self.scenes_source_path(self.INFLATE_STEP) +# create_directory(self.inflation_path) + +# with Mtqdm().open_bar(total=len(kept_scenes), desc="Inflate") as bar: +# for scene_name in kept_scenes: +# scene_input_path = os.path.join(scenes_base_path, scene_name) +# scene_output_path = os.path.join(self.inflation_path, scene_name) +# create_directory(scene_output_path) + +# num_splits = 0 +# disable_inflation = False + +# project_splits = 0 +# if self.inflate: +# if self.inflate_by_option == "1X": +# project_splits = 0 +# if self.inflate_by_option == "2X": +# project_splits = 1 +# elif self.inflate_by_option == "4X": +# project_splits = 2 +# elif self.inflate_by_option == "8X": +# project_splits = 3 +# elif self.inflate_by_option == "16X": +# project_splits = 4 + +# # if it's for slow motion, the split should be relative to the +# # project inflation rate + +# hinted_splits = 0 +# force_inflation, force_audio, force_inflate_by, force_silent =\ +# self.compute_forced_inflation(scene_name) +# if force_inflation: +# if force_inflate_by == "1X": +# disable_inflation = True +# elif force_inflate_by == "2X": +# hinted_splits = 1 +# elif force_inflate_by == "4X": +# hinted_splits = 2 +# elif force_inflate_by == "8X": +# hinted_splits = 3 +# elif force_inflate_by == "16X": +# hinted_splits = 4 + +# if hinted_splits: +# if force_audio or force_silent: +# # the figures for audio slow motion are relative to the project split rate +# # splits are really exponents of 2^n +# num_splits = project_splits + hinted_splits +# else: +# # if not for slow motion, force an exact split +# num_splits = hinted_splits +# else: +# num_splits = 0 if disable_inflation else project_splits + +# if num_splits: +# # the scene needs inflating +# output_basename = "interpolated_frames" +# file_list = sorted(get_files(scene_input_path, extension=self.frame_format)) +# series_interpolater.interpolate_series(file_list, +# scene_output_path, +# num_splits, +# output_basename, +# type=self.frame_format) +# ResequenceFiles(scene_output_path, +# self.frame_format, +# "inflated_frame", +# 1, 1, +# 1, 0, +# -1, +# True, +# log_fn).resequence() +# else: +# # no need to inflate so just copy the files using the resequencer +# ResequenceFiles(scene_input_path, +# self.frame_format, +# "inflated_frame", +# 1, 1, +# 1, 0, +# -1, +# False, +# log_fn, +# output_path=scene_output_path).resequence() + +# Mtqdm().update_bar(bar) + +# def get_upscaler(self, log_fn, realesrgan_settings, remixer_settings): +# model_name = realesrgan_settings["model_name"] +# gpu_ids = realesrgan_settings["gpu_ids"] +# fp32 = realesrgan_settings["fp32"] + +# # determine if cropped image size is above memory threshold requiring tiling +# use_tiling_over = remixer_settings["use_tiling_over"] +# size = self.crop_w * self.crop_h + +# if size > use_tiling_over: +# tiling = realesrgan_settings["tiling"] +# tile_pad = realesrgan_settings["tile_pad"] +# else: +# tiling = 0 +# tile_pad = 0 +# return UpscaleSeries(model_name, gpu_ids, fp32, tiling, tile_pad, log_fn) + +# FIXED_UPSCALE_FACTOR = 4.0 +# TEMP_UPSCALE_PATH = "upscaled_frames" +# 
DEFAULT_DOWNSCALE_TYPE = "area" + +# def upscale_scene(self, +# log_fn, +# upscaler, +# scene_input_path, +# scene_output_path, +# upscale_factor, +# downscale_type=DEFAULT_DOWNSCALE_TYPE): +# log_fn(f"creating scene output path {scene_output_path}") +# create_directory(scene_output_path) + +# working_path = os.path.join(scene_output_path, self.TEMP_UPSCALE_PATH) +# log_fn(f"about to create working path {working_path}") +# create_directory(working_path) + +# # TODO make this logic general + +# # upscale first at the engine's native scale +# file_list = sorted(get_files(scene_input_path)) +# output_basename = "upscaled_frames" +# log_fn(f"about to upscale images to {working_path}") +# upscaler.upscale_series(file_list, working_path, self.FIXED_UPSCALE_FACTOR, output_basename, +# self.frame_format) + +# # get size of upscaled frames +# upscaled_files = sorted(get_files(working_path)) +# width, height = image_size(upscaled_files[0]) +# log_fn(f"size of upscaled images: {width} x {height}") + +# # compute downscale factor +# downscale_factor = self.FIXED_UPSCALE_FACTOR / upscale_factor +# log_fn(f"downscale factor is {downscale_factor}") + +# downscaled_width = int(width / downscale_factor) +# downscaled_height = int(height / downscale_factor) +# log_fn(f"size of downscaled images: {downscaled_width} x {downscaled_height}") + +# if downscaled_width != width or downscaled_height != height: +# # downsample to final size +# log_fn(f"about to downscale images in {working_path} to {scene_output_path}") +# ResizeFrames(scene_input_path, +# scene_output_path, +# downscaled_width, +# downscaled_height, +# downscale_type, +# log_fn).resize(type=self.frame_format) +# else: +# log_fn("copying instead of unneeded downscaling") +# copy_files(working_path, scene_output_path) + +# try: +# log_fn(f"about to delete working path {working_path}") +# shutil.rmtree(working_path) +# except OSError as error: +# log_fn(f"ignoring error deleting working path: {error}") + +# def upscale_factor_from_options(self) -> float: +# upscale_factor = 1.0 +# if self.upscale: +# if self.upscale_option == "2X": +# upscale_factor = 2.0 +# elif self.upscale_option == "3X": +# upscale_factor = 3.0 +# elif self.upscale_option == "4X": +# upscale_factor = 4.0 +# return upscale_factor + +# def upscale_scenes(self, log_fn, kept_scenes, realesrgan_settings, remixer_settings): +# upscaler = self.get_upscaler(log_fn, realesrgan_settings, remixer_settings) +# scenes_base_path = self.scenes_source_path(self.UPSCALE_STEP) +# downscale_type = remixer_settings["scale_type_down"] +# create_directory(self.upscale_path) + +# upscale_factor = self.upscale_factor_from_options() + +# with Mtqdm().open_bar(total=len(kept_scenes), desc="Upscale") as bar: +# for scene_name in kept_scenes: +# scene_input_path = os.path.join(scenes_base_path, scene_name) +# scene_output_path = os.path.join(self.upscale_path, scene_name) +# create_directory(scene_output_path) + +# upscale_handled = False +# upscale_hint = self.get_hint(self.scene_labels.get(scene_name), "U") + +# if upscale_hint and not self.upscale: +# # only apply the hint if not already upscaling, otherwise the +# # frames may have mismatched sizes +# try: +# # for now ignore the hint value and upscale just at 1X, to clean up zooming +# self.upscale_scene(log_fn, +# upscaler, +# scene_input_path, +# scene_output_path, +# 1.0, +# downscale_type=downscale_type) +# upscale_handled = True + +# except Exception as error: +# log_fn( +# f"Error in upscale_scenes() handling processing hint {upscale_hint} - 
skipping processing: {error}") +# upscale_handled = False + +# if not upscale_handled: +# if self.upscale: +# self.upscale_scene(log_fn, +# upscaler, +# scene_input_path, +# scene_output_path, +# upscale_factor, +# downscale_type=downscale_type) +# else: +# # no need to upscale so just copy the files using the resequencer +# ResequenceFiles(scene_input_path, +# self.frame_format, +# "upscaled_frames", +# 1, 1, +# 1, 0, +# -1, +# False, +# log_fn, +# output_path=scene_output_path).resequence() +# Mtqdm().update_bar(bar) + +# def remix_filename_suffix(self, extra_suffix): +# label = "remix" + +# if self.resize_chosen(): +# label += "-rc" if self.resize else "-rcH" +# else: +# label += "-or" + +# if self.resynthesize_chosen(): +# if self.resynthesize: +# label += "-re" +# if self.resynth_option == "Clean": +# label += "C" +# elif self.resynth_option == "Scrub": +# label += "S" +# elif self.resynth_option == "Replace": +# label += "R" +# else: +# label += "-reH" + +# if self.inflate_chosen(): +# if self.inflate: +# label += "-in" + self.inflate_by_option[0] +# if self.inflate_slow_option == "Audio": +# label += "SA" +# elif self.inflate_slow_option == "Silent": +# label += "SM" +# else: +# label += "-inH" + +# if self.upscale_chosen(): +# if self.upscale: +# label += "-up" + self.upscale_option[0] +# else: +# label += "-upH" + +# label += "-" + extra_suffix if extra_suffix else "" +# return label + +# def default_remix_filepath(self, extra_suffix=""): +# _, filename, _ = split_filepath(self.source_video) +# suffix = self.remix_filename_suffix(extra_suffix) +# return os.path.join(self.project_path, f"{filename}-{suffix}.mp4") + + ## Report ## def generate_remix_report(self, resize, resynthesize, inflate, upscale): report = Jot() @@ -2527,503 +2550,505 @@ def generate_remix_report(self, resize, resynthesize, inflate, upscale): return report.lines - # get path to the furthest processed content - def furthest_processed_path(self): - if self.upscale_chosen(): - path = self.upscale_path - elif self.inflate_chosen(): - path = self.inflation_path - elif self.resynthesize_chosen(): - path = self.resynthesis_path - elif self.resize_chosen(): - path = self.resize_path - else: - path = self.scenes_path - return path - - # drop a kept scene after scene compiling has already been done - # used for dropping empty processed scenes, and force dropping processed scenes - def drop_kept_scene(self, scene_name): - self.scene_states[scene_name] = "Drop" - current_path = os.path.join(self.scenes_path, scene_name) - dropped_path = os.path.join(self.dropped_scenes_path, scene_name) - if os.path.exists(current_path): - if not os.path.exists(dropped_path): - shutil.move(current_path, dropped_path) - else: - raise ValueError( - f"cannot move {current_path} to {dropped_path} which already exists") - - # find scenes that are empty now after processing and should be automatically dropped - # this can happen when resynthesis and/or inflation are used on scenes with only a few frames - def drop_empty_processed_scenes(self, kept_scenes): - scenes_base_path = self.furthest_processed_path() - with Mtqdm().open_bar(total=len(kept_scenes), desc="Checking Clips") as bar: - for scene_name in kept_scenes: - scene_input_path = os.path.join(scenes_base_path, scene_name) - files = get_files(scene_input_path) - if len(files) == 0: - self.drop_kept_scene(scene_name) - Mtqdm().update_bar(bar) - - def delete_processed_clip(self, path, scene_name): - removed = [] - if path and os.path.exists(path): - files = get_files(path) - # some clips 
are formatted like "original_namee[000-999].ext", - # and some like "000-000.ext" - # TODO resequence audio clips and thumbnails to make the naming consistent - for file in files: - if file.find(scene_name) != -1: - os.remove(file) - removed.append(file) - return removed - - # TODO the last three paths in the list won't have scene name directories but instead files - # also it should delete the audio wav file if found since that isn't deleted each save - # drop an already-processed scene to cut it from the remix video - def force_drop_processed_scene(self, scene_index): - scene_name = self.scene_names[scene_index] - self.drop_kept_scene(scene_name) - removed = [] - purge_dirs = [] - for path in [ - self.resize_path, - self.resynthesis_path, - self.inflation_path, - self.upscale_path, - self.video_clips_path, - self.audio_clips_path, - self.clips_path - ]: - content_path = os.path.join(path, scene_name) - if os.path.exists(content_path): - purge_dirs.append(content_path) - purge_root = self.purge_paths(purge_dirs) - removed += purge_dirs - - if purge_root: - self.copy_project_file(purge_root) - - # audio clips aren't cleaned each time a remix is saved - # clean now to ensure the dropped scene audio clip is removed - self.clean_remix_content(purge_from="audio_clips") - - # TODO this didn't ever work - # if self.audio_clips_path: - # self.audio_clips = sorted(get_files(self.audio_clips_path)) - - return removed - - AUDIO_CLIPS_PATH = "AUDIO" - - def create_audio_clips(self, log_fn, global_options, audio_format): - self.audio_clips_path = os.path.join(self.clips_path, self.AUDIO_CLIPS_PATH) - create_directory(self.audio_clips_path) - # save the project now to preserve the newly established path - self.save() - - edge_trim = 1 if self.resynthesize else 0 - SliceVideo(self.source_audio, - self.project_fps, - self.scenes_path, - self.audio_clips_path, - 0.0, - audio_format, - 0, - 1, - edge_trim, - False, - 0.0, - 0.0, - log_fn, - global_options=global_options).slice() - self.audio_clips = sorted(get_files(self.audio_clips_path)) - - VIDEO_CLIPS_PATH = "VIDEO" - - def compute_inflated_fps(self, force_inflation, force_audio, force_inflate_by, force_silent): - _, audio_slow_motion, silent_slow_motion, project_inflation_rate, forced_inflated_rate = \ - self.compute_effective_slow_motion(force_inflation, force_audio, force_inflate_by, - force_silent) - if audio_slow_motion or silent_slow_motion: - fps_factor = project_inflation_rate - else: - if force_inflation: - fps_factor = forced_inflated_rate - else: - fps_factor = project_inflation_rate - return self.project_fps * fps_factor - - def compute_forced_inflation(self, scene_name): - force_inflation = False - force_audio = False - force_inflate_by = None - force_silent = False - - inflation_hint = self.get_hint(self.scene_labels.get(scene_name), "I") - if inflation_hint: - if "16" in inflation_hint: - force_inflation = True - force_inflate_by = "16X" - elif "1" in inflation_hint: - force_inflation = True - force_inflate_by = "1X" - elif "2" in inflation_hint: - force_inflation = True - force_inflate_by = "2X" - elif "4" in inflation_hint: - force_inflation = True - force_inflate_by = "4X" - elif "8" in inflation_hint: - force_inflation = True - force_inflate_by = "8X" - - if "A" in inflation_hint: - force_audio = True - elif "S" in inflation_hint: - force_silent = True - # else "N" for no slow motion - return force_inflation, force_audio, force_inflate_by, force_silent - - def compute_scene_fps(self, scene_name): - force_inflation, force_audio, 
force_inflate_by, force_silent =\ - self.compute_forced_inflation(scene_name) - - return self.compute_inflated_fps(force_inflation, - force_audio, - force_inflate_by, - force_silent) - - def create_video_clips(self, log_fn, kept_scenes, global_options): - self.video_clips_path = os.path.join(self.clips_path, self.VIDEO_CLIPS_PATH) - create_directory(self.video_clips_path) - # save the project now to preserve the newly established path - self.save() - - scenes_base_path = self.furthest_processed_path() - with Mtqdm().open_bar(total=len(kept_scenes), desc="Video Clips") as bar: - for scene_name in kept_scenes: - scene_input_path = os.path.join(scenes_base_path, scene_name) - scene_output_filepath = os.path.join(self.video_clips_path, f"{scene_name}.mp4") - - video_clip_fps = self.compute_scene_fps(scene_name) - - ResequenceFiles(scene_input_path, - self.frame_format, - "processed_frame", - 1, - 1, - 1, - 0, - -1, - True, - log_fn).resequence() - - PNGtoMP4(scene_input_path, - None, - video_clip_fps, - scene_output_filepath, - crf=self.output_quality, - global_options=global_options, - type=self.frame_format) - Mtqdm().update_bar(bar) - - self.video_clips = sorted(get_files(self.video_clips_path)) - - def inflation_rate(self, inflate_by : str): - if not inflate_by: - return 1 - return int(inflate_by[:-1]) - - def compute_effective_slow_motion(self, force_inflation, force_audio, force_inflate_by, - force_silent): - - audio_slow_motion = force_audio or (self.inflate and self.inflate_slow_option == "Audio") - silent_slow_motion = force_silent or (self.inflate and self.inflate_slow_option == "Silent") - - project_inflation_rate = self.inflation_rate(self.inflate_by_option) if self.inflate else 1 - forced_inflation_rate = self.inflation_rate(force_inflate_by) if force_inflation else 1 - - # For slow motion hints, interpret the 'force_inflate_by' as relative to the project rate - # If the forced inflation rate is 1 it means no inflation, not even at the projecr fate - if audio_slow_motion or silent_slow_motion: - if forced_inflation_rate != 1: - forced_inflation_rate *= project_inflation_rate - - motion_factor = forced_inflation_rate / project_inflation_rate - return motion_factor, audio_slow_motion, silent_slow_motion, project_inflation_rate, \ - forced_inflation_rate - - def compute_inflated_audio_options(self, custom_audio_options, force_inflation, force_audio, - force_inflate_by, force_silent): - - motion_factor, audio_slow_motion, silent_slow_motion, _, _ = \ - self.compute_effective_slow_motion(force_inflation, force_audio, force_inflate_by, - force_silent) - - audio_motion_factor = motion_factor - - if audio_slow_motion: - if audio_motion_factor == 8: - output_options = '-filter:a "atempo=0.5,atempo=0.5,atempo=0.5" -c:v copy -shortest ' \ - + custom_audio_options - elif audio_motion_factor == 4: - output_options = '-filter:a "atempo=0.5,atempo=0.5" -c:v copy -shortest ' \ - + custom_audio_options - elif audio_motion_factor == 2: - output_options = '-filter:a "atempo=0.5" -c:v copy -shortest ' + custom_audio_options - elif audio_motion_factor == 1: - output_options = '-filter:a "atempo=1.0" -c:v copy -shortest ' + custom_audio_options - elif audio_motion_factor == 0.5: - output_options = '-filter:a "atempo=2.0" -c:v copy -shortest ' + custom_audio_options - elif audio_motion_factor == 0.25: - output_options = '-filter:a "atempo=2.0,atempo=2.0" -c:v copy -shortest ' \ - + custom_audio_options - elif audio_motion_factor == 0.125: - output_options = '-filter:a "atempo=2.0,atempo=2.0,atempo=2.0" 
-c:v copy -shortest ' \ - + custom_audio_options - else: - raise ValueError(f"audio_motion_factor {audio_motion_factor} is not supported") - elif silent_slow_motion: - # check for an existing audio sample rate, so the silent footage will blend properly - # with non-silent footage, otherwise there may be an audio/video data length mismatch - sample_rate = self.video_details.get("sample_rate", "48000") - output_options = \ - '-f lavfi -i anullsrc -ac 2 -ar ' + sample_rate + ' -map 0:v:0 -map 2:a:0 -c:v copy -shortest ' \ - + custom_audio_options - else: - output_options = custom_audio_options - - return output_options - - def create_scene_clips(self, log_fn, kept_scenes, global_options): - if self.video_details["has_audio"]: - with Mtqdm().open_bar(total=len(kept_scenes), desc="Remix Clips") as bar: - for index, scene_name in enumerate(kept_scenes): - scene_video_path = self.video_clips[index] - scene_audio_path = self.audio_clips[index] - scene_output_filepath = os.path.join(self.clips_path, f"{scene_name}.mp4") - - force_inflation, force_audio, force_inflate_by, force_silent =\ - self.compute_forced_inflation(scene_name) - - output_options = self.compute_inflated_audio_options("-c:a aac -shortest ", - force_inflation, - force_audio, - force_inflate_by, - force_silent) - combine_video_audio(scene_video_path, - scene_audio_path, - scene_output_filepath, - global_options=global_options, - output_options=output_options) - Mtqdm().update_bar(bar) - self.clips = sorted(get_files(self.clips_path)) - else: - self.clips = sorted(get_files(self.video_clips_path)) - - def create_custom_video_clips(self, - log_fn, - kept_scenes, - global_options, - custom_video_options, - custom_ext, - draw_text_options=None): - self.video_clips_path = os.path.join(self.clips_path, self.VIDEO_CLIPS_PATH) - create_directory(self.video_clips_path) - # save the project now to preserve the newly established path - self.save() - - scenes_base_path = self.furthest_processed_path() - if custom_video_options.find("