├── .github
│   ├── test
│   │   └── .gitkeep
│   └── workflows
│       └── publish_action.yml
├── requirements.txt
├── nodes
│   ├── convert
│   │   ├── FloatToInt.py
│   │   ├── FloatsToWeightsStrategy.py
│   │   └── MaskToFloat.py
│   ├── utils
│   │   ├── InvertFloats.py
│   │   ├── RepeatImageToCount.py
│   │   └── FloatsVisualizer.py
│   └── audio
│       ├── AudioPromptSchedule.py
│       ├── AudioAnimateDiffSchedule.py
│       ├── AudioControlNetSchedule.py
│       ├── LoadAudioSeparationModel.py
│       ├── AudioPeaksDetection.py
│       ├── EditAudioWeights.py
│       ├── AudioIPAdapterTransitions.py
│       ├── AudioRemixer.py
│       └── AudioAnalysis.py
├── pyproject.toml
├── .gitignore
├── web
│   └── js
│       ├── appearance.js
│       └── help_popup.js
├── __init__.py
├── node_configs.py
├── README.md
├── yvann_web_async
│   ├── purify.min.js
│   ├── svg-path-properties.min.js
│   └── marked.min.js
└── LICENCE
/.github/test/.gitkeep:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/requirements.txt:
--------------------------------------------------------------------------------
1 | numpy
2 | torch
3 | openunmix
4 | matplotlib
5 | pillow
6 | torchaudio
7 | termcolor
8 | scipy
--------------------------------------------------------------------------------
/.github/workflows/publish_action.yml:
--------------------------------------------------------------------------------
1 | name: Publish to Comfy registry
2 | on:
3 | workflow_dispatch:
4 | push:
5 | branches:
6 | - main
7 | paths:
8 | - "pyproject.toml"
9 |
10 | jobs:
11 | publish-node:
12 | name: Publish Custom Node to registry
13 | runs-on: ubuntu-latest
14 | steps:
15 | - name: Check out code
16 | uses: actions/checkout@v4
17 | - name: Publish Custom Node
18 | uses: Comfy-Org/publish-node-action@main
19 | with:
20 | personal_access_token: ${{ secrets.REGISTRY_ACCESS_TOKEN }}
21 |
--------------------------------------------------------------------------------
/nodes/convert/FloatToInt.py:
--------------------------------------------------------------------------------
1 | from ... import Yvann
2 | import numpy as np
3 |
4 | class ConvertNodeBase(Yvann):
5 | CATEGORY = "👁️ Yvann Nodes/🔄 Convert"
6 |
7 | class FloatToInt(ConvertNodeBase):
8 | @classmethod
9 | def INPUT_TYPES(cls):
10 | return {
11 | "required": {
12 | "float": ("FLOAT", {"forceInput": True}),
13 | }
14 | }
15 | RETURN_TYPES = ("INT",)
16 | RETURN_NAMES = ("int",)
17 | FUNCTION = "convert_floats_to_ints"
18 |
19 | def convert_floats_to_ints(self, float):
20 |
21 | floats_array = np.array(float)
22 |
23 | ints_array = np.round(floats_array)
24 |
25 | ints_array = ints_array.astype(int)
26 | integers = ints_array.tolist()
27 |
28 | return (integers,)
29 |
--------------------------------------------------------------------------------
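`FloatToInt` relies on `np.round`, which rounds exact halves to the nearest even integer ("banker's rounding") rather than always rounding up. A quick standalone check of what the node returns for boundary values:

```python
import numpy as np

# np.round uses round-half-to-even: exact halves go to the nearest even integer.
values = [0.5, 1.5, 2.5, -0.5, 1.2, 1.7]
ints = np.round(np.array(values)).astype(int).tolist()
print(ints)  # [0, 2, 2, 0, 1, 2]
```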
/pyproject.toml:
--------------------------------------------------------------------------------
1 | [project]
2 | name = "comfyui_yvann-nodes"
3 | description = "Audio Reactive nodes for AI animations 🔊 Analyze audio, extract drums, bass, vocals. Compatible with IPAdapter, ControlNets, AnimateDiff... Generate reactive masks and weights. Create audio-driven visuals. Produce weight graphs and audio masks. Ideal for music videos and reactive animations. Features audio scheduling and waveform analysis"
4 | version = "2.0.2"
5 | license = {file = "LICENCE"}
6 | dependencies = ["openunmix", "numpy", "torch", "matplotlib", "pillow", "scipy", "torchaudio", "termcolor"]
7 |
8 | [project.urls]
9 | Repository = "https://github.com/yvann-ba/ComfyUI_Yvann-Nodes"
10 | # Used by Comfy Registry https://comfyregistry.org
11 |
12 | [tool.comfy]
13 | PublisherId = "yvann"
14 | DisplayName = "ComfyUI_Yvann-Nodes"
15 | Icon = ""
16 |
--------------------------------------------------------------------------------
/nodes/convert/FloatsToWeightsStrategy.py:
--------------------------------------------------------------------------------
1 | from ... import Yvann
2 |
3 | class ConvertNodeBase(Yvann):
4 | CATEGORY = "👁️ Yvann Nodes/🔄 Convert"
5 |
6 | class FloatsToWeightsStrategy(ConvertNodeBase):
7 | @classmethod
8 | def INPUT_TYPES(cls):
9 | return {
10 | "required": {
11 | "floats": ("FLOAT", {"forceInput": True}),
12 | }
13 | }
14 | RETURN_TYPES = ("WEIGHTS_STRATEGY",)
15 | RETURN_NAMES = ("WEIGHTS_STRATEGY",)
16 | FUNCTION = "convert"
17 |
18 | def convert(self, floats):
19 | frames = len(floats)
20 | weights_str = ", ".join(map(lambda x: f"{x:.3f}", floats))
21 |
22 | weights_strategy = {
23 | "weights": weights_str,
24 | "timing": "custom",
25 | "frames": frames,
26 | "start_frame": 0,
27 | "end_frame": frames,
28 | "add_starting_frames": 0,
29 | "add_ending_frames": 0,
30 | "method": "full batch",
31 | "frame_count": frames,
32 | }
33 | return (weights_strategy,)
34 |
--------------------------------------------------------------------------------
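For reference, a minimal standalone sketch of the dict `FloatsToWeightsStrategy` produces, assuming the same keys IPAdapter_plus reads from a weights strategy:

```python
# Pack a float list into the weights-strategy dict shape built above.
floats = [0.0, 0.25, 0.5, 1.0]
weights_strategy = {
    "weights": ", ".join(f"{x:.3f}" for x in floats),
    "timing": "custom",
    "frames": len(floats),
    "start_frame": 0,
    "end_frame": len(floats),
    "add_starting_frames": 0,
    "add_ending_frames": 0,
    "method": "full batch",
    "frame_count": len(floats),
}
print(weights_strategy["weights"])  # 0.000, 0.250, 0.500, 1.000
```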
/nodes/utils/InvertFloats.py:
--------------------------------------------------------------------------------
1 | from ... import Yvann
2 | import numpy as np
3 |
4 |
5 | class UtilsNodeBase(Yvann):
6 | CATEGORY = "👁️ Yvann Nodes/🛠️ Utils"
7 |
8 | class InvertFloats(UtilsNodeBase):
9 | @classmethod
10 | def INPUT_TYPES(cls):
11 | return {
12 | "required": {
13 | "floats": ("FLOAT", {"forceInput": True}),
14 | }
15 | }
16 | RETURN_TYPES = ("FLOAT",)
17 | RETURN_NAMES = ("inverted_floats",)
18 | FUNCTION = "invert_floats"
19 |
20 | def invert_floats(self, floats):
21 | floats_array = np.array(floats)
22 | min_value = floats_array.min()
23 | max_value = floats_array.max()
24 |
25 | # Invert the values relative to the range midpoint
26 | range_midpoint = (max_value + min_value) / 2.0
27 | floats_invert_array = (2 * range_midpoint) - floats_array
28 | floats_invert_array = np.round(floats_invert_array, decimals=6)
29 |
30 | # Convert back to list
31 | floats_invert = floats_invert_array.tolist()
32 |
33 | return (floats_invert,)
34 |
--------------------------------------------------------------------------------
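The inversion mirrors each value around the midpoint of the list's range, so the minimum and maximum swap places while the spread is preserved. A worked example of the formula `inverted = (min + max) - x`:

```python
import numpy as np

# Mirror around the range midpoint: 2 * midpoint - x == (min + max) - x.
floats = [0.2, 0.5, 1.0]
arr = np.array(floats)
inverted = (arr.min() + arr.max()) - arr
print(np.round(inverted, 6).tolist())  # [1.0, 0.7, 0.2]
```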
/nodes/utils/RepeatImageToCount.py:
--------------------------------------------------------------------------------
1 | from ... import Yvann
2 | import torch
3 | class UtilsNodeBase(Yvann):
4 | CATEGORY = "👁️ Yvann Nodes/🛠️ Utils"
5 |
6 | class RepeatImageToCount(UtilsNodeBase):
7 | @classmethod
8 | def INPUT_TYPES(cls):
9 | return {
10 | "required": {
11 | "image": ("IMAGE", {"forceInput": True}),
12 | "count": ("INT", {"default": 1, "min": 1}),
13 | }
14 | }
15 | RETURN_TYPES = ("IMAGE",)
16 | RETURN_NAMES = ("images",)
17 | FUNCTION = "repeat_image_to_count"
18 |
19 | def repeat_image_to_count(self, image, count):
20 | num_images = image.size(0) # Number of images in the input batch
21 |
22 | # Create indices to select images from input batch
23 | indices = [i % num_images for i in range(count)] # Cycle through images to reach the desired count
24 |
25 | # Select images using the computed indices
26 | images = image[indices]
27 | return (images,)
--------------------------------------------------------------------------------
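The cycling behavior comes from the modulo indexing: with a 3-image batch and `count=7`, the indices wrap around the batch. A standalone sketch with a dummy tensor batch:

```python
import torch

# Dummy batch of 3 "images" in ComfyUI layout [B, H, W, C].
image = torch.rand(3, 8, 8, 3)
count = 7
indices = [i % image.size(0) for i in range(count)]
print(indices)               # [0, 1, 2, 0, 1, 2, 0]
print(image[indices].shape)  # torch.Size([7, 8, 8, 3])
```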
/nodes/convert/MaskToFloat.py:
--------------------------------------------------------------------------------
1 | from ... import Yvann
2 |
3 | # To do the opposite (Float To Mask), you can install the node pack "ComfyUI-KJNodes"
4 | class ConvertNodeBase(Yvann):
5 | CATEGORY = "👁️ Yvann Nodes/🔄 Convert"
6 |
7 | class MaskToFloat(ConvertNodeBase):
8 | @classmethod
9 | def INPUT_TYPES(cls):
10 | return {
11 | "required": {
12 | "mask": ("MASK", {"forceInput": True}),
13 | },
14 | }
15 | RETURN_TYPES = ("FLOAT",)
16 | RETURN_NAMES = ("float",)
17 | FUNCTION = "mask_to_float"
18 |
19 | def mask_to_float(self, mask):
20 | import torch
21 |
22 | # Ensure mask is a torch.Tensor
23 | if not isinstance(mask, torch.Tensor):
24 | raise ValueError("Input 'mask' must be a torch.Tensor")
25 |
26 | # Handle case where mask may have shape [H, W] instead of [B, H, W]
27 | if mask.dim() == 2:
28 | mask = mask.unsqueeze(0) # Add batch dimension
29 |
30 | # mask has shape [B, H, W]
31 | batch_size = mask.shape[0]
32 | output_values = []
33 |
34 | for i in range(batch_size):
35 | single_mask = mask[i] # shape [H, W]
36 | mean_value = round(single_mask.mean().item(), 6) # Compute mean pixel value
37 | output_values.append(mean_value)
38 |
39 | return (output_values,)
40 |
--------------------------------------------------------------------------------
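Each mask in the batch collapses to a single float: its mean pixel value, so a binary mask covering half the frame yields 0.5. A quick check with dummy masks:

```python
import torch

mask = torch.zeros(2, 4, 4)  # batch of two 4x4 masks
mask[0, :2, :] = 1.0         # half of mask 0 is on
mask[1] = 1.0                # mask 1 is fully on
print([round(m.mean().item(), 6) for m in mask])  # [0.5, 1.0]
```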
/.gitignore:
--------------------------------------------------------------------------------
1 | # Python cache files
2 | __pycache__/
3 | *.py[cod]
4 | *$py.class
5 | **/*.pyc
6 | **/__pycache__
7 | **/.pytest_cache
8 |
9 |
10 | audio_learn.ipynb
11 | # C extensions
12 | *.so
13 | readme.md
14 | # Distribution / packaging
15 | .Python
16 | build/
17 | develop-eggs/
18 | dist/
19 | downloads/
20 | eggs/
21 | .eggs/
22 | lib/
23 | lib64/
24 | parts/
25 | sdist/
26 | var/
27 | wheels/
28 | *.egg-info/
29 | .installed.cfg
30 | *.egg
31 |
32 | # PyInstaller
33 | *.manifest
34 | *.spec
35 |
36 | # Installer logs
37 | pip-log.txt
38 | pip-delete-this-directory.txt
39 |
40 | # Unit test / coverage reports
41 | htmlcov/
42 | .tox/
43 | .coverage
44 | .coverage.*
45 | .cache
46 | nosetests.xml
47 | coverage.xml
48 | *.cover
49 | .hypothesis/
50 |
51 | # Jupyter Notebook
52 | .ipynb_checkpoints
53 |
54 | # pyenv
55 | .python-version
56 |
57 | # Environment directories
58 | .env
59 | .venv
60 | env/
61 | venv/
62 | ENV/
63 |
64 | # Spyder project settings
65 | .spyderproject
66 | .spyproject
67 |
68 | # Rope project settings
69 | .ropeproject
70 |
71 | # mkdocs documentation
72 | /site
73 |
74 | # mypy
75 | .mypy_cache/
76 |
77 | # IDE settings
78 | .vscode/
79 | .idea/
80 |
81 | # OS generated files
82 | .DS_Store
83 | .DS_Store?
84 | ._*
85 | .Spotlight-V100
86 | .Trashes
87 | ehthumbs.db
88 | Thumbs.db
89 |
90 | # ComfyUI specific (adjust as needed)
91 | # output/
92 | # input/
93 | # models/
--------------------------------------------------------------------------------
/nodes/audio/AudioPromptSchedule.py:
--------------------------------------------------------------------------------
1 | from ... import Yvann
2 |
3 | class AudioNodeBase(Yvann):
4 | CATEGORY = "👁️ Yvann Nodes/🔊 Audio"
5 |
6 | class AudioPromptSchedule(AudioNodeBase):
7 | @classmethod
8 | def INPUT_TYPES(cls):
9 | return {
10 | "required": {
11 | "peaks_index": ("STRING", {"forceInput": True}),
12 | "prompts": ("STRING", {"default": "", "multiline": True}),
13 | }
14 | }
15 |
16 | RETURN_TYPES = ("STRING",)
17 | RETURN_NAMES = ("prompt_schedule",)
18 | FUNCTION = "create_prompt_schedule"
19 |
20 | def create_prompt_schedule(self, peaks_index, prompts=""):
21 | switch_index = peaks_index
22 | if isinstance(switch_index, str):
23 | switch_index = [int(idx.strip()) for idx in peaks_index.split(",")]
24 | else:
25 | switch_index = [int(idx) for idx in switch_index]
26 |
27 | # Parse the prompts, split by newline, and remove empty lines
28 | prompts_list = [p.strip() for p in prompts.split("\n") if p.strip() != ""]
29 |
30 | # Ensure the number of prompts matches the number of indices by looping prompts
31 | num_indices = len(switch_index)
32 | num_prompts = len(prompts_list)
33 |
34 | if num_prompts > 0:
35 | # Loop prompts to match the number of indices
36 | extended_prompts = []
37 | while len(extended_prompts) < num_indices:
38 | for p in prompts_list:
39 | extended_prompts.append(p)
40 | if len(extended_prompts) == num_indices:
41 | break
42 | prompts_list = extended_prompts
43 | else:
44 | # If no prompts provided, fill with empty strings
45 | prompts_list = [""] * num_indices
46 |
47 | # Create the formatted prompt schedule string
48 | out = ""
49 | for idx, frame in enumerate(switch_index):
50 | out += f"\"{frame}\": \"{prompts_list[idx]}\",\n"
51 |
52 | return (out,)
53 |
--------------------------------------------------------------------------------
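The output is a plain string of `"frame": "prompt",` lines, with prompts cycled until every peak index has one. A standalone sketch of the same logic, using illustrative peaks and prompts:

```python
peaks_index = "0, 12, 30"                  # illustrative peak frames
prompts = "a calm forest\na burning city"  # two illustrative prompts

switch_index = [int(i.strip()) for i in peaks_index.split(",")]
prompts_list = [p.strip() for p in prompts.split("\n") if p.strip()]
# Cycle prompts so every peak index gets one
prompts_list = [prompts_list[i % len(prompts_list)] for i in range(len(switch_index))]

out = "".join(f'"{frame}": "{prompts_list[i]}",\n' for i, frame in enumerate(switch_index))
print(out)
# "0": "a calm forest",
# "12": "a burning city",
# "30": "a calm forest",
```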
/web/js/appearance.js:
--------------------------------------------------------------------------------
1 | import { app } from "../../../scripts/app.js";
2 |
3 | const COLOR_THEMES = {
4 | blue: { nodeColor: "#153a61", nodeBgColor: "#1A4870" },
5 | };
6 |
7 | const NODE_COLORS = {
8 | "Audio Analysis": "blue",
9 | "Audio IPAdapter Transitions": "blue",
10 | "Audio Prompt Schedule": "blue",
11 | "Audio Peaks Detection": "blue",
12 | "Audio Remixer": "blue",
13 | "Edit Audio Weights": "blue",
14 | "Audio ControlNet Schedule": "blue",
15 | "Load Audio Separation Model": "blue",
16 | "Floats To Weights Strategy": "blue",
17 | "Invert Floats": "blue",
18 | "Floats Visualizer": "blue",
19 | "Mask To Float": "blue",
20 | "Repeat Image To Count": "blue",
21 | "Float to Int": "blue"
22 | };
23 |
24 | function shuffleArray(array) {
25 | for (let i = array.length - 1; i > 0; i--) {
26 | const j = Math.floor(Math.random() * (i + 1));
27 | [array[i], array[j]] = [array[j], array[i]]; // Swap elements
28 | }
29 | }
30 |
31 | let colorKeys = Object.keys(COLOR_THEMES).filter(key => key !== "none");
32 | shuffleArray(colorKeys); // Shuffle the color themes initially
33 |
34 | function setNodeColors(node, theme) {
35 | if (!theme) { return; }
36 | node.shape = "box";
37 | if (theme.nodeColor && theme.nodeBgColor) {
38 | node.color = theme.nodeColor;
39 | node.bgcolor = theme.nodeBgColor;
40 | }
41 | }
42 |
43 | const ext = {
44 | name: "Yvann.appearance",
45 |
46 | nodeCreated(node) {
47 | const nclass = node.comfyClass;
48 | if (NODE_COLORS.hasOwnProperty(nclass)) {
49 | let colorKey = NODE_COLORS[nclass];
50 |
51 | if (colorKey === "random") {
52 | // Check for a valid color key before popping
53 | if (colorKeys.length === 0 || !COLOR_THEMES[colorKeys[colorKeys.length - 1]]) {
54 | colorKeys = Object.keys(COLOR_THEMES).filter(key => key !== "none");
55 | shuffleArray(colorKeys);
56 | }
57 | colorKey = colorKeys.pop();
58 | }
59 |
60 | const theme = COLOR_THEMES[colorKey];
61 | setNodeColors(node, theme);
62 | }
63 | }
64 | };
65 |
66 | app.registerExtension(ext);
67 |
--------------------------------------------------------------------------------
/nodes/audio/AudioAnimateDiffSchedule.py:
--------------------------------------------------------------------------------
1 | from ... import Yvann
2 | import numpy as np
3 |
4 | class AudioNodeBase(Yvann):
5 | CATEGORY = "👁️ Yvann Nodes/🔊 Audio"
6 |
7 | class AudioAnimateDiffSchedule(AudioNodeBase):
8 | @classmethod
9 | def INPUT_TYPES(cls):
10 | return {
11 | "required": {
12 | "any_audio_weights": ("FLOAT", {"forceInput": True}),
13 | "smooth": ("FLOAT", {"default": 0.0, "min": 0.0, "max": 1.0, "step": 0.01}),
14 | "min_range": ("FLOAT", {"default": 0.95, "min": 0.8, "max": 1.49, "step": 0.01}),
15 | "max_range": ("FLOAT", {"default": 1.25, "min": 0.81, "max": 1.5, "step": 0.01}),
16 | }
17 | }
18 |
19 | RETURN_TYPES = ("FLOAT",)
20 | RETURN_NAMES = ("float_val",)
21 | FUNCTION = "process_any_audio_weights"
22 |
23 | CATEGORY = "👁️ Yvann Nodes/🔊 Audio"
24 |
25 | def process_any_audio_weights(self, any_audio_weights, smooth, min_range, max_range):
26 | if not isinstance(any_audio_weights, (list, np.ndarray)):
27 | print("Invalid any_audio_weights input")
28 | return None
29 |
30 | any_audio_weights = np.array(any_audio_weights, dtype=np.float32)
31 |
32 | # Apply smoothing
33 | smoothed_signal = np.zeros_like(any_audio_weights)
34 | for i in range(len(any_audio_weights)):
35 | if i == 0:
36 | smoothed_signal[i] = any_audio_weights[i]
37 | else:
38 | smoothed_signal[i] = smoothed_signal[i-1] * smooth + any_audio_weights[i] * (1 - smooth)
39 |
40 | # Normalize the smoothed signal
41 | min_val = np.min(smoothed_signal)
42 | max_val = np.max(smoothed_signal)
43 | if max_val - min_val != 0:
44 | normalized_signal = (smoothed_signal - min_val) / (max_val - min_val)
45 | else:
46 | normalized_signal = smoothed_signal - min_val # All values are the same
47 |
48 | # Rescale to specified range
49 | rescaled_signal = normalized_signal * (max_range - min_range) + min_range
50 |         rescaled_signal = rescaled_signal.tolist()
51 |         rounded_rescaled_signal = [round(float(elem), 6) for elem in rescaled_signal]
52 |
53 | return (rounded_rescaled_signal,)
54 |
--------------------------------------------------------------------------------
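The smoothing loop above is a first-order exponential moving average, `s[i] = smooth * s[i-1] + (1 - smooth) * x[i]`, followed by min-max normalization and an affine rescale into `[min_range, max_range]`. A condensed standalone sketch on toy weights:

```python
import numpy as np

x = np.array([0.0, 1.0, 0.2, 0.9, 0.1], dtype=np.float32)  # toy audio weights
smooth, min_range, max_range = 0.5, 0.95, 1.25

# Exponential moving average
s = np.zeros_like(x)
s[0] = x[0]
for i in range(1, len(x)):
    s[i] = s[i - 1] * smooth + x[i] * (1 - smooth)

# Min-max normalize, then rescale into [min_range, max_range]
span = s.max() - s.min()
normalized = (s - s.min()) / span if span else s - s.min()
rescaled = normalized * (max_range - min_range) + min_range
print([round(float(v), 6) for v in rescaled])
```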
/nodes/audio/AudioControlNetSchedule.py:
--------------------------------------------------------------------------------
1 | from ... import Yvann
2 | import numpy as np
3 |
4 | class AudioNodeBase(Yvann):
5 | CATEGORY = "👁️ Yvann Nodes/🔊 Audio"
6 |
7 | class AudioControlNetSchedule(AudioNodeBase):
8 | @classmethod
9 | def INPUT_TYPES(cls):
10 | return {
11 | "required": {
12 | "any_audio_weights": ("FLOAT", {"forceInput": True}),
13 | "smooth": ("FLOAT", {"default": 0.0, "min": 0.0, "max": 1.0, "step": 0.01}),
14 | "min_range": ("FLOAT", {"default": 0.0, "min": 0.0, "max": 0.99, "step": 0.01}),
15 | "max_range": ("FLOAT", {"default": 1.0, "min": 0.01, "max": 1.0, "step": 0.01}),
16 | }
17 | }
18 |
19 | RETURN_TYPES = ("FLOAT",)
20 | RETURN_NAMES = ("processed_weights",)
21 | FUNCTION = "process_any_audio_weights"
22 |
23 | CATEGORY = "👁️ Yvann Nodes/🔊 Audio"
24 |
25 | def process_any_audio_weights(self, any_audio_weights, smooth, min_range, max_range):
26 | if not isinstance(any_audio_weights, (list, np.ndarray)):
27 | print("Invalid any_audio_weights input")
28 | return None
29 |
30 | any_audio_weights = np.array(any_audio_weights, dtype=np.float32)
31 |
32 | # Apply smoothing
33 | smoothed_signal = np.zeros_like(any_audio_weights)
34 | for i in range(len(any_audio_weights)):
35 | if i == 0:
36 | smoothed_signal[i] = any_audio_weights[i]
37 | else:
38 | smoothed_signal[i] = smoothed_signal[i-1] * smooth + any_audio_weights[i] * (1 - smooth)
39 |
40 | # Normalize the smoothed signal
41 | min_val = np.min(smoothed_signal)
42 | max_val = np.max(smoothed_signal)
43 | if max_val - min_val != 0:
44 | normalized_signal = (smoothed_signal - min_val) / (max_val - min_val)
45 | else:
46 | normalized_signal = smoothed_signal - min_val # All values are the same
47 |
48 | # Rescale to specified range
49 | rescaled_signal = normalized_signal * (max_range - min_range) + min_range
50 |         rescaled_signal = rescaled_signal.tolist()
51 |         rounded_rescaled_signal = [round(float(elem), 6) for elem in rescaled_signal]
52 |
53 | return (rounded_rescaled_signal,)
54 |
55 |
--------------------------------------------------------------------------------
/nodes/audio/LoadAudioSeparationModel.py:
--------------------------------------------------------------------------------
1 | import os
2 | import folder_paths
3 | import torch
4 | from torchaudio.pipelines import HDEMUCS_HIGH_MUSDB_PLUS
5 | from typing import Any
6 | from ... import Yvann
7 | import comfy.model_management as mm
8 |
9 |
10 | class AudioNodeBase(Yvann):
11 | CATEGORY = "👁️ Yvann Nodes/🔊 Audio"
12 |
13 | class LoadAudioSeparationModel(AudioNodeBase):
14 | audio_models = ["Hybrid Demucs", "Open-Unmix"]
15 |
16 | @classmethod
17 | def INPUT_TYPES(cls) -> dict[str, dict[str, tuple]]:
18 | return {
19 | "required": {
20 | "model": (cls.audio_models,),
21 | }
22 | }
23 |
24 | RETURN_TYPES = ("AUDIO_SEPARATION_MODEL",)
25 | RETURN_NAMES = ("audio_sep_model",)
26 | FUNCTION = "main"
27 |
28 | def load_OpenUnmix(self, model):
29 | device = mm.get_torch_device()
30 | download_path = os.path.join(folder_paths.models_dir, "openunmix")
31 | os.makedirs(download_path, exist_ok=True)
32 |
33 | model_file = "umxl.pth"
34 | model_path = os.path.join(download_path, model_file)
35 |
36 | if not os.path.exists(model_path):
37 | print(f"Downloading {model} model...")
38 | try:
39 | separator = torch.hub.load('sigsep/open-unmix-pytorch', 'umxl', device='cpu')
40 | except RuntimeError as e:
41 | print(f"Failed to download model : {e}")
42 | return None
43 | torch.save(separator.state_dict(), model_path)
44 | print(f"Model saved to: {model_path}")
45 | else:
46 | print(f"Loading model from: {model_path}")
47 | separator = torch.hub.load('sigsep/open-unmix-pytorch', 'umxl', device='cpu')
48 | separator.load_state_dict(torch.load(model_path, map_location='cpu'))
49 |
50 | separator = separator.to(device)
51 | separator.eval()
52 |
53 | return (separator,)
54 |
55 | def load_HDemucs(self):
56 |
57 | device = mm.get_torch_device()
58 | bundle: Any = HDEMUCS_HIGH_MUSDB_PLUS
59 | print("Hybrid Demucs model is loaded")
60 | model_info = {
61 | "demucs": True,
62 | "model": bundle.get_model().to(device),
63 | "sample_rate": bundle.sample_rate
64 | }
65 | return (model_info,)
66 |
67 |
68 | def main(self, model):
69 |
70 | if model == "Open-Unmix":
71 | return (self.load_OpenUnmix(model))
72 | else:
73 | return (self.load_HDemucs())
--------------------------------------------------------------------------------
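Note the two branches return differently shaped objects: Open-Unmix yields a bare `torch.nn.Module`, while Hybrid Demucs yields a dict with `demucs`, `model`, and `sample_rate` keys. Downstream nodes (see `AudioRemixer` below) dispatch on that difference; a minimal sketch of the check, with a hypothetical `describe_separation_model` helper:

```python
import torch

def describe_separation_model(audio_sep_model):
    # Open-Unmix: a bare module exposing .sample_rate
    if isinstance(audio_sep_model, torch.nn.Module):
        return "open-unmix", int(audio_sep_model.sample_rate)
    # Hybrid Demucs: a dict carrying the model and its sample rate
    if isinstance(audio_sep_model, dict) and audio_sep_model.get("demucs"):
        return "hybrid-demucs", int(audio_sep_model["sample_rate"])
    raise ValueError("Unrecognized audio separation model")
```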
/nodes/utils/FloatsVisualizer.py:
--------------------------------------------------------------------------------
1 | import torch
2 | import matplotlib.pyplot as plt
3 | import tempfile
4 | import numpy as np
5 | from PIL import Image
6 | from ... import Yvann
7 |
8 | class UtilsNodeBase(Yvann):
9 | CATEGORY = "👁️ Yvann Nodes/🛠️ Utils"
10 |
11 | class FloatsVisualizer(UtilsNodeBase):
12 | # Define class variables for line styles and colors
13 | line_styles = ["-", "--", "-."]
14 | line_colors = ["blue", "green", "red"]
15 |
16 | @classmethod
17 | def INPUT_TYPES(cls):
18 | return {
19 | "required": {
20 | "floats": ("FLOAT", {"forceInput": True}),
21 | "title": ("STRING", {"default": "Graph"}),
22 | "x_label": ("STRING", {"default": "X-Axis"}),
23 | "y_label": ("STRING", {"default": "Y-Axis"}),
24 | },
25 | "optional": {
26 | "floats_optional1": ("FLOAT", {"forceInput": True}),
27 | "floats_optional2": ("FLOAT", {"forceInput": True}),
28 | }
29 | }
30 |
31 | RETURN_TYPES = ("IMAGE",)
32 | RETURN_NAMES = ("visual_graph",)
33 | FUNCTION = "floats_to_graph"
34 |
35 | def floats_to_graph(self, floats, title="Graph", x_label="X-Axis", y_label="Y-Axis",
36 | floats_optional1=None, floats_optional2=None):
37 |
38 | try:
39 | # Create a list of tuples containing (label, data)
40 | floats_list = [("floats", floats)]
41 | if floats_optional1 is not None:
42 | floats_list.append(("floats_optional1", floats_optional1))
43 | if floats_optional2 is not None:
44 | floats_list.append(("floats_optional2", floats_optional2))
45 |
46 | # Convert all floats to NumPy arrays and ensure they are the same length
47 | processed_floats_list = []
48 | min_length = None
49 | for label, floats_data in floats_list:
50 | if isinstance(floats_data, list):
51 | floats_array = np.array(floats_data)
52 | elif isinstance(floats_data, torch.Tensor):
53 | floats_array = floats_data.cpu().numpy()
54 | else:
55 | raise ValueError(f"Unsupported type for '{label}' input")
56 | if min_length is None or len(floats_array) < min_length:
57 | min_length = len(floats_array)
58 | processed_floats_list.append((label, floats_array))
59 |
60 | # Truncate all arrays to the minimum length to match x-axis
61 | processed_floats_list = [
62 | (label, floats_array[:min_length]) for label, floats_array in processed_floats_list
63 | ]
64 |
65 | # Create the plot
66 | figsize = 12.0
67 | plt.figure(figsize=(figsize, figsize * 0.6), facecolor='white')
68 |
69 | x_values = range(min_length) # Use the minimum length
70 |
71 | for idx, (label, floats_array) in enumerate(processed_floats_list):
72 | color = self.line_colors[idx % len(self.line_colors)]
73 | style = self.line_styles[idx % len(self.line_styles)]
74 | plt.plot(x_values, floats_array, label=label, color=color, linestyle=style)
75 |
76 | plt.title(title)
77 | plt.xlabel(x_label)
78 | plt.ylabel(y_label)
79 | plt.grid(True)
80 | plt.legend()
81 |
82 | # Save the plot to a temporary file
83 | with tempfile.NamedTemporaryFile(delete=False, suffix=".png") as tmpfile:
84 | plt.savefig(tmpfile.name, format='png', bbox_inches='tight')
85 | tmpfile_path = tmpfile.name
86 | plt.close()
87 |
88 | # Load the image and convert to tensor
89 | visualization = Image.open(tmpfile_path).convert("RGB")
90 | visualization = np.array(visualization).astype(np.float32) / 255.0
91 |             visualization = torch.from_numpy(visualization).unsqueeze(0)  # Shape: [1, H, W, C]
92 |
93 | except Exception as e:
94 | print(f"Error in creating visualization: {e}")
95 | visualization = None
96 |
97 | return (visualization,)
98 |
--------------------------------------------------------------------------------
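A temp file is not strictly needed for the figure-to-tensor round trip; rendering into an in-memory buffer yields the same `[1, H, W, C]` float tensor that ComfyUI expects for IMAGE outputs. A minimal sketch of that alternative:

```python
import io
import matplotlib
matplotlib.use("Agg")  # render off-screen
import matplotlib.pyplot as plt
import numpy as np
import torch
from PIL import Image

plt.figure(figsize=(4, 3))
plt.plot([0.1, 0.8, 0.3], label="floats")
plt.legend()

buf = io.BytesIO()
plt.savefig(buf, format="png", bbox_inches="tight")
plt.close()
buf.seek(0)

image = Image.open(buf).convert("RGB")
tensor = torch.from_numpy(np.array(image).astype(np.float32) / 255.0).unsqueeze(0)
print(tensor.shape)  # torch.Size([1, H, W, 3])
```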
/nodes/audio/AudioPeaksDetection.py:
--------------------------------------------------------------------------------
1 | import torch
2 | import matplotlib.pyplot as plt
3 | from matplotlib.ticker import MaxNLocator # Import for integer x-axis labels
4 | import tempfile
5 | import numpy as np
6 | from PIL import Image
7 | from scipy.signal import find_peaks
8 | from ... import Yvann
9 |
10 | class AudioNodeBase(Yvann):
11 | CATEGORY = "👁️ Yvann Nodes/🔊 Audio"
12 |
13 | class AudioPeaksDetection(AudioNodeBase):
14 | @classmethod
15 | def INPUT_TYPES(cls):
16 | return {
17 | "required": {
18 | "audio_weights": ("FLOAT", {"forceInput": True}),
19 | "peaks_threshold": ("FLOAT", {"default": 0.4, "min": 0.0, "max": 1.0, "step": 0.01}),
20 | "min_peaks_distance": ("INT", {"default": 5, "min": 1, "max":100})
21 | }
22 | }
23 |
24 | RETURN_TYPES = ("FLOAT", "FLOAT", "STRING", "INT", "IMAGE")
25 | RETURN_NAMES = ("peaks_weights", "peaks_alternate_weights", "peaks_index", "peaks_count", "graph_peaks")
26 | FUNCTION = "detect_peaks"
27 |
28 | def detect_peaks(self, audio_weights, peaks_threshold, min_peaks_distance):
29 | if not isinstance(audio_weights, (list, np.ndarray)):
30 | print("Invalid audio_weights input")
31 |             return None, None, None, None, None
32 |
33 | audio_weights = np.array(audio_weights, dtype=np.float32)
34 |
35 | peaks, _ = find_peaks(audio_weights, height=peaks_threshold, distance=min_peaks_distance)
36 |
37 | # Generate binary peaks array: 1 for peaks, 0 for non-peaks
38 | peaks_binary = np.zeros_like(audio_weights, dtype=int)
39 | peaks_binary[peaks] = 1
40 |
41 | actual_value = 0
42 | peaks_alternate = np.zeros_like(peaks_binary)
43 |         for i in range(len(peaks_binary)):
44 | if peaks_binary[i] == 1:
45 | actual_value = 1 - actual_value
46 | peaks_alternate[i] = actual_value
47 |
48 | audio_peaks_index = np.array(peaks, dtype=int)
49 | audio_peaks_index = np.insert(audio_peaks_index, 0, 0)
50 |
51 | peaks_count = len(audio_peaks_index)
52 | str_peaks_index = ', '.join(map(str, audio_peaks_index))
53 | # Generate visualization
54 | try:
55 | figsize = 12.0
56 | plt.figure(figsize=(figsize, figsize * 0.6), facecolor='white')
57 | plt.plot(range(0, len(audio_weights)), audio_weights, label='Audio Weights', color='blue', alpha=0.5)
58 | plt.scatter(peaks, audio_weights[peaks], color='red', label='Detected Peaks')
59 |
60 | plt.xlabel('Frames')
61 | plt.ylabel('Audio Weights')
62 | plt.title('Audio Weights and Detected Peaks')
63 | plt.legend()
64 | plt.grid(True)
65 |
66 | # Ensure x-axis labels are integers
67 | ax = plt.gca()
68 | ax.xaxis.set_major_locator(MaxNLocator(integer=True))
69 |
70 | with tempfile.NamedTemporaryFile(delete=False, suffix=".png") as tmpfile:
71 | plt.savefig(tmpfile.name, format='png', bbox_inches='tight')
72 | tmpfile_path = tmpfile.name
73 | plt.close()
74 |
75 | visualization = Image.open(tmpfile_path).convert("RGB")
76 | visualization = np.array(visualization).astype(np.float32) / 255.0
77 | visualization = torch.from_numpy(visualization).unsqueeze(0) # Shape: [1, H, W, C]
78 |
79 | except Exception as e:
80 | print(f"Error in creating visualization: {e}")
81 | visualization = None
82 |
83 | return (peaks_binary.tolist(), peaks_alternate.tolist(), str_peaks_index, peaks_count, visualization)
84 |
--------------------------------------------------------------------------------
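`find_peaks` keeps local maxima whose amplitude exceeds `height` and enforces a minimum sample gap via `distance`, which is how `min_peaks_distance` suppresses clusters of near-duplicate peaks. A toy run showing the binary mask the node derives:

```python
import numpy as np
from scipy.signal import find_peaks

audio_weights = np.array([0.1, 0.9, 0.3, 0.95, 0.2, 0.1, 0.8, 0.1], dtype=np.float32)
peaks, _ = find_peaks(audio_weights, height=0.4, distance=2)
print(peaks.tolist())  # [1, 3, 6]

peaks_binary = np.zeros_like(audio_weights, dtype=int)
peaks_binary[peaks] = 1
print(peaks_binary.tolist())  # [0, 1, 0, 1, 0, 0, 1, 0]
```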
/nodes/audio/EditAudioWeights.py:
--------------------------------------------------------------------------------
1 | from ... import Yvann
2 | import numpy as np
3 | import matplotlib.pyplot as plt
4 | from matplotlib.ticker import MaxNLocator
5 | import tempfile
6 | from PIL import Image
7 | import torch
8 |
9 | class AudioNodeBase(Yvann):
10 | CATEGORY = "👁️ Yvann Nodes/🔊 Audio"
11 |
12 | class EditAudioWeights(AudioNodeBase):
13 | @classmethod
14 | def INPUT_TYPES(cls):
15 | return {
16 | "required": {
17 | "any_audio_weights": ("FLOAT", {"forceInput": True}),
18 | "smooth": ("FLOAT", {"default": 0.0, "min": 0.0, "max": 1.0, "step": 0.01}),
19 | "min_range": ("FLOAT", {"default": 0.0, "min": 0.0, "max": 2.99, "step": 0.01}),
20 | "max_range": ("FLOAT", {"default": 1, "min": 0.01, "max": 3, "step": 0.01}),
21 | }
22 | }
23 |
24 | RETURN_TYPES = ("FLOAT", "IMAGE")
25 | RETURN_NAMES = ("process_weights", "graph_audio")
26 | FUNCTION = "process_any_audio_weights"
27 |
28 | CATEGORY = "👁️ Yvann Nodes/🔊 Audio"
29 |
30 | def process_any_audio_weights(self, any_audio_weights, smooth, min_range, max_range):
31 | if not isinstance(any_audio_weights, (list, np.ndarray)):
32 | print("Invalid any_audio_weights input")
33 | return None
34 |
35 | any_audio_weights = np.array(any_audio_weights, dtype=np.float32)
36 |
37 | # Apply smoothing
38 | smoothed_signal = np.zeros_like(any_audio_weights)
39 | for i in range(len(any_audio_weights)):
40 | if i == 0:
41 | smoothed_signal[i] = any_audio_weights[i]
42 | else:
43 | smoothed_signal[i] = smoothed_signal[i-1] * smooth + any_audio_weights[i] * (1 - smooth)
44 |
45 | # Normalize the smoothed signal
46 | min_val = np.min(smoothed_signal)
47 | max_val = np.max(smoothed_signal)
48 | if max_val - min_val != 0:
49 | normalized_signal = (smoothed_signal - min_val) / (max_val - min_val)
50 | else:
51 | normalized_signal = smoothed_signal - min_val # All values are the same
52 |
53 | # Rescale to specified range
54 | rescaled_signal = normalized_signal * (max_range - min_range) + min_range
55 |         rescaled_signal = rescaled_signal.tolist()
56 |
57 | rounded_rescaled_signal = [round(float(elem), 6) for elem in rescaled_signal]
58 |
59 | # Plot the rescaled signal
60 | try:
61 | figsize = 12.0
62 | plt.figure(figsize=(figsize, figsize * 0.6), facecolor='white')
63 | plt.plot(
64 | list(range(len(rounded_rescaled_signal))),
65 | rounded_rescaled_signal,
66 | label='Processed Weights',
67 | color='blue'
68 | )
69 | plt.xlabel('Frames')
70 | plt.ylabel('Weights')
71 | plt.title('Processed Audio Weights')
72 | plt.legend()
73 | plt.grid(True)
74 |
75 | ax = plt.gca()
76 | ax.xaxis.set_major_locator(MaxNLocator(integer=True))
77 |
78 | with tempfile.NamedTemporaryFile(delete=False, suffix=".png") as tmpfile:
79 | plt.savefig(tmpfile.name, format='png', bbox_inches='tight')
80 | tmpfile_path = tmpfile.name
81 | plt.close()
82 |
83 | weights_graph = Image.open(tmpfile_path).convert("RGB")
84 | weights_graph = np.array(weights_graph).astype(np.float32) / 255.0
85 | weights_graph = torch.from_numpy(weights_graph).unsqueeze(0)
86 | except Exception as e:
87 | print(f"Error in creating weights graph: {e}")
88 | weights_graph = torch.zeros((1, 400, 300, 3))
89 |
90 | return (rounded_rescaled_signal, weights_graph)
91 |
--------------------------------------------------------------------------------
/__init__.py:
--------------------------------------------------------------------------------
1 | # Thanks to RyanOnTheInside, KJNodes, MTB, Fill, Akatz, and Matheo; their work helped me a lot
2 | from pathlib import Path
3 | from aiohttp import web
4 | from .node_configs import CombinedMeta
5 | from collections import OrderedDict
6 | from server import PromptServer
7 |
8 | class Yvann(metaclass=CombinedMeta):
9 | @classmethod
10 | def get_description(cls):
11 | footer = "\n\n"
12 | footer = "#### 🐙 Docs, Workflows and Code: [Yvann-Nodes GitHub](https://github.com/yvann-ba/ComfyUI_Yvann-Nodes) "
13 | footer += " 👁️ Tutorials: [Yvann Youtube](https://www.youtube.com/@yvann.mp4)\n"
14 |
15 | desc = ""
16 |
17 | if hasattr(cls, 'DESCRIPTION'):
18 | desc += f"{cls.DESCRIPTION}\n\n{footer}"
19 | return desc
20 |
21 | if hasattr(cls, 'TOP_DESCRIPTION'):
22 | desc += f"{cls.TOP_DESCRIPTION}\n\n"
23 |
24 | if hasattr(cls, "BASE_DESCRIPTION"):
25 | desc += cls.BASE_DESCRIPTION + "\n\n"
26 |
27 | additional_info = OrderedDict()
28 | for c in cls.mro()[::-1]:
29 | if hasattr(c, 'ADDITIONAL_INFO'):
30 | info = c.ADDITIONAL_INFO.strip()
31 | additional_info[c.__name__] = info
32 |
33 | if additional_info:
34 | desc += "\n\n".join(additional_info.values()) + "\n\n"
35 |
36 | if hasattr(cls, 'BOTTOM_DESCRIPTION'):
37 | desc += f"{cls.BOTTOM_DESCRIPTION}\n\n"
38 |
39 | desc += footer
40 | return desc
41 |
42 | from .nodes.audio.LoadAudioSeparationModel import LoadAudioSeparationModel
43 | from .nodes.audio.AudioAnalysis import AudioAnalysis
44 | from .nodes.audio.AudioPeaksDetection import AudioPeaksDetection
45 | from .nodes.audio.AudioIPAdapterTransitions import AudioIPAdapterTransitions
46 | from .nodes.audio.AudioPromptSchedule import AudioPromptSchedule
47 | from .nodes.audio.EditAudioWeights import EditAudioWeights
48 | from .nodes.audio.AudioRemixer import AudioRemixer
49 | #from .nodes.audio.AudioControlNetSchedule import AudioControlNetSchedule
50 |
51 | from .nodes.utils.RepeatImageToCount import RepeatImageToCount
52 | from .nodes.utils.InvertFloats import InvertFloats
53 | from .nodes.utils.FloatsVisualizer import FloatsVisualizer
54 |
55 | from .nodes.convert.MaskToFloat import MaskToFloat
56 | from .nodes.convert.FloatsToWeightsStrategy import FloatsToWeightsStrategy
57 | from .nodes.convert.FloatToInt import FloatToInt
58 |
59 | #"Audio ControlNet Schedule": AudioControlNetSchedule,
60 | NODE_CLASS_MAPPINGS = {
61 | "Load Audio Separation Model": LoadAudioSeparationModel,
62 | "Audio Analysis": AudioAnalysis,
63 | "Audio Peaks Detection": AudioPeaksDetection,
64 | "Audio IPAdapter Transitions": AudioIPAdapterTransitions,
65 | "Audio Prompt Schedule": AudioPromptSchedule,
66 | "Edit Audio Weights": EditAudioWeights,
67 | "Audio Remixer": AudioRemixer,
68 |
69 | "Repeat Image To Count": RepeatImageToCount,
70 | "Invert Floats": InvertFloats,
71 | "Floats Visualizer": FloatsVisualizer,
72 |
73 | "Mask To Float": MaskToFloat,
74 | "Floats To Weights Strategy": FloatsToWeightsStrategy,
75 | "Float to Int": FloatToInt
76 | }
77 |
78 | WEB_DIRECTORY = "./web/js"
79 |
80 | #"Audio ControlNet Schedule": "Audio ControlNet Schedule",
81 | NODE_DISPLAY_NAME_MAPPINGS = {
82 | "Load Audio Separation Model": "Load Audio Separation Model",
83 | "Audio Analysis": "Audio Analysis",
84 | "Audio Peaks Detection": "Audio Peaks Detection",
85 | "Audio IPAdapter Transitions": "Audio IPAdapter Transitions",
86 | "Audio Prompt Schedule": "Audio Prompt Schedule",
87 | "Edit Audio Weights": "Edit Audio Weights",
88 | "Audio Remixer": "Audio Remixer",
89 |
90 | "Repeat Image To Count": "Repeat Image To Count",
91 | "Invert Floats": "Invert Floats",
92 | "Floats Visualizer": "Floats Visualizer",
93 |
94 | "Mask To Float": "Mask To Float",
95 | "Floats To Weights Strategy": "Floats To Weights Strategy",
96 | "Float to Int" : "Float to Int",
97 | }
98 |
99 | Yvann_Print = """
100 | 🔊 Yvann Audio Reactive & Utils Nodes"""
101 |
102 | print("\033[38;5;195m" + Yvann_Print +
103 | "\033[38;5;222m" + " : Loaded\n" + "\033[0m")
104 |
105 |
106 | if hasattr(PromptServer, "instance"):
107 |     # NOTE: we add an extra static path to avoid ComfyUI's mechanism
108 |     # that auto-loads every script in the web directory.
109 | PromptServer.instance.app.add_routes(
110 | [web.static("/yvann_web_async",
111 | (Path(__file__).parent.absolute() / "yvann_web_async").as_posix())]
112 | )
113 |
114 | for node_name, node_class in NODE_CLASS_MAPPINGS.items():
115 | if hasattr(node_class, 'get_description'):
116 | desc = node_class.get_description()
117 | node_class.DESCRIPTION = desc
118 |
119 | __all__ = ["NODE_CLASS_MAPPINGS",
120 | "NODE_DISPLAY_NAME_MAPPINGS", "WEB_DIRECTORY"]
121 |
--------------------------------------------------------------------------------
/nodes/audio/AudioIPAdapterTransitions.py:
--------------------------------------------------------------------------------
1 | import torch
2 | import matplotlib.pyplot as plt
3 | from matplotlib.ticker import MaxNLocator
4 | import tempfile
5 | import numpy as np
6 | import math
7 | from PIL import Image
8 | from ... import Yvann
9 |
10 | class AudioNodeBase(Yvann):
11 | CATEGORY = "👁️ Yvann Nodes/🔊 Audio"
12 |
13 | class AudioIPAdapterTransitions(AudioNodeBase):
14 | @classmethod
15 | def INPUT_TYPES(cls):
16 | return {
17 | "required": {
18 | "images": ("IMAGE", {"forceInput": True}),
19 | "peaks_weights": ("FLOAT", {"forceInput": True}),
20 | "transition_mode": (["linear", "ease_in_out", "ease_in", "ease_out"], {"default": "linear"}),
21 | "transition_length": ("INT", {"default": 5, "min": 1, "max":100, "step": 2}),
22 | "min_IPA_weight": ("FLOAT", {"default": 0.0, "min": 0.0, "max": 1.99, "step": 0.01}),
23 | "max_IPA_weight": ("FLOAT", {"default": 1.0, "min": 0.1, "max": 2.0, "step": 0.01}),
24 | }
25 | }
26 |
27 | RETURN_TYPES = ("IMAGE", "FLOAT", "IMAGE", "FLOAT", "IMAGE")
28 | RETURN_NAMES = ("image_1", "weights", "image_2", "weights_invert", "graph_transition")
29 | FUNCTION = "process_transitions"
30 |
31 | def process_transitions(self, images, peaks_weights, transition_mode, transition_length, min_IPA_weight, max_IPA_weight):
32 |
33 | if not isinstance(peaks_weights, (list, np.ndarray)):
34 | print("Invalid peaks_weights input")
35 | return None, None, None, None, None
36 |
37 | # Convert peaks_weights to numpy array and ensure it's binary (0 or 1)
38 | peaks_binary = np.array(peaks_weights, dtype=int)
39 | total_frames = len(peaks_binary)
40 |
41 | # Generate switch indices by incrementing index at each peak
42 | switch_indices = []
43 | index_value = 0
44 | for peak in peaks_binary:
45 | if peak == 1:
46 | index_value += 1
47 | switch_indices.append(index_value)
48 |
49 | # images is a batch of images: tensor of shape [B, H, W, C]
50 | if images.dim() == 3:
51 | images = images.unsqueeze(0) # Add batch dimension if missing
52 |
53 | num_images = images.shape[0]
54 | if num_images < 2:
55 | print("At least two images are required for transitions.")
56 | return None, None, None, None, None
57 |
58 | unique_indices = sorted(set(switch_indices))
59 | num_indices = len(unique_indices)
60 |
61 | # Map indices to image indices (cycling through images if necessary)
62 | image_indices = [i % num_images for i in unique_indices]
63 |
64 | # Create a mapping from switch index to image
65 | image_mapping = {idx: images[image_idx] for idx, image_idx in zip(unique_indices, image_indices)}
66 |
67 | # Initialize blending_weights, images1, images2
68 | blending_weights = np.zeros(total_frames, dtype=np.float32)
69 | images1 = [image_mapping[switch_indices[i]] for i in range(total_frames)]
70 | images2 = images1.copy()
71 |
72 | # Identify frames where index changes
73 | change_frames = [i for i in range(1, total_frames) if switch_indices[i] != switch_indices[i - 1]]
74 |
75 | # For each transition, compute blending weights
76 | for change_frame in change_frames:
77 | start = max(0, change_frame - transition_length // 2)
78 | end = min(total_frames, change_frame + (transition_length + 1) // 2)
79 | n = end - start - 1
80 | idx_prev = switch_indices[change_frame - 1] if change_frame > 0 else switch_indices[change_frame]
81 | idx_next = switch_indices[change_frame]
82 |
83 | for i in range(start, end):
84 | t = (i - start) / n if n > 0 else 1.0
85 |
86 | # Compute blending weight based on transition_mode
87 | if transition_mode == "linear":
88 | blending_weight = t
89 | elif transition_mode == "ease_in_out":
90 | blending_weight = (1 - math.cos(t * math.pi)) / 2
91 | elif transition_mode == "ease_in":
92 | blending_weight = math.sin(t * math.pi / 2)
93 | elif transition_mode == "ease_out":
94 | blending_weight = 1 - math.cos(t * math.pi / 2)
95 | else:
96 | blending_weight = t
97 |
98 | blending_weight = min(max(blending_weight, 0.0), 1.0)
99 |
100 | # Update blending_weights
101 | blending_weights[i] = blending_weight
102 |
103 | # Update images1 and images2
104 | images1[i] = image_mapping[idx_prev]
105 | images2[i] = image_mapping[idx_next]
106 |
107 | # Now, blending_weights correspond to image_2
108 | blending_weights_raw = blending_weights.copy() # Keep the raw weights for internal use
109 |
110 | # Apply custom range to weights
111 | blending_weights = blending_weights * (max_IPA_weight - min_IPA_weight) + min_IPA_weight
112 | blending_weights = [round(w, 6) for w in blending_weights]
113 | weights_invert = [(max_IPA_weight + min_IPA_weight) - w for w in blending_weights]
114 | weights_invert = [round(w, 6) for w in weights_invert]
115 |
116 | # Convert lists to tensors
117 | images1 = torch.stack(images1)
118 | images2 = torch.stack(images2)
119 | blending_weights_tensor = torch.tensor(blending_weights_raw, dtype=images1.dtype).view(-1, 1, 1, 1)
120 |
121 | # Ensure blending weights are compatible with image dimensions
122 | blending_weights_tensor = blending_weights_tensor.to(images1.device)
123 |
124 | # Generate visualization of transitions
125 | try:
126 | figsize = 12.0
127 | plt.figure(figsize=(figsize, 8), facecolor='white')
128 |
129 | blending_weights_array = np.array(blending_weights_raw)
130 | plt.plot(range(0, len(blending_weights_array)), blending_weights_array, label='Blending Weights', color='green', alpha=0.7)
131 | plt.scatter(change_frames, blending_weights_array[change_frames], color='red', label='Transition')
132 |
133 | plt.xlabel('Frames', fontsize=12)
134 | plt.title('Images Transitions over Frames', fontsize=14)
135 | plt.legend()
136 | plt.grid(True)
137 |
138 | # Remove Y-axis labels
139 | plt.yticks([])
140 |
141 | # Ensure x-axis labels are integers
142 | ax = plt.gca()
143 | ax.xaxis.set_major_locator(MaxNLocator(integer=True, prune='both'))
144 |
145 | with tempfile.NamedTemporaryFile(delete=False, suffix=".png") as tmpfile:
146 | plt.savefig(tmpfile.name, format='png', bbox_inches='tight')
147 | tmpfile_path = tmpfile.name
148 | plt.close()
149 |
150 | visualization = Image.open(tmpfile_path).convert("RGB")
151 | visualization = np.array(visualization).astype(np.float32) / 255.0
152 |             visualization = torch.from_numpy(visualization).unsqueeze(0)  # Shape: [1, H, W, C]
153 |
154 | except Exception as e:
155 | print(f"Error in creating visualization: {e}")
156 | visualization = None
157 |
158 | # Return values with adjusted weights and images
159 | return images2, blending_weights, images1, weights_invert, visualization
160 |
--------------------------------------------------------------------------------
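For reference, the four transition curves map normalized time `t` in `[0, 1]` to the blending weight of `image_2`; with the standard easing definitions used above, ease-in starts slowly and ease-out ends slowly. Sampling each curve at five points:

```python
import math

curves = {
    "linear": lambda t: t,
    "ease_in_out": lambda t: (1 - math.cos(t * math.pi)) / 2,
    "ease_in": lambda t: 1 - math.cos(t * math.pi / 2),   # slow start
    "ease_out": lambda t: math.sin(t * math.pi / 2),      # slow end
}
for name, f in curves.items():
    print(name, [round(f(t / 4), 3) for t in range(5)])
# linear [0.0, 0.25, 0.5, 0.75, 1.0]
# ease_in_out [0.0, 0.146, 0.5, 0.854, 1.0]
# ease_in [0.0, 0.076, 0.293, 0.617, 1.0]
# ease_out [0.0, 0.383, 0.707, 0.924, 1.0]
```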
/node_configs.py:
--------------------------------------------------------------------------------
1 | #NOTE: this abstraction allows for both the documentation to be centrally managed and inherited
2 | from abc import ABCMeta
3 | class NodeConfigMeta(type):
4 | def __new__(cls, name, bases, attrs):
5 | new_class = super().__new__(cls, name, bases, attrs)
6 | if name in NODE_CONFIGS:
7 | for key, value in NODE_CONFIGS[name].items():
8 | setattr(new_class, key, value)
9 | return new_class
10 |
11 | class CombinedMeta(NodeConfigMeta, ABCMeta):
12 | pass
13 |
14 | def add_node_config(node_name, config):
15 | NODE_CONFIGS[node_name] = config
16 |
17 | NODE_CONFIGS = {}
18 |
19 | add_node_config("LoadAudioSeparationModel", {
20 | "BASE_DESCRIPTION": """
21 | Loads an audio separation model; if not cached locally,
22 | it is downloaded automatically (e.g. to `ComfyUI/models/openunmix/`)
23 |
24 | **Parameters:**
25 |
26 | - **model**: Audio separation model to load
27 |   - [HybridDemucs](https://github.com/facebookresearch/demucs): Most accurate, fastest, and most lightweight
28 |   - [OpenUnmix](https://github.com/sigsep/open-unmix-pytorch): Alternative model
29 |
30 | **Outputs:**
31 |
32 | - **audio_sep_model**: Loaded audio separation model
33 | Connect it to "Audio Analysis" or "Audio Remixer"
34 | """
35 | })
36 |
37 | add_node_config("AudioAnalysis", {
38 | "BASE_DESCRIPTION": """
39 | Analyzes audio to generate reactive weights and a graph
40 | Can extract specific elements like drums, vocals, bass
41 | Parameters allow manual control over audio weights
42 |
43 | **Inputs:**
44 |
45 | - **audio_sep_model**: Loaded model from "Load Audio Separation Model"
46 | - **audio**: Input audio file
47 | - **batch_size**: Number of frames to associate with audio weights
48 | - **fps**: Frames per second for processing audio weights
49 |
50 | **Parameters:**
51 |
52 | - **analysis_mode**: Select audio component to analyze
53 | - **threshold**: Minimum weight value to pass through
54 | - **multiply**: Amplification factor for weights before normalization
55 |
56 | **Outputs:**
57 |
58 | - **graph_audio**: Graph image of audio weights over frames
59 | - **processed_audio**: Separated or processed audio (e.g., drums, vocals)
60 | - **original_audio**: Original unmodified audio input
61 | - **audio_weights**: List of audio-reactive weights based on processed audio
62 | """
63 | })
64 |
65 | add_node_config("AudioPeaksDetection", {
66 | "BASE_DESCRIPTION": """
67 | Detects peaks in audio weights based on a threshold and minimum distance
68 | Identifies significant audio events to trigger visual changes or actions
69 |
70 | **Inputs:**
71 |
72 | - **audio_weights**: "audio_weights" from "Audio Analysis"
73 |
74 | **Parameters:**
75 |
76 | - **peaks_threshold**: Threshold for peak detection
77 | - **min_peaks_distance**: Minimum number of frames between consecutive peaks;
78 | helps remove unwanted peaks clustered around large ones
79 |
80 | **Outputs:**
81 |
82 | - **peaks_weights**: Binary list indicating peak presence (1 for a peak, 0 otherwise)
83 | - **peaks_alternate_weights**: Alternating binary list based on detected peaks
84 | - **peaks_index**: String of peak indices
85 | - **peaks_count**: Total number of detected peaks
86 | - **graph_peaks**: Visualization image of detected peaks over audio weights
87 | """
88 | })
89 |
90 | add_node_config("AudioIPAdapterTransitions", {
91 | "BASE_DESCRIPTION": """
92 | Uses "peaks_weights" from "Audio Peaks Detection" to control image transitions based on audio peaks
93 | Outputs images and weights for two IPAdapter batches; logic adapted from "IPAdapter Weights" in [IPAdapter_Plus](https://github.com/cubiq/ComfyUI_IPAdapter_plus)
94 |
95 | **Inputs:**
96 |
97 | - **images**: Batch of images for transitions; images are cycled to match the peak count
98 | - **peaks_weights**: List of audio peaks from "Audio Peaks Detection"
99 |
100 | **Parameters:**
101 |
102 | - **transition_mode**: Transition curve applied to the weights
103 | - **transition_length**: Number of frames used to blend between images
104 | - **min_IPA_weight**: Minimum weight applied by IPAdapter per frame
105 | - **max_IPA_weight**: Maximum weight applied by IPAdapter per frame
106 |
107 | **Outputs:**
108 |
109 | - **image_1**: Starting image for each transition. Connect to the first IPAdapter batch "image"
110 | - **weights**: Blending weights for transitions. Connect to the first IPAdapter batch "weight"
111 | - **image_2**: Ending image for each transition. Connect to the second IPAdapter batch "image"
112 | - **weights_invert**: Inverted weights. Connect to the second IPAdapter batch "weight"
113 | - **graph_transition**: Visualization of weight transitions over frames
114 | """
115 | })
116 |
117 | add_node_config("AudioPromptSchedule", {
118 | "BASE_DESCRIPTION": """
119 | Associates "prompts" with "peaks_index" in a scheduled format
120 | Connect the output to "Batch Prompt Schedule" from [Fizz Nodes](https://github.com/FizzleDorf/ComfyUI_FizzNodes)
121 | Separate individual prompts with an empty line
122 |
123 | **Inputs:**
124 |
125 | - **peaks_index**: Frames where peaks occur, from "Audio Peaks Detection"
126 | - **prompts**: Multiline string of prompts for each index
127 |
128 | **Outputs:**
129 |
130 | - **prompt_schedule**: String mapping each audio index to a prompt
131 | """
132 | })
133 |
134 | add_node_config("EditAudioWeights", {
135 | "BASE_DESCRIPTION": """
136 | Smooths and rescales audio weights. Connect to "Multival [Float List]"
137 | from [AnimateDiff-Evolved](https://github.com/Kosinkadink/ComfyUI-AnimateDiff-Evolved) to schedule motion with audio
138 | or to "Latent Keyframe From List" to schedule ControlNet Apply
139 |
140 | **Inputs:**
141 |
142 | - **any_audio_weights**: Audio weights from "Audio Peaks Detection"
143 | or "Audio Analysis" (any *_weights output works)
144 |
145 | **Parameters:**
146 |
147 | - **smooth**: Smoothing factor (0.0 to 1.0) Higher values result in smoother transitions
148 | - **min_range**: Minimum value of the rescaled weights
149 | AnimateDiff multival works best in the 0.9 to 1.3 range
150 | - **max_range**: Maximum value of the rescaled weights
151 |
152 | **Outputs:**
153 |
154 | - **process_weights**: Smoothed and rescaled audio weights; connect to an AnimateDiff multival to drive motion with audio
155 | - **graph_audio**: Graph image of the processed weights over frames
156 | """
157 | })
158 |
159 | add_node_config("AudioRemixer", {
160 | "BASE_DESCRIPTION": """
161 | Modifies input audio by adjusting the intensity of drums, bass, vocals, or other elements
162 |
163 | **Inputs:**
164 |
165 | - **audio_sep_model**: Loaded model from "Load Audio Separation Model"
166 | - **audio**: Input audio file
167 |
168 | **Parameters:**
169 |
170 | - **bass_volume**: Adjusts bass volume
171 | - **drums_volume**: Adjusts drums volume
172 | - **others_volume**: Adjusts the volume of other elements
173 | - **vocals_volume**: Adjusts vocals volume
174 |
175 | **Outputs:**
176 |
177 | - **merged_audio**: Composition of separated tracks with applied modifications
178 | """
179 | })
180 |
181 | add_node_config("RepeatImageToCount", {
182 | "BASE_DESCRIPTION": """
183 | Repeats a batch of images to reach a target count, cycling through the inputs when the count exceeds the number of images
184 | **Inputs:**
185 |
186 | - **image**: Batch of input images to repeat
187 | - **count**: Total number of output images
188 |
189 | **Outputs:**
190 |
191 | - **images**: Batch of repeated images matching the specified count
192 | """
193 | })
194 |
195 | add_node_config("InvertFloats", {
196 | "BASE_DESCRIPTION": """
197 | Inverts each value in a list of floats
198 |
199 | **Inputs:**
200 |
201 | - **floats**: List of float values to invert
202 |
203 | **Outputs:**
204 |
205 | - **inverted_floats**: Inverted list of float values
206 | """
207 | })
208 |
209 | add_node_config("FloatsVisualizer", {
210 | "BASE_DESCRIPTION": """
211 | Generates a graph from floats for visual data comparison
212 | Useful to compare audio weights
213 |
214 | **Inputs:**
215 |
216 | - **floats**: Primary list of floats to visualize
217 | - **floats_optional1**: (Optional) Second list of floats
218 | - **floats_optional2**: (Optional) Third list of floats
219 |
220 | **Parameters:**
221 |
222 | - **title**: Graph title
223 | - **x_label**: Label for the x-axis
224 | - **y_label**: Label for the y-axis
225 |
226 | **Outputs:**
227 |
228 | - **visual_graph**: Visual graph of provided floats
229 | """
230 | })
231 |
232 | add_node_config("MaskToFloat", {
233 | "BASE_DESCRIPTION": """
234 | Converts masks into floats (one mean pixel value per mask)
235 | Works with batches of masks
236 | **Inputs:**
237 |
238 | - **mask**: Mask input to convert
239 |
240 | **Outputs:**
241 |
242 | - **float**: Mean pixel value of each mask
243 | """
244 | })
245 |
246 | add_node_config("FloatsToWeightsStrategy", {
247 | "BASE_DESCRIPTION": """
248 | Converts a list of floats into an IPAdapter weights strategy format
249 | Use with "IPAdapter Weights From Strategy" or "Prompt Schedule From Weights Strategy"
250 | to integrate output into [IPAdapter](https://github.com/cubiq/ComfyUI_IPAdapter_plus) pipeline
251 |
252 | **Inputs:**
253 |
254 | - **floats**: List of float values to convert
255 |
256 | **Outputs:**
257 |
258 | - **WEIGHTS_STRATEGY**: Dictionary of the weights strategy
259 | """
260 | })
261 |
--------------------------------------------------------------------------------
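The metaclass pattern above means a node class never declares its own documentation: any class whose name matches a `NODE_CONFIGS` key gets the config's key/value pairs attached as class attributes at definition time. A minimal self-contained replica of that mechanism (hypothetical `DemoNode` for illustration):

```python
NODE_CONFIGS_DEMO = {"DemoNode": {"BASE_DESCRIPTION": "Does demo things"}}

class DemoMeta(type):
    def __new__(cls, name, bases, attrs):
        new_class = super().__new__(cls, name, bases, attrs)
        for key, value in NODE_CONFIGS_DEMO.get(name, {}).items():
            setattr(new_class, key, value)  # injected at class-creation time
        return new_class

class DemoNode(metaclass=DemoMeta):
    pass

print(DemoNode.BASE_DESCRIPTION)  # Does demo things
```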
/nodes/audio/AudioRemixer.py:
--------------------------------------------------------------------------------
1 | import torch
2 | import torchaudio
3 |
4 | # from IPython.display import Audio
5 | # from mir_eval import separation
6 | from torchaudio.pipelines import HDEMUCS_HIGH_MUSDB_PLUS
7 |
8 | from typing import Dict, Tuple, Any
9 | from torchaudio.transforms import Fade, Resample
10 | from termcolor import colored
11 |
12 | from ... import Yvann
13 |
14 | class AudioNodeBase(Yvann):
15 | CATEGORY = "👁️ Yvann Nodes/🔊 Audio"
16 |
17 | class AudioRemixer(AudioNodeBase):
18 |
19 | @classmethod
20 | def INPUT_TYPES(cls):
21 | return {
22 | "required": {
23 | "audio_sep_model": ("AUDIO_SEPARATION_MODEL", {"forceInput": True}),
24 | "audio": ("AUDIO", {"forceInput": True}),
25 | "drums_volume": ("FLOAT", {"default": 0.0, "min": -10.0, "max": 10, "step": 0.1}),
26 | "vocals_volume": ("FLOAT", {"default": 0.0, "min": -10.0, "max": 10, "step": 0.1}),
27 | "bass_volume": ("FLOAT", {"default": 0.0, "min": -10.0, "max": 10, "step": 0.1}),
28 | "others_volume": ("FLOAT", {"default": 0.0, "min": -10.0, "max": 10, "step": 0.1}),
29 | }
30 | }
31 |
32 | RETURN_TYPES = ("AUDIO",)
33 | RETURN_NAMES = ("merged_audio",)
34 | FUNCTION = "main"
35 |
36 |
37 | def main(self, audio_sep_model, audio: Dict[str, torch.Tensor], drums_volume: float, vocals_volume: float, bass_volume: float, others_volume: float) -> tuple[torch.Tensor]:
38 |
39 | model = audio_sep_model
40 | # 1. Prepare audio and device
41 | device, waveform = self.prepare_audio_and_device(audio)
42 |
43 | # 2. Apply model and extract sources
44 | sources, sources_list = self.apply_model_and_extract_sources(model, waveform, device)
45 |
46 | if sources is None:
47 | return None # Return if the model is unrecognized
48 |
49 | # 3. Adjust volumes and merge sources
50 | merge_audio = self.process_and_merge_audio(sources, sources_list, drums_volume, vocals_volume, bass_volume, others_volume)
51 |
52 | return (merge_audio,)
53 |
54 |
55 | def prepare_audio_and_device(self, audio: Dict[str, torch.Tensor]) -> Tuple[torch.device, torch.Tensor]:
56 | """Prepares the device (GPU or CPU) and sets up the audio waveform."""
57 | device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
58 | waveform = audio['waveform'].squeeze(0).to(device)
59 | self.audio_sample_rate = audio['sample_rate']
60 | return device, waveform
61 |
62 |
63 | def apply_model_and_extract_sources(self, model, waveform: torch.Tensor, device: torch.device) -> Tuple[torch.Tensor, list[str]]:
64 | """Applies the model and extracts audio sources, handling both Open-Unmix and GDemucs cases."""
65 | sources, sources_list = None, []
66 |
67 | if isinstance(model, torch.nn.Module): # Open-Unmix model
68 | print(colored("Applying Open_Unmix model on audio.", 'green'))
69 | self.model_sample_rate = int(model.sample_rate)
70 |
71 | if self.audio_sample_rate != self.model_sample_rate:
72 | resampler = torchaudio.transforms.Resample(orig_freq=self.audio_sample_rate, new_freq=self.model_sample_rate).to(device)
73 | waveform = resampler(waveform)
74 | sources = model(waveform.unsqueeze(0)).squeeze(0)
75 | sources_list = ['bass', 'drums', 'other', 'vocals']
76 |
77 | elif "demucs" in model and model["demucs"]: # GDemucs model
78 | print(colored("Applying GDemucs model on audio", 'green'))
79 | self.model_sample_rate = int(model["sample_rate"])
80 | model = model["model"]
81 |
82 | if self.audio_sample_rate != self.model_sample_rate:
83 | resampler = torchaudio.transforms.Resample(orig_freq=self.audio_sample_rate, new_freq=self.model_sample_rate).to(device)
84 | waveform = resampler(waveform)
85 | ref = waveform.mean(0)
86 | waveform = (waveform - ref.mean()) / ref.std()
87 | sources = self.separate_sources(model, waveform[None], segment=10.0, overlap=0.1, device=device)[0]
88 | sources = sources * ref.std() + ref.mean()
89 | sources_list = model.sources
90 |
91 | else:
92 | print(colored("Unrecognized model type", 'red'))
93 | return None, []
94 |
95 | return sources, sources_list
96 |
97 |
98 | def process_and_merge_audio(self, sources: torch.Tensor, sources_list: list[str], drums_volume: float, vocals_volume: float, bass_volume: float, others_volume: float) -> torch.Tensor:
99 | """Adjusts source volumes and merges them into a single audio output"""
100 | required_sources = ['bass', 'drums', 'other', 'vocals']
101 | for source in required_sources:
102 | if source not in sources_list:
103 | print(colored(f"Warning: '{source}' not found in sources_list", 'yellow'))
104 |
105 | # Adjust volume levels
106 | drums_volume = self.adjust_volume_range(drums_volume)
107 | vocals_volume = self.adjust_volume_range(vocals_volume)
108 | bass_volume = self.adjust_volume_range(bass_volume)
109 | others_volume = self.adjust_volume_range(others_volume)
110 |
111 | # Convert to tuple and blend
112 | audios = self.sources_to_tuple(drums_volume, vocals_volume, bass_volume, others_volume, dict(zip(sources_list, sources)))
113 | return self.blend_audios([audios[0]["waveform"], audios[1]["waveform"], audios[2]["waveform"], audios[3]["waveform"]])
114 |
115 | def adjust_volume_range(self, value):
116 | if value <= -10:
117 | return 0
118 | elif value >= 10:
119 | return 10
120 | elif value <= 0:
121 | return (value + 10) / 10
122 | else:
123 | return 1 + (value / 10) * 9
124 |
125 | def blend_audios(self, audio_tensors):
126 | blended_audio = sum(audio_tensors)
127 |
128 | if blended_audio.dim() == 2:
129 | blended_audio = blended_audio.unsqueeze(0)
130 |
131 | return {
132 | "waveform": blended_audio.cpu(),
133 | "sample_rate": self.model_sample_rate,
134 | }
135 |
136 | def sources_to_tuple(self, drums_volume, vocals_volume, bass_volume, others_volume, sources: Dict[str, torch.Tensor]) -> Tuple[Any, Any, Any, Any]:
137 |
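        # Samples at or below `threshold` in magnitude keep their original level;
        # with a threshold of 0.00 the volume factor applies to every non-zero sample.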
138 | threshold = 0.00
139 |
140 | output_order = ["bass", "drums", "other", "vocals"]
141 | outputs = []
142 |
143 | for source in output_order:
144 | if source not in sources:
145 | raise ValueError(f"Missing source {source} in the output")
146 | outputs.append(
147 | {
148 | "waveform": sources[source].cpu().unsqueeze(0),
149 | "sample_rate": self.model_sample_rate,
150 | }
151 | )
152 |
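        # Volumes are applied in the same bass/drums/other/vocals order as output_order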
153 | for i, volume in enumerate([bass_volume, drums_volume, others_volume, vocals_volume]):
154 | waveform = outputs[i]["waveform"]
155 | mask = torch.abs(waveform) > threshold
156 | outputs[i]["waveform"] = waveform * volume * mask.float() + waveform * (1 - mask.float())
157 |
158 | return tuple(outputs)
159 |
160 | def separate_sources(self, model, mix, segment=10.0, overlap=0.1, device=None,
161 | ):
162 | """
163 |         Apply the model to a mixture chunk by chunk, cross-fading the overlapping chunks and summing them (overlap-add).
164 | 
165 |         Args:
166 |             segment (float): segment length in seconds
167 | device (torch.device, str, or None): if provided, device on which to
168 | execute the computation, otherwise `mix.device` is assumed.
169 | When `device` is different from `mix.device`, only local computations will
170 | be on `device`, while the entire tracks will be stored on `mix.device`.
171 | """
172 | if device is None:
173 | device = mix.device
174 | else:
175 | device = torch.device(device)
176 |
177 | batch, channels, length = mix.shape
178 | sample_rate = self.model_sample_rate
179 |
180 | chunk_len = int(sample_rate * segment * (1 + overlap))
181 | start = 0
182 | end = chunk_len
183 | overlap_frames = overlap * sample_rate
184 | fade = Fade(fade_in_len=0, fade_out_len=int(overlap_frames), fade_shape="linear")
185 |
186 | final = torch.zeros(batch, len(model.sources), channels, length, device=device)
187 |
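        # Overlap-add: process the mix in overlapping chunks and cross-fade them.
        # The first chunk has no fade-in; fade-in is enabled right after it so each
        # later chunk blends into the tail of the previous one, and the fade-out is
        # disabled for the final chunk so the track does not end on a ramp to zero.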
188 | while start < length - overlap_frames:
189 | chunk = mix[:, :, start:end]
190 | with torch.no_grad():
191 | out = model.forward(chunk)
192 | out = fade(out)
193 | final[:, :, :, start:end] += out
194 | if start == 0:
195 | fade.fade_in_len = int(overlap_frames)
196 | start += int(chunk_len - overlap_frames)
197 | else:
198 | start += chunk_len
199 | end += chunk_len
200 | if end >= length:
201 | fade.fade_out_len = 0
202 | return final
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # 🔊 ComfyUI_Yvann-Nodes [](https://www.youtube.com/channel/yvann_ba)
2 |
3 | #### Made with the great help of [Lilien](https://x.com/Lilien_RIG) 😎
4 | ### **A pack of custom nodes that enable audio reactivity within [ComfyUI](https://github.com/comfyanonymous/ComfyUI), allowing you to generate AI-driven animations that sync with music**
5 |
6 | ---
7 |
8 | ## What Does This Do?
9 |
10 | - **Create** audio-reactive AI videos: control the style, content, and composition of AI generations with any audio
11 | - **Simple**: Just drop one of our [Workflows](/example_workflows) into ComfyUI and specify your audio and visual inputs
12 | - **Flexible**: Works with existing ComfyUI AI tech and nodes (e.g. IPAdapter, AnimateDiff, ControlNet)
13 |
14 | ---
15 |
16 | ## Video results (generated by our amazing users 😁)
17 |
18 |
19 | #### Audio Reactive Images to Video ⬇️
20 |
21 |
22 |
23 |
24 |
25 |
26 |
27 |
28 |
29 |
30 |
31 |
32 |
33 |
34 |
35 |
36 |
37 |
38 |
39 |
40 |
41 |
42 |
43 |
44 |
45 |
46 |
47 |
48 |
49 |
50 | #### Audio Reactive Video to Video ⬇️
51 |
52 |
53 |
54 |
55 |
56 |
57 |
58 |
59 |
60 |
61 |
62 |
63 |
64 |
65 |
66 |
67 |
68 |
69 |
70 |
71 | ## Quick Setup
72 |
73 | - Install [ComfyUI](https://github.com/comfyanonymous/ComfyUI) and [ComfyUI-Manager](https://github.com/ltdrdata/ComfyUI-Manager)
74 |
75 | ### Pick a Workflow (Images → Video or Video → Video)
76 |
77 | 1. **Images → Video**
78 | - Takes a **set of images** plus an **audio** track.
79 | - *Watch Tutorial*:
80 | [](https://www.youtube.com/watch?v=O2s6NseXlMc)
81 |
82 | 2. **Video → Video**
83 | - Takes a **source video** plus an **audio** track.
84 | - *Watch Tutorial*:
85 | [](https://www.youtube.com/watch?v=BiQHWKP3q0c)
86 |
87 | ---
88 |
89 | ### Load Your Chosen Workflow in ComfyUI
90 |
91 | 1. **Download** the `.json` file for the workflow you picked:
92 | - [AudioReactive_ImagesToVideo_Yvann.json](example_workflows/AudioReactive_ImagesToVideo_Yvann.json)
93 | - [AudioReactive_VideoToVideo_Yvann.json](example_workflows/AudioReactive_VideoToVideo_Yvann.json)
94 |
95 | 2. **Drop** the `.json` file into the **ComfyUI window**.
96 |
97 | 3. **Open the "🧩 Manager"** → **"Install Missing Custom Nodes"**
98 | - Install each pack of nodes that appears.
99 | - **Restart** ComfyUI if prompted.
100 |
101 | 4. **Set Your Inputs & Generate**
102 |    - Provide the inputs needed (everything is explained [here](https://www.youtube.com/@yvann_ba))
103 |    - Click the **Queue** button to produce your **audio-reactive** animation!
104 | 
105 | **That's it!** Have fun playing with the different settings!
106 | (If you have any questions or problems, check my [YouTube Tutorials](https://www.youtube.com/@yvann_ba))
107 |
108 | ---
109 |
110 | ## Nodes Details
111 |
112 |
113 | Click to Expand: Node-by-Node Reference
114 |
115 | ### Audio Analysis 🔍
116 | Analyzes audio to generate reactive weights for each frame.
117 |
118 | Node Parameters
119 |
120 | - **audio_sep_model**: Model from "Load Audio Separation Model"
121 | - **audio**: Input audio file
122 | - **batch_size**: Number of frames to generate weights for (0 derives it from audio duration × fps)
123 | - **fps**: Frame rate used to slice the audio into per-frame chunks
124 |
125 | **Parameters**:
126 | - **analysis_mode**: e.g., Drums Only, Vocals, Full Audio
127 | - **threshold**: Minimum weight pass-through
128 | - **multiply**: Amplification factor
129 |
130 | **Outputs**:
131 | - **graph_audio** (image preview)
132 | - **processed_audio**, **original_audio**
133 | - **audio_weights** (list of values)
134 |
135 |
136 |
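For intuition, the raw per-frame RMS weights are post-processed like this (a condensed sketch of the node's own normalize → threshold → multiply steps; the sample values are made up):

```python
import numpy as np

audio_weights = np.array([0.02, 0.35, 0.80, 0.10])  # raw per-frame RMS (illustrative values)
threshold, multiply = 0.5, 1.0                      # the node's default settings

normalized = (audio_weights - audio_weights.min()) / (audio_weights.max() - audio_weights.min())
gated = np.where(normalized > threshold, normalized, 0)  # zero out weights below the threshold
weights = np.clip(gated * multiply, 0, 1)                # amplify and clamp to [0, 1]
```
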
137 | ---
138 |
139 | ### Load Audio Separation Model 🎧
140 | Loads or downloads an audio separation model (e.g., HybridDemucs, OpenUnmix).
141 |
142 | Node Parameters
143 |
144 | - **model**: Choose between HybridDemucs / OpenUnmix.
145 | - **Outputs**: **audio_sep_model** (connect to Audio Analysis or Remixer).
146 |
147 |
148 |
149 | ---
150 |
151 | ### Audio Peaks Detection 📈
152 | Identifies peaks in the audio weights to trigger transitions or events.
153 |
154 | Node Parameters
155 |
156 | - **peaks_threshold**: Detection sensitivity; only weights above this value can register as peaks.
157 | - **min_peaks_distance**: Minimum gap in frames between two consecutive peaks.
158 | - **Outputs**: Binary peak list, a list that alternates value at each peak, peak indices/count, graph.
159 |
160 |
161 |
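A rough illustration of the detection rule (not the node's exact code): a frame counts as a peak only if its weight exceeds **peaks_threshold** and it lies at least **min_peaks_distance** frames after the previous peak:

```python
def detect_peaks(weights, peaks_threshold=0.5, min_peaks_distance=5):
    """Return a binary list: 1 where a peak triggers, else 0."""
    peaks, last_peak = [0] * len(weights), -min_peaks_distance
    for i, w in enumerate(weights):
        if w > peaks_threshold and i - last_peak >= min_peaks_distance:
            peaks[i], last_peak = 1, i
    return peaks

detect_peaks([0.1, 0.9, 0.2, 0.8, 0.85], 0.5, 2)  # -> [0, 1, 0, 1, 0]
```
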
162 | ---
163 |
164 | ### Audio IP Adapter Transitions 🔄
165 | Manages transitions between images based on peaks. Great for stable or style transitions.
166 |
167 | Node Parameters
168 |
169 | - **images**: Batch of images.
170 | - **peaks_weights**: From "Audio Peaks Detection".
171 | - **blend_mode**, **transitions_length**, **min_IPA_weight**, etc.
172 |
173 |
174 |
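Conceptually, each peak switches to the next image while the IPAdapter weight of the incoming image ramps up over **transitions_length** frames. A simplified sketch of that ramp (illustrative only, not the node's exact blend math):

```python
def transition_weights(peaks, transitions_length=8, min_IPA_weight=0.2):
    """Weight of the incoming image per frame: restarts at each peak, ramps to 1."""
    weight, weights = 1.0, []
    for p in peaks:
        if p:                                   # a peak starts a new transition
            weight = 0.0
        weight = min(1.0, weight + 1.0 / transitions_length)
        weights.append(max(min_IPA_weight, weight))
    return weights
```
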
175 | ---
176 |
177 | ### Audio Prompt Schedule 📝
178 | Links text prompts to peak indices.
179 |
180 | Node Parameters
181 |
182 | - **peaks_index**: Indices from peaks detection.
183 | - **prompts**: multiline string.
184 | - **Outputs**: mapped schedule string.
185 |
186 |
187 |
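The output pairs each peak index with the next prompt line, in the `"frame": "prompt"` form that prompt-schedule nodes expect (a sketch; the example values are made up):

```python
peaks_index = [0, 24, 57]                       # from Audio Peaks Detection
prompts = ["a misty forest", "a neon city", "an ocean at dawn"]

schedule = ",\n".join(f'"{frame}": "{prompt}"' for frame, prompt in zip(peaks_index, prompts))
print(schedule)
# "0": "a misty forest",
# "24": "a neon city",
# "57": "an ocean at dawn"
```
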
188 | ---
189 |
190 | ### Audio Remixer 🎛️
191 | Adjusts volume levels (drums, vocals, bass, others) in a track.
192 |
193 | Node Parameters
194 |
195 | - **drums_volume**, **vocals_volume**, **bass_volume**, **others_volume**
196 | - **Outputs**: single merged audio track.
197 |
198 |
199 |
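Each volume slider runs from -10 to 10 and is mapped to a real gain factor before the stems are summed; this mirrors the node's own `adjust_volume_range`: -10 mutes a stem, 0 leaves it untouched, +10 boosts it 10x.

```python
def adjust_volume_range(value):
    """Map a slider value in [-10, 10] to a gain factor in [0, 10]."""
    value = max(-10, min(10, value))
    if value <= 0:
        return (value + 10) / 10    # [-10, 0] -> [0.0, 1.0] (attenuate)
    return 1 + (value / 10) * 9     # (0, 10]  -> (1.0, 10.0] (boost)
```

With every slider at 0 each stem gets unity gain, so the merged output reproduces the original mix.
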
200 | ---
201 |
202 | ### Repeat Image To Count 🔁
203 | Repeats a set of images N times.
204 |
205 | Node Parameters
206 |
207 | - **images**, **count**: Images to repeat and the target number of repetitions.
208 | - **Outputs**: Repeated images.
209 |
210 |
211 |
212 | ---
213 |
214 | ### Invert Floats 🔄
215 | Flips sign of float values.
216 |
217 | Node Parameters
218 |
219 | - **floats**: list of floats.
220 | - **Outputs**: inverted list.
221 |
222 |
223 |
224 | ---
225 |
226 | ### Floats Visualizer 📈
227 | Plots float values as a graph.
228 |
229 | Node Parameters
230 |
231 | - **floats** (and optional second/third).
232 | - **Outputs**: visual graph image.
233 |
234 |
235 |
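Internally the pack renders graphs with matplotlib and converts the saved PNG into a ComfyUI image tensor; a condensed sketch of that pattern (mirroring the rendering code in `AudioAnalysis`):

```python
import tempfile
import matplotlib.pyplot as plt
import numpy as np
import torch
from PIL import Image

def plot_floats(floats):
    plt.figure(figsize=(12, 7), facecolor='white')
    plt.plot(range(len(floats)), floats, color='blue')
    plt.grid(True)
    with tempfile.NamedTemporaryFile(delete=False, suffix=".png") as tmp:
        plt.savefig(tmp.name, bbox_inches='tight')
    plt.close()
    img = np.array(Image.open(tmp.name).convert("RGB")).astype(np.float32) / 255.0
    return torch.from_numpy(img).unsqueeze(0)  # IMAGE tensor of shape (1, H, W, 3)
```
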
236 | ---
237 |
238 | ### Mask To Float 🎭
239 | Converts a mask into a single float value.
240 |
241 | Node Parameters
242 |
243 | - **mask**: input.
244 | - **Outputs**: float.
245 |
246 |
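One common reduction, shown purely for intuition, is the mask's mean coverage (an assumption here; the node may reduce differently):

```python
import torch

mask = torch.zeros(1, 64, 64)   # hypothetical ComfyUI MASK tensor
mask[:, 16:48, 16:48] = 1.0
value = mask.mean().item()      # 0.25: fraction of the mask that is "on"
```
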
247 |
248 | ---
249 |
250 | ### Floats To Weights Strategy 🏋️
251 | Transforms float lists into an IPAdapter "weight strategy."
252 |
253 | Node Parameters
254 |
255 | - **floats**: list of floats.
256 | - **Outputs**: dictionary with strategy info.
257 |
258 |
259 |
260 |
261 |
262 | ---
263 |
264 | Please give us a ⭐ on GitHub, it helps us improve the tool and it's free! (:
265 |
266 |
267 |
268 |
269 |
270 |
271 |
272 |
273 |
274 |
275 |
276 |
277 |
278 |
279 |
280 |
281 |
282 |
--------------------------------------------------------------------------------
/web/js/help_popup.js:
--------------------------------------------------------------------------------
1 | import { app } from "../../../scripts/app.js";
2 |
3 | // code based on mtb nodes by Mel Massadian https://github.com/melMass/comfy_mtb/
4 | export const loadScript = (
5 | FILE_URL,
6 | async = true,
7 | type = 'text/javascript',
8 | ) => {
9 | return new Promise((resolve, reject) => {
10 | try {
11 | // Check if the script already exists
12 | const existingScript = document.querySelector(`script[src="${FILE_URL}"]`)
13 | if (existingScript) {
14 | resolve({ status: true, message: 'Script already loaded' })
15 | return
16 | }
17 |
18 | const scriptEle = document.createElement('script')
19 | scriptEle.type = type
20 | scriptEle.async = async
21 | scriptEle.src = FILE_URL
22 |
23 | scriptEle.addEventListener('load', (ev) => {
24 | resolve({ status: true })
25 | })
26 |
27 | scriptEle.addEventListener('error', (ev) => {
28 | reject({
29 | status: false,
30 | message: `Failed to load the script ${FILE_URL}`,
31 | })
32 | })
33 |
34 | document.body.appendChild(scriptEle)
35 | } catch (error) {
36 | reject(error)
37 | }
38 | })
39 | }
40 |
41 | loadScript('/yvann_web_async/marked.min.js').catch((e) => {
42 | console.log(e)
43 | })
44 | loadScript('/yvann_web_async/purify.min.js').catch((e) => {
45 | console.log(e)
46 | })
47 |
48 | const categories = ["👁️ Yvann Nodes"];
49 | app.registerExtension({
50 | name: "Yvann.HelpPopup",
51 | async beforeRegisterNodeDef(nodeType, nodeData) {
52 |
53 | if (app.ui.settings.getSettingValue("Yvann.helpPopup") === false) {
54 | return;
55 | }
56 | try {
57 | 			categories.forEach(category => {
58 | 				if (nodeData?.category?.startsWith(category)) {
59 | 					addDocumentation(nodeData, nodeType);
60 | 				}
61 | 			});
62 | 
63 | } catch (error) {
64 | console.error("Error in registering Yvann.HelpPopup", error);
65 | }
66 | },
67 | });
68 |
69 | const create_documentation_stylesheet = () => {
70 | const tag = 'roti-documentation-stylesheet'
71 |
72 | 	let styleTag = document.head.querySelector(`#${tag}`) // look up by id; a bare tag-name selector never matched
73 |
74 | if (!styleTag) {
75 | styleTag = document.createElement('style')
76 | styleTag.type = 'text/css'
77 | styleTag.id = tag
78 | styleTag.innerHTML = `
79 | .roti-documentation-popup {
80 | background: var(--comfy-menu-bg);
81 | position: absolute;
82 | color: var(--fg-color);
83 | font: 12px monospace;
84 | line-height: 1.5em;
85 | padding: 10px;
86 | border-radius: 10px;
87 | border-style: solid;
88 | border-width: medium;
89 | border-color: var(--border-color);
90 | z-index: 5;
91 | overflow: hidden;
92 | }
93 | .content-wrapper {
94 | overflow: auto;
95 | max-height: 100%;
96 | /* Scrollbar styling for Chrome */
97 | &::-webkit-scrollbar {
98 | width: 6px;
99 | }
100 | &::-webkit-scrollbar-track {
101 | background: var(--bg-color);
102 | }
103 | &::-webkit-scrollbar-thumb {
104 | background-color: var(--fg-color);
105 | border-radius: 6px;
106 | border: 3px solid var(--bg-color);
107 | }
108 |
109 | /* Scrollbar styling for Firefox */
110 | scrollbar-width: thin;
111 | scrollbar-color: var(--fg-color) var(--bg-color);
112 | a {
113 | color: yellow;
114 | }
115 | a:visited {
116 | color: orange;
117 | }
118 | a:hover {
119 | color: red;
120 | }
121 | }
122 | `
123 | document.head.appendChild(styleTag)
124 | }
125 | }
126 |
127 | /** Add documentation widget to the selected node */
128 | export const addDocumentation = (
129 | nodeData,
130 | nodeType,
131 | opts = { icon_size: 14, icon_margin: 4 },) => {
132 |
133 | opts = opts || {}
134 | const iconSize = opts.icon_size ? opts.icon_size : 14
135 | const iconMargin = opts.icon_margin ? opts.icon_margin : 4
136 | let docElement = null
137 | let contentWrapper = null
138 | //if no description in the node python code, don't do anything
139 | if (!nodeData.description) {
140 | return
141 | }
142 |
143 | const drawFg = nodeType.prototype.onDrawForeground
144 | nodeType.prototype.onDrawForeground = function (ctx) {
145 | const r = drawFg ? drawFg.apply(this, arguments) : undefined
146 | if (this.flags.collapsed) return r
147 |
148 | // icon position
149 | const x = this.size[0] - iconSize - iconMargin
150 |
151 | // create the popup
152 | if (this.show_doc && docElement === null) {
153 | docElement = document.createElement('div')
154 | contentWrapper = document.createElement('div');
155 | docElement.appendChild(contentWrapper);
156 |
157 | create_documentation_stylesheet()
158 | contentWrapper.classList.add('content-wrapper');
159 | docElement.classList.add('roti-documentation-popup')
160 |
161 | //parse the string from the python node code to html with marked, and sanitize the html with DOMPurify
162 | contentWrapper.innerHTML = DOMPurify.sanitize(marked.parse(nodeData.description,))
163 |
164 | // resize handle
165 | const resizeHandle = document.createElement('div');
166 | resizeHandle.style.width = '0';
167 | resizeHandle.style.height = '0';
168 | resizeHandle.style.position = 'absolute';
169 | resizeHandle.style.bottom = '0';
170 | resizeHandle.style.right = '0';
171 | resizeHandle.style.cursor = 'se-resize';
172 |
173 | // Add pseudo-elements to create a triangle shape
174 | const borderColor = getComputedStyle(document.documentElement).getPropertyValue('--border-color').trim();
175 | resizeHandle.style.borderTop = '10px solid transparent';
176 | resizeHandle.style.borderLeft = '10px solid transparent';
177 | resizeHandle.style.borderBottom = `10px solid ${borderColor}`;
178 | resizeHandle.style.borderRight = `10px solid ${borderColor}`;
179 |
180 | docElement.appendChild(resizeHandle)
181 | let isResizing = false
182 | let startX, startY, startWidth, startHeight
183 |
184 | resizeHandle.addEventListener('mousedown', function (e) {
185 | e.preventDefault();
186 | e.stopPropagation();
187 | isResizing = true;
188 | startX = e.clientX;
189 | startY = e.clientY;
190 | startWidth = parseInt(document.defaultView.getComputedStyle(docElement).width, 10);
191 | startHeight = parseInt(document.defaultView.getComputedStyle(docElement).height, 10);
192 | },
193 | { signal: this.docCtrl.signal },
194 | );
195 |
196 | // close button
197 | const closeButton = document.createElement('div');
198 | closeButton.textContent = '❌';
199 | closeButton.style.position = 'absolute';
200 | closeButton.style.top = '0';
201 | closeButton.style.right = '0';
202 | closeButton.style.cursor = 'pointer';
203 | closeButton.style.padding = '5px';
204 | closeButton.style.color = 'red';
205 | closeButton.style.fontSize = '12px';
206 |
207 | docElement.appendChild(closeButton)
208 |
209 | closeButton.addEventListener('mousedown', (e) => {
210 | e.stopPropagation();
211 | this.show_doc = !this.show_doc
212 | docElement.parentNode.removeChild(docElement)
213 | docElement = null
214 | if (contentWrapper) {
215 | contentWrapper.remove()
216 | contentWrapper = null
217 | }
218 | },
219 | { signal: this.docCtrl.signal },
220 | );
221 |
222 | document.addEventListener('mousemove', function (e) {
223 | if (!isResizing) return;
224 | const scale = app.canvas.ds.scale;
225 | const newWidth = startWidth + (e.clientX - startX) / scale;
226 | 			const newHeight = startHeight + (e.clientY - startY) / scale;
227 | docElement.style.width = `${newWidth}px`;
228 | docElement.style.height = `${newHeight}px`;
229 | },
230 | { signal: this.docCtrl.signal },
231 | );
232 |
233 | document.addEventListener('mouseup', function () {
234 | isResizing = false
235 | },
236 | { signal: this.docCtrl.signal },
237 | )
238 |
239 | document.body.appendChild(docElement)
240 | }
241 | // close the popup
242 | else if (!this.show_doc && docElement !== null) {
243 | docElement.parentNode.removeChild(docElement)
244 | docElement = null
245 | }
246 | // update position of the popup
247 | if (this.show_doc && docElement !== null) {
248 | const rect = ctx.canvas.getBoundingClientRect()
249 | const scaleX = rect.width / ctx.canvas.width
250 | const scaleY = rect.height / ctx.canvas.height
251 |
252 | const transform = new DOMMatrix()
253 | .scaleSelf(scaleX, scaleY)
254 | .multiplySelf(ctx.getTransform())
255 | .translateSelf(this.size[0] * scaleX * Math.max(1.0,window.devicePixelRatio) , 0)
256 | .translateSelf(10, -32)
257 |
258 | const scale = new DOMMatrix()
259 | .scaleSelf(transform.a, transform.d);
260 |
261 | const styleObject = {
262 | transformOrigin: '0 0',
263 | transform: scale,
264 | left: `${transform.a + transform.e}px`,
265 | top: `${transform.d + transform.f}px`,
266 | };
267 | Object.assign(docElement.style, styleObject);
268 | }
269 |
270 | ctx.save()
271 | ctx.translate(x - 2, iconSize - 34)
272 | ctx.scale(iconSize / 32, iconSize / 32)
273 | ctx.strokeStyle = 'rgba(255,255,255,0.3)'
274 | ctx.lineCap = 'round'
275 | ctx.lineJoin = 'round'
276 | ctx.lineWidth = 2.4
277 | ctx.font = 'bold 36px monospace'
278 | ctx.fillStyle = '#EADFB4';
279 | ctx.fillText('?', 0, 24)
280 | ctx.restore()
281 | return r
282 | }
283 | // handle clicking of the icon
284 | const mouseDown = nodeType.prototype.onMouseDown
285 | nodeType.prototype.onMouseDown = function (e, localPos, canvas) {
286 | const r = mouseDown ? mouseDown.apply(this, arguments) : undefined
287 | const iconX = this.size[0] - iconSize - iconMargin
288 | const iconY = iconSize - 34
289 | if (
290 | localPos[0] > iconX &&
291 | localPos[0] < iconX + iconSize &&
292 | localPos[1] > iconY &&
293 | localPos[1] < iconY + iconSize
294 | ) {
295 | if (this.show_doc === undefined) {
296 | this.show_doc = true
297 | } else {
298 | this.show_doc = !this.show_doc
299 | }
300 | if (this.show_doc) {
301 | this.docCtrl = new AbortController()
302 | } else {
303 | this.docCtrl.abort()
304 | }
305 | return true;
306 | }
307 | return r;
308 | }
309 | const onRem = nodeType.prototype.onRemoved
310 |
311 | nodeType.prototype.onRemoved = function () {
312 | const r = onRem ? onRem.apply(this, []) : undefined
313 |
314 | if (docElement) {
315 | docElement.remove()
316 | docElement = null
317 | }
318 |
319 | if (contentWrapper) {
320 | contentWrapper.remove()
321 | contentWrapper = null
322 | }
323 | return r
324 | }
325 | }
--------------------------------------------------------------------------------
/nodes/audio/AudioAnalysis.py:
--------------------------------------------------------------------------------
1 | import torch
2 | import matplotlib.pyplot as plt
3 | from matplotlib.ticker import MaxNLocator
4 | import tempfile
5 | import numpy as np
6 | from PIL import Image
7 | from typing import Tuple, List, Dict
8 | from ... import Yvann
9 | import comfy.model_management as mm
10 | import torchaudio
11 | from torchaudio.pipelines import HDEMUCS_HIGH_MUSDB_PLUS
12 | from termcolor import colored
13 | from torchaudio.transforms import Fade, Resample
14 |
15 | class AudioNodeBase(Yvann):
16 | CATEGORY = "👁️ Yvann Nodes/🔊 Audio"
17 |
18 | class AudioAnalysis(AudioNodeBase):
19 | analysis_modes = ["Drums Only", "Full Audio", "Vocals Only", "Bass Only", "Others Audio"]
20 |
21 | @classmethod
22 | def INPUT_TYPES(cls):
23 | return {
24 | "required": {
25 | "audio_sep_model": ("AUDIO_SEPARATION_MODEL", {"forceInput": True}),
26 | "batch_size": ("INT", {"forceInput": True}),
27 | "fps": ("FLOAT", {"forceInput": True}),
28 | "audio": ("AUDIO", {"forceInput": True}),
29 | "analysis_mode": (cls.analysis_modes,),
30 | "threshold": ("FLOAT", {"default": 0.5, "min": 0.0, "max": 1, "step": 0.01}),
31 | "multiply": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 5.0, "step": 0.01}),
32 | }
33 | }
34 |
35 | RETURN_TYPES = ("AUDIO", "AUDIO", "FLOAT", "IMAGE")
36 | RETURN_NAMES = ("processed_audio", "original_audio", "audio_weights", "graph_audio")
37 | FUNCTION = "process_audio"
38 |
39 | def _get_audio_frame(self, waveform: torch.Tensor, i: int, samples_per_frame: int) -> np.ndarray:
40 | start = i * samples_per_frame
41 | end = start + samples_per_frame
42 | return waveform[..., start:end].cpu().numpy().squeeze()
43 |
44 | def _rms_energy(self, waveform: torch.Tensor, batch_size: int, samples_per_frame: int) -> np.ndarray:
45 | try:
46 | rms_values = []
47 | for i in range(batch_size):
48 | frame = self._get_audio_frame(waveform, i, samples_per_frame)
49 | if frame.size == 0:
50 | rms = 0.0
51 | else:
52 | rms = np.sqrt(np.mean(frame ** 2))
53 | rms = round(rms, 6)
54 | rms_values.append(rms)
55 | return np.array(rms_values)
56 | except Exception as e:
57 | print(f"Error in RMS energy calculation: {e}")
58 | return np.zeros(batch_size)
59 |
60 | def prepare_audio_and_device(self, waveform, sample_rate) -> Tuple[torch.device, torch.Tensor]:
61 | """Prepares the device (GPU or CPU) and sets up the audio waveform."""
62 | device = mm.get_torch_device()
63 | waveform = waveform.squeeze(0).to(device)
64 | self.audio_sample_rate = sample_rate
65 | return device, waveform
66 |
67 | def apply_model_and_extract_sources(self, model, waveform: torch.Tensor, device: torch.device) -> Tuple[torch.Tensor, list[str]]:
68 | """Applies the model and extracts audio sources, handling both Open-Unmix and GDemucs cases."""
69 | sources, sources_list = None, []
70 |
71 |
72 | if isinstance(model, torch.nn.Module): # Open-Unmix model
73 |             print(colored("Applying Open-Unmix model on audio", 'green'))
74 |             self.model_sample_rate = int(model.sample_rate)
75 |
76 | if self.audio_sample_rate != self.model_sample_rate:
77 | resampler = torchaudio.transforms.Resample(orig_freq=self.audio_sample_rate, new_freq=self.model_sample_rate).to(device)
78 | waveform = resampler(waveform)
79 | sources = model(waveform.unsqueeze(0)).squeeze(0)
80 | sources_list = ['bass', 'drums', 'others', 'vocals']
81 |
82 | elif "demucs" in model and model["demucs"]: # GDemucs model
83 | print(colored("Applying GDemucs model on audio", 'green'))
84 | self.model_sample_rate = model["sample_rate"]
85 | model = model["model"]
86 |
87 | if self.audio_sample_rate != self.model_sample_rate:
88 | resampler = torchaudio.transforms.Resample(orig_freq=self.audio_sample_rate, new_freq=self.model_sample_rate).to(device)
89 | waveform = resampler(waveform)
90 | ref = waveform.mean(0)
91 | waveform = (waveform - ref.mean()) / ref.std()
92 | sources = self.separate_sources(model, waveform[None], segment=10.0, overlap=0.1, device=device)[0]
93 | sources = sources * ref.std() + ref.mean()
94 |
95 | sources_list = getattr(model, 'sources', ['bass', 'drums', 'others', 'vocals'])
96 |
97 | else:
98 | print(colored("Unrecognized model type", 'red'))
99 | return None, []
100 |
101 | return sources, sources_list
102 |
103 | def separate_sources(self, model, mix, segment=10.0, overlap=0.1, device=None,
104 | ):
105 | """
106 |         Apply the model to a mixture chunk by chunk, cross-fading the overlapping chunks and summing them (overlap-add).
107 | 
108 |         Args:
109 |             segment (float): segment length in seconds
110 | device (torch.device, str, or None): if provided, device on which to
111 | execute the computation, otherwise `mix.device` is assumed.
112 | When `device` is different from `mix.device`, only local computations will
113 | be on `device`, while the entire tracks will be stored on `mix.device`.
114 | """
115 | if device is None:
116 | device = mix.device
117 | else:
118 | device = torch.device(device)
119 |
120 | batch, channels, length = mix.shape
121 | sample_rate = self.model_sample_rate
122 |
123 | chunk_len = int(sample_rate * segment * (1 + overlap))
124 | start = 0
125 | end = chunk_len
126 | overlap_frames = overlap * sample_rate
127 | fade = Fade(fade_in_len=0, fade_out_len=int(overlap_frames), fade_shape="linear")
128 |
129 | final = torch.zeros(batch, len(model.sources), channels, length, device=device)
130 |
131 | while start < length - overlap_frames:
132 | chunk = mix[:, :, start:end]
133 | with torch.no_grad():
134 | out = model.forward(chunk)
135 | out = fade(out)
136 | final[:, :, :, start:end] += out
137 | if start == 0:
138 | fade.fade_in_len = int(overlap_frames)
139 | start += int(chunk_len - overlap_frames)
140 | else:
141 | start += chunk_len
142 | end += chunk_len
143 | if end >= length:
144 | fade.fade_out_len = 0
145 | return final
146 |
147 | def adjust_waveform_dimensions(self, waveform):
148 |
149 | # Ensure waveform is at least 2D (channels, frames)
150 | if waveform.ndim == 1:
151 | # If waveform is 1D (frames,), add channel dimension
152 | waveform = waveform.unsqueeze(0)
153 |
154 | # Ensure waveform has batch dimension
155 | if waveform.ndim == 2:
156 | # Add batch dimension
157 | waveform = waveform.unsqueeze(0)
158 | elif waveform.ndim == 3:
159 | # Waveform already has batch dimension
160 | pass
161 | else:
162 | raise ValueError(f"Waveform has unexpected dimensions: {waveform.shape}")
163 |
164 | return waveform
165 |
166 | def convert_audio_format(self, audio, mode: int) -> torch.Tensor:
167 |
168 | if mode == 1:
169 | if audio['waveform'].shape[1] == 1:
170 | audio['waveform'] = audio['waveform'].repeat(1, 2, 1)
171 | print("Converted from mono to stereo.")
172 | else:
173 | print("The signal is already in stereo.")
174 |
175 | elif mode == 2:
176 | if audio['waveform'].shape[1] == 2:
177 | audio['waveform'] = audio['waveform'].mean(dim=1, keepdim=True)
178 | print("Converted from stereo to mono.")
179 | else:
180 | print("The signal is already in mono.")
181 |
182 | else:
183 | print("Invalid mode. Please use 1 for mono to stereo or 2 for stereo to mono.")
184 |
185 | return audio
186 |
187 |     def process_audio(self, audio_sep_model, audio: Dict[str, torch.Tensor], batch_size: int, fps: float, analysis_mode: str, threshold: float, multiply: float) -> Tuple[Dict[str, torch.Tensor], Dict[str, torch.Tensor], List[float], torch.Tensor]:
188 |
189 | if audio is None or 'waveform' not in audio or 'sample_rate' not in audio:
190 | raise ValueError("Invalid audio input")
191 |
192 | isMono = False
193 | if audio['waveform'].shape[1] == 1:
194 | audio = self.convert_audio_format(audio, 1)
195 | isMono = True
196 |
197 | model = audio_sep_model
198 | waveform = audio['waveform']
199 | sample_rate = audio['sample_rate']
200 | original_sample_rate = audio['sample_rate']
201 | self.audio_sample_rate = original_sample_rate
202 |
203 | num_samples = waveform.shape[-1]
204 | audio_duration = num_samples / sample_rate
205 | if batch_size == 0:
206 | batch_size = int(audio_duration * fps)
207 | else:
208 | audio_duration = batch_size / fps
209 | total_samples_needed = int(audio_duration * sample_rate)
210 |
211 |
212 | samples_per_frame = total_samples_needed // batch_size
213 |
214 | if waveform.shape[-1] > total_samples_needed:
215 | waveform = waveform[..., :total_samples_needed]
216 | elif waveform.shape[-1] < total_samples_needed:
217 | pad_length = total_samples_needed - waveform.shape[-1]
218 | waveform = torch.nn.functional.pad(waveform, (0, pad_length))
219 |
220 | #--------------------------------------------------#
221 |
222 | if analysis_mode != "Full Audio":
223 | try:
224 | device, waveform = self.prepare_audio_and_device(waveform, original_sample_rate)
225 |
226 | with torch.no_grad():
227 | estimates, estimates_list = self.apply_model_and_extract_sources(model, waveform, device)
228 |
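                # Note: this fixed order overrides the list returned by the model;
                # the per-model source_name_mapping below compensates for it.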
229 | estimates_list = ['drums', 'bass', 'others', 'vocals']
230 |
231 | if isinstance(model, torch.nn.Module):
232 | source_name_mapping = {
233 | "Others Audio": "vocals",
234 | "Bass Only": "others",
235 | "Drums Only": "bass",
236 | "Vocals Only": "drums"
237 | }
238 | elif "demucs" in model and model["demucs"]:
239 | source_name_mapping = {
240 | "Drums Only": "drums",
241 | "Bass Only": "bass",
242 | "Others Audio": "others",
243 | "Vocals Only": "vocals"
244 | }
245 |
246 | source_name = source_name_mapping.get(analysis_mode)
247 | if source_name is not None:
248 | try:
249 | source_index = estimates_list.index(source_name)
250 | processed_waveform = estimates[source_index]
251 | print(colored("Checking sources in processed_waveform:", 'blue'))
252 | except ValueError:
253 | raise ValueError(f"Source '{source_name}' is not available in the model's provided sources.")
254 | else:
255 | raise ValueError(f"Analysis mode '{analysis_mode}' is invalid.")
256 | except Exception as e:
257 | print(f"Error in model processing: {e}")
258 | raise
259 | else:
260 | processed_waveform = waveform.clone()
261 |
262 | #--------------------------------------------------#
263 |
269 |
270 | processed_waveform = self.adjust_waveform_dimensions(processed_waveform)
271 | original_waveform = self.adjust_waveform_dimensions(waveform.clone())
272 |
273 |
274 | final_sample_rate = self.model_sample_rate if hasattr(self, 'model_sample_rate') else sample_rate
275 |         if analysis_mode != "Full Audio":
276 |
277 |
278 | print(f"Resampling processed audio from {final_sample_rate} Hz back to original sample rate {original_sample_rate} Hz.")
279 | resampler = torchaudio.transforms.Resample(orig_freq=final_sample_rate, new_freq=original_sample_rate).to(processed_waveform.device)
280 | processed_waveform = processed_waveform.squeeze(0)
281 | processed_waveform = resampler(processed_waveform)
282 | processed_waveform = processed_waveform.unsqueeze(0)
283 |
284 | expected_num_samples = original_waveform.shape[-1]
285 | actual_num_samples = processed_waveform.shape[-1]
286 | if actual_num_samples > expected_num_samples:
287 | processed_waveform = processed_waveform[..., :expected_num_samples]
288 | elif actual_num_samples < expected_num_samples:
289 | pad_length = expected_num_samples - actual_num_samples
290 | processed_waveform = torch.nn.functional.pad(processed_waveform, (0, pad_length))
291 |
292 | final_sample_rate = original_sample_rate
293 |
294 | processed_audio = {
295 | 'waveform': processed_waveform.cpu().detach(),
296 | 'sample_rate': self.audio_sample_rate
297 | }
298 | original_audio = {
299 | 'waveform': original_waveform.cpu().detach(),
300 | 'sample_rate': self.audio_sample_rate
301 | }
302 |
303 | #--------------------------------------------------#
304 |
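        # Per-frame loudness: take RMS energy per frame, normalize to [0, 1],
        # gate values below `threshold`, scale by `multiply`, then clip to [0, 1].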
305 | waveform_for_rms = processed_waveform.squeeze(0).squeeze(0)
306 | audio_weights = self._rms_energy(waveform_for_rms, batch_size, samples_per_frame)
307 |
308 |
309 | if np.isnan(audio_weights).any() or np.isinf(audio_weights).any():
310 | raise ValueError("Invalid audio weights calculated")
311 |
312 | min_weight = np.min(audio_weights)
313 | max_weight = np.max(audio_weights)
314 | if max_weight - min_weight != 0:
315 | audio_weights_normalized = (audio_weights - min_weight) / (max_weight - min_weight)
316 | else:
317 | audio_weights_normalized = audio_weights - min_weight
318 |
319 | audio_weights_thresholded = np.where(audio_weights_normalized > threshold, audio_weights_normalized, 0)
320 | audio_weights_processed = np.clip(audio_weights_thresholded * multiply, 0, 1)
321 |
322 | try:
323 | figsize = 12.0
324 | plt.figure(figsize=(figsize, figsize * 0.6), facecolor='white')
325 | plt.plot(
326 | list(range(0, (len(audio_weights_processed)))),
327 | audio_weights_processed,
328 | label=f'{analysis_mode} Weights',
329 | color='blue'
330 | )
331 | plt.xlabel(f'Frames (batch_size = {batch_size})')
332 | plt.ylabel('Audio Weights')
333 | plt.title(f'Audio Weights Over Frames ({analysis_mode})')
334 | plt.legend()
335 | plt.grid(True)
336 |
337 | ax = plt.gca()
338 | ax.xaxis.set_major_locator(MaxNLocator(integer=True))
339 |
340 | with tempfile.NamedTemporaryFile(delete=False, suffix=".png") as tmpfile:
341 | plt.savefig(tmpfile.name, format='png', bbox_inches='tight')
342 | tmpfile_path = tmpfile.name
343 | plt.close()
344 |
345 | weights_graph = Image.open(tmpfile_path).convert("RGB")
346 | weights_graph = np.array(weights_graph).astype(np.float32) / 255.0
347 | weights_graph = torch.from_numpy(weights_graph).unsqueeze(0)
348 | except Exception as e:
349 | print(f"Error in creating weights graph: {e}")
350 | weights_graph = torch.zeros((1, 400, 300, 3))
351 |
352 | rounded_audio_weights = [round(float(x), 6) for x in audio_weights_processed]
353 |
354 |         if isMono:
355 | self.convert_audio_format(processed_audio, 2)
356 | self.convert_audio_format(original_audio, 2)
357 |
358 | return (processed_audio, original_audio, rounded_audio_weights, weights_graph)
359 |
--------------------------------------------------------------------------------
/yvann_web_async/purify.min.js:
--------------------------------------------------------------------------------
1 | /*! @license DOMPurify 3.0.11 | (c) Cure53 and other contributors | Released under the Apache license 2.0 and Mozilla Public License 2.0 | github.com/cure53/DOMPurify/blob/3.0.11/LICENSE */
2 | !function(e,t){"object"==typeof exports&&"undefined"!=typeof module?module.exports=t():"function"==typeof define&&define.amd?define(t):(e="undefined"!=typeof globalThis?globalThis:e||self).DOMPurify=t()}(this,(function(){"use strict";const{entries:e,setPrototypeOf:t,isFrozen:n,getPrototypeOf:o,getOwnPropertyDescriptor:r}=Object;let{freeze:i,seal:a,create:l}=Object,{apply:c,construct:s}="undefined"!=typeof Reflect&&Reflect;i||(i=function(e){return e}),a||(a=function(e){return e}),c||(c=function(e,t,n){return e.apply(t,n)}),s||(s=function(e,t){return new e(...t)});const u=b(Array.prototype.forEach),m=b(Array.prototype.pop),p=b(Array.prototype.push),f=b(String.prototype.toLowerCase),d=b(String.prototype.toString),h=b(String.prototype.match),g=b(String.prototype.replace),T=b(String.prototype.indexOf),y=b(String.prototype.trim),E=b(Object.prototype.hasOwnProperty),A=b(RegExp.prototype.test),_=(N=TypeError,function(){for(var e=arguments.length,t=new Array(e),n=0;n1?n-1:0),r=1;r2&&void 0!==arguments[2]?arguments[2]:f;t&&t(e,null);let i=o.length;for(;i--;){let t=o[i];if("string"==typeof t){const e=r(t);e!==t&&(n(o)||(o[i]=e),t=e)}e[t]=!0}return e}function R(e){for(let t=0;t/gm),B=a(/\${[\w\W]*}/gm),W=a(/^data-[\-\w.\u00B7-\uFFFF]/),G=a(/^aria-[\-\w]+$/),Y=a(/^(?:(?:(?:f|ht)tps?|mailto|tel|callto|sms|cid|xmpp):|[^a-z]|[a-z+.\-]+(?:[^a-z+.\-:]|$))/i),j=a(/^(?:\w+script|data):/i),X=a(/[\u0000-\u0020\u00A0\u1680\u180E\u2000-\u2029\u205F\u3000]/g),q=a(/^html$/i),$=a(/^[a-z][.\w]*(-[.\w]+)+$/i);var K=Object.freeze({__proto__:null,MUSTACHE_EXPR:H,ERB_EXPR:z,TMPLIT_EXPR:B,DATA_ATTR:W,ARIA_ATTR:G,IS_ALLOWED_URI:Y,IS_SCRIPT_OR_DATA:j,ATTR_WHITESPACE:X,DOCTYPE_NAME:q,CUSTOM_ELEMENT:$});const V=function(){return"undefined"==typeof window?null:window},Z=function(e,t){if("object"!=typeof e||"function"!=typeof e.createPolicy)return null;let n=null;const o="data-tt-policy-suffix";t&&t.hasAttribute(o)&&(n=t.getAttribute(o));const r="dompurify"+(n?"#"+n:"");try{return e.createPolicy(r,{createHTML:e=>e,createScriptURL:e=>e})}catch(e){return console.warn("TrustedTypes policy "+r+" could not be created."),null}};var J=function t(){let n=arguments.length>0&&void 0!==arguments[0]?arguments[0]:V();const o=e=>t(e);if(o.version="3.0.11",o.removed=[],!n||!n.document||9!==n.document.nodeType)return o.isSupported=!1,o;let{document:r}=n;const a=r,c=a.currentScript,{DocumentFragment:s,HTMLTemplateElement:N,Node:b,Element:R,NodeFilter:H,NamedNodeMap:z=n.NamedNodeMap||n.MozNamedAttrMap,HTMLFormElement:B,DOMParser:W,trustedTypes:G}=n,j=R.prototype,X=L(j,"cloneNode"),$=L(j,"nextSibling"),J=L(j,"childNodes"),Q=L(j,"parentNode");if("function"==typeof N){const e=r.createElement("template");e.content&&e.content.ownerDocument&&(r=e.content.ownerDocument)}let ee,te="";const{implementation:ne,createNodeIterator:oe,createDocumentFragment:re,getElementsByTagName:ie}=r,{importNode:ae}=a;let le={};o.isSupported="function"==typeof e&&"function"==typeof Q&&ne&&void 0!==ne.createHTMLDocument;const{MUSTACHE_EXPR:ce,ERB_EXPR:se,TMPLIT_EXPR:ue,DATA_ATTR:me,ARIA_ATTR:pe,IS_SCRIPT_OR_DATA:fe,ATTR_WHITESPACE:de,CUSTOM_ELEMENT:he}=K;let{IS_ALLOWED_URI:ge}=K,Te=null;const ye=S({},[...D,...C,...O,...v,...M]);let Ee=null;const Ae=S({},[...I,...U,...P,...F]);let 
_e=Object.seal(l(null,{tagNameCheck:{writable:!0,configurable:!1,enumerable:!0,value:null},attributeNameCheck:{writable:!0,configurable:!1,enumerable:!0,value:null},allowCustomizedBuiltInElements:{writable:!0,configurable:!1,enumerable:!0,value:!1}})),Ne=null,be=null,Se=!0,Re=!0,we=!1,Le=!0,De=!1,Ce=!0,Oe=!1,xe=!1,ve=!1,ke=!1,Me=!1,Ie=!1,Ue=!0,Pe=!1;const Fe="user-content-";let He=!0,ze=!1,Be={},We=null;const Ge=S({},["annotation-xml","audio","colgroup","desc","foreignobject","head","iframe","math","mi","mn","mo","ms","mtext","noembed","noframes","noscript","plaintext","script","style","svg","template","thead","title","video","xmp"]);let Ye=null;const je=S({},["audio","video","img","source","image","track"]);let Xe=null;const qe=S({},["alt","class","for","id","label","name","pattern","placeholder","role","summary","title","value","style","xmlns"]),$e="http://www.w3.org/1998/Math/MathML",Ke="http://www.w3.org/2000/svg",Ve="http://www.w3.org/1999/xhtml";let Ze=Ve,Je=!1,Qe=null;const et=S({},[$e,Ke,Ve],d);let tt=null;const nt=["application/xhtml+xml","text/html"],ot="text/html";let rt=null,it=null;const at=r.createElement("form"),lt=function(e){return e instanceof RegExp||e instanceof Function},ct=function(){let e=arguments.length>0&&void 0!==arguments[0]?arguments[0]:{};if(!it||it!==e){if(e&&"object"==typeof e||(e={}),e=w(e),tt=-1===nt.indexOf(e.PARSER_MEDIA_TYPE)?ot:e.PARSER_MEDIA_TYPE,rt="application/xhtml+xml"===tt?d:f,Te=E(e,"ALLOWED_TAGS")?S({},e.ALLOWED_TAGS,rt):ye,Ee=E(e,"ALLOWED_ATTR")?S({},e.ALLOWED_ATTR,rt):Ae,Qe=E(e,"ALLOWED_NAMESPACES")?S({},e.ALLOWED_NAMESPACES,d):et,Xe=E(e,"ADD_URI_SAFE_ATTR")?S(w(qe),e.ADD_URI_SAFE_ATTR,rt):qe,Ye=E(e,"ADD_DATA_URI_TAGS")?S(w(je),e.ADD_DATA_URI_TAGS,rt):je,We=E(e,"FORBID_CONTENTS")?S({},e.FORBID_CONTENTS,rt):Ge,Ne=E(e,"FORBID_TAGS")?S({},e.FORBID_TAGS,rt):{},be=E(e,"FORBID_ATTR")?S({},e.FORBID_ATTR,rt):{},Be=!!E(e,"USE_PROFILES")&&e.USE_PROFILES,Se=!1!==e.ALLOW_ARIA_ATTR,Re=!1!==e.ALLOW_DATA_ATTR,we=e.ALLOW_UNKNOWN_PROTOCOLS||!1,Le=!1!==e.ALLOW_SELF_CLOSE_IN_ATTR,De=e.SAFE_FOR_TEMPLATES||!1,Ce=!1!==e.SAFE_FOR_XML,Oe=e.WHOLE_DOCUMENT||!1,ke=e.RETURN_DOM||!1,Me=e.RETURN_DOM_FRAGMENT||!1,Ie=e.RETURN_TRUSTED_TYPE||!1,ve=e.FORCE_BODY||!1,Ue=!1!==e.SANITIZE_DOM,Pe=e.SANITIZE_NAMED_PROPS||!1,He=!1!==e.KEEP_CONTENT,ze=e.IN_PLACE||!1,ge=e.ALLOWED_URI_REGEXP||Y,Ze=e.NAMESPACE||Ve,_e=e.CUSTOM_ELEMENT_HANDLING||{},e.CUSTOM_ELEMENT_HANDLING&<(e.CUSTOM_ELEMENT_HANDLING.tagNameCheck)&&(_e.tagNameCheck=e.CUSTOM_ELEMENT_HANDLING.tagNameCheck),e.CUSTOM_ELEMENT_HANDLING&<(e.CUSTOM_ELEMENT_HANDLING.attributeNameCheck)&&(_e.attributeNameCheck=e.CUSTOM_ELEMENT_HANDLING.attributeNameCheck),e.CUSTOM_ELEMENT_HANDLING&&"boolean"==typeof e.CUSTOM_ELEMENT_HANDLING.allowCustomizedBuiltInElements&&(_e.allowCustomizedBuiltInElements=e.CUSTOM_ELEMENT_HANDLING.allowCustomizedBuiltInElements),De&&(Re=!1),Me&&(ke=!0),Be&&(Te=S({},M),Ee=[],!0===Be.html&&(S(Te,D),S(Ee,I)),!0===Be.svg&&(S(Te,C),S(Ee,U),S(Ee,F)),!0===Be.svgFilters&&(S(Te,O),S(Ee,U),S(Ee,F)),!0===Be.mathMl&&(S(Te,v),S(Ee,P),S(Ee,F))),e.ADD_TAGS&&(Te===ye&&(Te=w(Te)),S(Te,e.ADD_TAGS,rt)),e.ADD_ATTR&&(Ee===Ae&&(Ee=w(Ee)),S(Ee,e.ADD_ATTR,rt)),e.ADD_URI_SAFE_ATTR&&S(Xe,e.ADD_URI_SAFE_ATTR,rt),e.FORBID_CONTENTS&&(We===Ge&&(We=w(We)),S(We,e.FORBID_CONTENTS,rt)),He&&(Te["#text"]=!0),Oe&&S(Te,["html","head","body"]),Te.table&&(S(Te,["tbody"]),delete Ne.tbody),e.TRUSTED_TYPES_POLICY){if("function"!=typeof e.TRUSTED_TYPES_POLICY.createHTML)throw _('TRUSTED_TYPES_POLICY configuration option must provide a "createHTML" 
hook.');if("function"!=typeof e.TRUSTED_TYPES_POLICY.createScriptURL)throw _('TRUSTED_TYPES_POLICY configuration option must provide a "createScriptURL" hook.');ee=e.TRUSTED_TYPES_POLICY,te=ee.createHTML("")}else void 0===ee&&(ee=Z(G,c)),null!==ee&&"string"==typeof te&&(te=ee.createHTML(""));i&&i(e),it=e}},st=S({},["mi","mo","mn","ms","mtext"]),ut=S({},["foreignobject","desc","title","annotation-xml"]),mt=S({},["title","style","font","a","script"]),pt=S({},[...C,...O,...x]),ft=S({},[...v,...k]),dt=function(e){let t=Q(e);t&&t.tagName||(t={namespaceURI:Ze,tagName:"template"});const n=f(e.tagName),o=f(t.tagName);return!!Qe[e.namespaceURI]&&(e.namespaceURI===Ke?t.namespaceURI===Ve?"svg"===n:t.namespaceURI===$e?"svg"===n&&("annotation-xml"===o||st[o]):Boolean(pt[n]):e.namespaceURI===$e?t.namespaceURI===Ve?"math"===n:t.namespaceURI===Ke?"math"===n&&ut[o]:Boolean(ft[n]):e.namespaceURI===Ve?!(t.namespaceURI===Ke&&!ut[o])&&(!(t.namespaceURI===$e&&!st[o])&&(!ft[n]&&(mt[n]||!pt[n]))):!("application/xhtml+xml"!==tt||!Qe[e.namespaceURI]))},ht=function(e){p(o.removed,{element:e});try{e.parentNode.removeChild(e)}catch(t){e.remove()}},gt=function(e,t){try{p(o.removed,{attribute:t.getAttributeNode(e),from:t})}catch(e){p(o.removed,{attribute:null,from:t})}if(t.removeAttribute(e),"is"===e&&!Ee[e])if(ke||Me)try{ht(t)}catch(e){}else try{t.setAttribute(e,"")}catch(e){}},Tt=function(e){let t=null,n=null;if(ve)e=" "+e;else{const t=h(e,/^[\r\n\t ]+/);n=t&&t[0]}"application/xhtml+xml"===tt&&Ze===Ve&&(e=''+e+"");const o=ee?ee.createHTML(e):e;if(Ze===Ve)try{t=(new W).parseFromString(o,tt)}catch(e){}if(!t||!t.documentElement){t=ne.createDocument(Ze,"template",null);try{t.documentElement.innerHTML=Je?te:o}catch(e){}}const i=t.body||t.documentElement;return e&&n&&i.insertBefore(r.createTextNode(n),i.childNodes[0]||null),Ze===Ve?ie.call(t,Oe?"html":"body")[0]:Oe?t.documentElement:i},yt=function(e){return oe.call(e.ownerDocument||e,e,H.SHOW_ELEMENT|H.SHOW_COMMENT|H.SHOW_TEXT|H.SHOW_PROCESSING_INSTRUCTION|H.SHOW_CDATA_SECTION,null)},Et=function(e){return e instanceof B&&("string"!=typeof e.nodeName||"string"!=typeof e.textContent||"function"!=typeof e.removeChild||!(e.attributes instanceof z)||"function"!=typeof e.removeAttribute||"function"!=typeof e.setAttribute||"string"!=typeof e.namespaceURI||"function"!=typeof e.insertBefore||"function"!=typeof e.hasChildNodes)},At=function(e){return"function"==typeof b&&e instanceof b},_t=function(e,t,n){le[e]&&u(le[e],(e=>{e.call(o,t,n,it)}))},Nt=function(e){let t=null;if(_t("beforeSanitizeElements",e,null),Et(e))return ht(e),!0;const n=rt(e.nodeName);if(_t("uponSanitizeElement",e,{tagName:n,allowedTags:Te}),e.hasChildNodes()&&!At(e.firstElementChild)&&A(/<[/\w]/g,e.innerHTML)&&A(/<[/\w]/g,e.textContent))return ht(e),!0;if(7===e.nodeType)return ht(e),!0;if(Ce&&8===e.nodeType&&A(/<[/\w]/g,e.data))return ht(e),!0;if(!Te[n]||Ne[n]){if(!Ne[n]&&St(n)){if(_e.tagNameCheck instanceof RegExp&&A(_e.tagNameCheck,n))return!1;if(_e.tagNameCheck instanceof Function&&_e.tagNameCheck(n))return!1}if(He&&!We[n]){const t=Q(e)||e.parentNode,n=J(e)||e.childNodes;if(n&&t){for(let o=n.length-1;o>=0;--o)t.insertBefore(X(n[o],!0),$(e))}}return ht(e),!0}return e instanceof R&&!dt(e)?(ht(e),!0):"noscript"!==n&&"noembed"!==n&&"noframes"!==n||!A(/<\/no(script|embed|frames)/i,e.innerHTML)?(De&&3===e.nodeType&&(t=e.textContent,u([ce,se,ue],(e=>{t=g(t,e," 
")})),e.textContent!==t&&(p(o.removed,{element:e.cloneNode()}),e.textContent=t)),_t("afterSanitizeElements",e,null),!1):(ht(e),!0)},bt=function(e,t,n){if(Ue&&("id"===t||"name"===t)&&(n in r||n in at))return!1;if(Re&&!be[t]&&A(me,t));else if(Se&&A(pe,t));else if(!Ee[t]||be[t]){if(!(St(e)&&(_e.tagNameCheck instanceof RegExp&&A(_e.tagNameCheck,e)||_e.tagNameCheck instanceof Function&&_e.tagNameCheck(e))&&(_e.attributeNameCheck instanceof RegExp&&A(_e.attributeNameCheck,t)||_e.attributeNameCheck instanceof Function&&_e.attributeNameCheck(t))||"is"===t&&_e.allowCustomizedBuiltInElements&&(_e.tagNameCheck instanceof RegExp&&A(_e.tagNameCheck,n)||_e.tagNameCheck instanceof Function&&_e.tagNameCheck(n))))return!1}else if(Xe[t]);else if(A(ge,g(n,de,"")));else if("src"!==t&&"xlink:href"!==t&&"href"!==t||"script"===e||0!==T(n,"data:")||!Ye[e]){if(we&&!A(fe,g(n,de,"")));else if(n)return!1}else;return!0},St=function(e){return"annotation-xml"!==e&&h(e,he)},Rt=function(e){_t("beforeSanitizeAttributes",e,null);const{attributes:t}=e;if(!t)return;const n={attrName:"",attrValue:"",keepAttr:!0,allowedAttributes:Ee};let r=t.length;for(;r--;){const i=t[r],{name:a,namespaceURI:l,value:c}=i,s=rt(a);let p="value"===a?c:y(c);if(n.attrName=s,n.attrValue=p,n.keepAttr=!0,n.forceKeepAttr=void 0,_t("uponSanitizeAttribute",e,n),p=n.attrValue,n.forceKeepAttr)continue;if(gt(a,e),!n.keepAttr)continue;if(!Le&&A(/\/>/i,p)){gt(a,e);continue}De&&u([ce,se,ue],(e=>{p=g(p,e," ")}));const f=rt(e.nodeName);if(bt(f,s,p)){if(!Pe||"id"!==s&&"name"!==s||(gt(a,e),p=Fe+p),ee&&"object"==typeof G&&"function"==typeof G.getAttributeType)if(l);else switch(G.getAttributeType(f,s)){case"TrustedHTML":p=ee.createHTML(p);break;case"TrustedScriptURL":p=ee.createScriptURL(p)}try{l?e.setAttributeNS(l,a,p):e.setAttribute(a,p),m(o.removed)}catch(e){}}}_t("afterSanitizeAttributes",e,null)},wt=function e(t){let n=null;const o=yt(t);for(_t("beforeSanitizeShadowDOM",t,null);n=o.nextNode();)_t("uponSanitizeShadowNode",n,null),Nt(n)||(n.content instanceof s&&e(n.content),Rt(n));_t("afterSanitizeShadowDOM",t,null)};return o.sanitize=function(e){let t=arguments.length>1&&void 0!==arguments[1]?arguments[1]:{},n=null,r=null,i=null,l=null;if(Je=!e,Je&&(e="\x3c!--\x3e"),"string"!=typeof e&&!At(e)){if("function"!=typeof e.toString)throw _("toString is not a function");if("string"!=typeof(e=e.toString()))throw _("dirty is not a string, aborting")}if(!o.isSupported)return e;if(xe||ct(t),o.removed=[],"string"==typeof e&&(ze=!1),ze){if(e.nodeName){const t=rt(e.nodeName);if(!Te[t]||Ne[t])throw _("root node is forbidden and cannot be sanitized in-place")}}else if(e instanceof b)n=Tt("\x3c!----\x3e"),r=n.ownerDocument.importNode(e,!0),1===r.nodeType&&"BODY"===r.nodeName||"HTML"===r.nodeName?n=r:n.appendChild(r);else{if(!ke&&!De&&!Oe&&-1===e.indexOf("<"))return ee&&Ie?ee.createHTML(e):e;if(n=Tt(e),!n)return ke?null:Ie?te:""}n&&ve&&ht(n.firstChild);const c=yt(ze?e:n);for(;i=c.nextNode();)Nt(i)||(i.content instanceof s&&wt(i.content),Rt(i));if(ze)return e;if(ke){if(Me)for(l=re.call(n.ownerDocument);n.firstChild;)l.appendChild(n.firstChild);else l=n;return(Ee.shadowroot||Ee.shadowrootmode)&&(l=ae.call(a,l,!0)),l}let m=Oe?n.outerHTML:n.innerHTML;return Oe&&Te["!doctype"]&&n.ownerDocument&&n.ownerDocument.doctype&&n.ownerDocument.doctype.name&&A(q,n.ownerDocument.doctype.name)&&(m="\n"+m),De&&u([ce,se,ue],(e=>{m=g(m,e," ")})),ee&&Ie?ee.createHTML(m):m},o.setConfig=function(){let e=arguments.length>0&&void 
0!==arguments[0]?arguments[0]:{};ct(e),xe=!0},o.clearConfig=function(){it=null,xe=!1},o.isValidAttribute=function(e,t,n){it||ct({});const o=rt(e),r=rt(t);return bt(o,r,n)},o.addHook=function(e,t){"function"==typeof t&&(le[e]=le[e]||[],p(le[e],t))},o.removeHook=function(e){if(le[e])return m(le[e])},o.removeHooks=function(e){le[e]&&(le[e]=[])},o.removeAllHooks=function(){le={}},o}();return J}));
3 | //# sourceMappingURL=purify.min.js.map
4 |
--------------------------------------------------------------------------------
/yvann_web_async/svg-path-properties.min.js:
--------------------------------------------------------------------------------
1 | // http://geoexamples.com/path-properties/ v1.2.0 Copyright 2023 Roger Veciana i Rovira
2 | !function(t,n){"object"==typeof exports&&"undefined"!=typeof module?n(exports):"function"==typeof define&&define.amd?define(["exports"],n):n((t="undefined"!=typeof globalThis?globalThis:t||self).svgPathProperties={})}(this,(function(t){"use strict";function n(t,n){for(var e=0;et.length)&&(n=t.length);for(var e=0,i=new Array(n);eu.length&&(t=u.length);var n=f({x:u.x0,y:u.y0},u.rx,u.ry,u.xAxisRotate,u.LargeArcFlag,u.SweepFlag,{x:u.x1,y:u.y1},t/u.length);return{x:n.x,y:n.y}})),i(this,"getTangentAtLength",(function(t){t<0?t=0:t>u.length&&(t=u.length);var n,e=.05,i=u.getPointAtLength(t);t<0?t=0:t>u.length&&(t=u.length);var r=(n=t1&&(n=Math.sqrt(c)*n,e=Math.sqrt(c)*e);var f=(Math.pow(n,2)*Math.pow(e,2)-Math.pow(n,2)*Math.pow(l.y,2)-Math.pow(e,2)*Math.pow(l.x,2))/(Math.pow(n,2)*Math.pow(l.y,2)+Math.pow(e,2)*Math.pow(l.x,2));f=f<0?0:f;var y=(r!==h?1:-1)*Math.sqrt(f),v=y*(n*l.y/e),M=y*(-e*l.x/n),L={x:Math.cos(o)*v-Math.sin(o)*M+(t.x+s.x)/2,y:Math.sin(o)*v+Math.cos(o)*M+(t.y+s.y)/2},d={x:(l.x-v)/n,y:(l.y-M)/e},A=w({x:1,y:0},d),b=w(d,{x:(-l.x-v)/n,y:(-l.y-M)/e});!h&&b>0?b-=2*Math.PI:h&&b<0&&(b+=2*Math.PI);var P=A+(b%=2*Math.PI)*a,m=n*Math.cos(P),T=e*Math.sin(P);return{x:Math.cos(o)*m-Math.sin(o)*T+L.x,y:Math.sin(o)*m+Math.cos(o)*T+L.y,ellipticalArcStartAngle:A,ellipticalArcEndAngle:A+b,ellipticalArcAngle:P,ellipticalArcCenter:L,resultantRx:n,resultantRy:e}},y=function(t,n){t=t||500;for(var e,i=0,r=[],h=[],s=n(0),a=0;a0?Math.sqrt(l*l+c):0,y=u*u+c>0?Math.sqrt(u*u+c):0,p=u+Math.sqrt(u*u+c)!==0&&(l+f)/(u+y)!=0?c*Math.log(Math.abs((l+f)/(u+y))):0;return Math.sqrt(a)/2*(l*f-u*y+p)},_=function(t,n,e){return{x:2*(1-e)*(t[1]-t[0])+2*e*(t[2]-t[1]),y:2*(1-e)*(n[1]-n[0])+2*e*(n[2]-n[1])}};function S(t,n,e){var i=N(1,e,t),r=N(1,e,n),h=i*i+r*r;return Math.sqrt(h)}var N=function t(n,e,i){var r,h,s=i.length-1;if(0===s)return 0;if(0===n){h=0;for(var a=0;a<=s;a++)h+=A[s][a]*Math.pow(1-e,s-a)*Math.pow(e,a)*i[a];return h}r=new Array(s);for(var o=0;o.001;){var a=e(r+h),o=Math.abs(t-a)/n;if(o500)break}return r},j=e((function(t,n,e,r,h,s,a,o){var g=this;i(this,"a",void 0),i(this,"b",void 0),i(this,"c",void 0),i(this,"d",void 0),i(this,"length",void 0),i(this,"getArcLength",void 0),i(this,"getPoint",void 0),i(this,"getDerivative",void 0),i(this,"getTotalLength",(function(){return g.length})),i(this,"getPointAtLength",(function(t){var n=[g.a.x,g.b.x,g.c.x,g.d.x],e=[g.a.y,g.b.y,g.c.y,g.d.y],i=C(t,g.length,(function(t){return g.getArcLength(n,e,t)}));return g.getPoint(n,e,i)})),i(this,"getTangentAtLength",(function(t){var n=[g.a.x,g.b.x,g.c.x,g.d.x],e=[g.a.y,g.b.y,g.c.y,g.d.y],i=C(t,g.length,(function(t){return g.getArcLength(n,e,t)})),r=g.getDerivative(n,e,i),h=Math.sqrt(r.x*r.x+r.y*r.y);return h>0?{x:r.x/h,y:r.y/h}:{x:0,y:0}})),i(this,"getPropertiesAtLength",(function(t){var n,e=[g.a.x,g.b.x,g.c.x,g.d.x],i=[g.a.y,g.b.y,g.c.y,g.d.y],r=C(t,g.length,(function(t){return g.getArcLength(e,i,t)})),h=g.getDerivative(e,i,r),s=Math.sqrt(h.x*h.x+h.y*h.y);n=s>0?{x:h.x/s,y:h.y/s}:{x:0,y:0};var a=g.getPoint(e,i,r);return{x:a.x,y:a.y,tangentX:n.x,tangentY:n.y}})),i(this,"getC",(function(){return g.c})),i(this,"getD",(function(){return g.d})),this.a={x:t,y:n},this.b={x:e,y:r},this.c={x:h,y:s},void 0!==a&&void 0!==o?(this.getArcLength=m,this.getPoint=b,this.getDerivative=P,this.d={x:a,y:o}):(this.getArcLength=q,this.getPoint=T,this.getDerivative=_,this.d={x:0,y:0}),this.length=this.getArcLength([this.a.x,this.b.x,this.c.x,this.d.x],[this.a.y,this.b.y,this.c.y,this.d.y],1)})),O=e((function(t){var 
n=this;i(this,"length",0),i(this,"partial_lengths",[]),i(this,"functions",[]),i(this,"initial_point",null),i(this,"getPartAtLength",(function(t){t<0?t=0:t>n.length&&(t=n.length);for(var e=n.partial_lengths.length-1;n.partial_lengths[e]>=t&&e>0;)e--;return e++,{fraction:t-n.partial_lengths[e-1],i:e}})),i(this,"getTotalLength",(function(){return n.length})),i(this,"getPointAtLength",(function(t){var e=n.getPartAtLength(t),i=n.functions[e.i];if(i)return i.getPointAtLength(e.fraction);if(n.initial_point)return n.initial_point;throw new Error("Wrong function at this part.")})),i(this,"getTangentAtLength",(function(t){var e=n.getPartAtLength(t),i=n.functions[e.i];if(i)return i.getTangentAtLength(e.fraction);if(n.initial_point)return{x:0,y:0};throw new Error("Wrong function at this part.")})),i(this,"getPropertiesAtLength",(function(t){var e=n.getPartAtLength(t),i=n.functions[e.i];if(i)return i.getPropertiesAtLength(e.fraction);if(n.initial_point)return{x:n.initial_point.x,y:n.initial_point.y,tangentX:0,tangentY:0};throw new Error("Wrong function at this part.")})),i(this,"getParts",(function(){for(var t=[],e=0;e0?t:"M0,0").match(o);if(!n)throw new Error("No path elements found in string ".concat(t));return n.reduce((function(t,n){var e=n.charAt(0),i=e.toLowerCase(),h=u(n.substring(1));if("m"===i&&h.length>2&&(t.push([e].concat(r(h.splice(0,2)))),i="l",e="m"===e?"l":"L"),"a"===i.toLowerCase()&&(5===h.length||6===h.length)){var s=n.substring(1).trim().split(" ");h=[Number(s[0]),Number(s[1]),Number(s[2]),Number(s[3].charAt(0)),Number(s[3].charAt(1)),Number(s[3].substring(2)),Number(s[4])]}for(;h.length>=0;){if(h.length===a[i]){t.push([e].concat(r(h.splice(0,a[i]))));break}if(h.length0?(this.length+=e.getTotalLength(),this.functions.push(e),s=[h[y][5]+s[0],h[y][6]+s[1]]):this.functions.push(new l(s[0],s[0],s[1],s[1]));else if("S"===h[y][0]){if(y>0&&["C","c","S","s"].indexOf(h[y-1][0])>-1){if(e){var p=e.getC();e=new j(s[0],s[1],2*s[0]-p.x,2*s[1]-p.y,h[y][1],h[y][2],h[y][3],h[y][4])}}else e=new j(s[0],s[1],s[0],s[1],h[y][1],h[y][2],h[y][3],h[y][4]);e&&(this.length+=e.getTotalLength(),s=[h[y][3],h[y][4]],this.functions.push(e))}else if("s"===h[y][0]){if(y>0&&["C","c","S","s"].indexOf(h[y-1][0])>-1){if(e){var x=e.getC(),v=e.getD();e=new j(s[0],s[1],s[0]+v.x-x.x,s[1]+v.y-x.y,s[0]+h[y][1],s[1]+h[y][2],s[0]+h[y][3],s[1]+h[y][4])}}else e=new j(s[0],s[1],s[0],s[1],s[0]+h[y][1],s[1]+h[y][2],s[0]+h[y][3],s[1]+h[y][4]);e&&(this.length+=e.getTotalLength(),s=[h[y][3]+s[0],h[y][4]+s[1]],this.functions.push(e))}else if("Q"===h[y][0]){if(s[0]==h[y][1]&&s[1]==h[y][2]){var M=new l(h[y][1],h[y][3],h[y][2],h[y][4]);this.length+=M.getTotalLength(),this.functions.push(M)}else e=new j(s[0],s[1],h[y][1],h[y][2],h[y][3],h[y][4],void 0,void 0),this.length+=e.getTotalLength(),this.functions.push(e);s=[h[y][3],h[y][4]],g=[h[y][1],h[y][2]]}else if("q"===h[y][0]){if(0!=h[y][1]||0!=h[y][2])e=new j(s[0],s[1],s[0]+h[y][1],s[1]+h[y][2],s[0]+h[y][3],s[1]+h[y][4],void 0,void 0),this.length+=e.getTotalLength(),this.functions.push(e);else{var w=new l(s[0]+h[y][1],s[0]+h[y][3],s[1]+h[y][2],s[1]+h[y][4]);this.length+=w.getTotalLength(),this.functions.push(w)}g=[s[0]+h[y][1],s[1]+h[y][2]],s=[h[y][3]+s[0],h[y][4]+s[1]]}else if("T"===h[y][0]){if(y>0&&["Q","q","T","t"].indexOf(h[y-1][0])>-1)e=new j(s[0],s[1],2*s[0]-g[0],2*s[1]-g[1],h[y][1],h[y][2],void 0,void 0),this.functions.push(e),this.length+=e.getTotalLength();else{var L=new 
l(s[0],h[y][1],s[1],h[y][2]);this.functions.push(L),this.length+=L.getTotalLength()}g=[2*s[0]-g[0],2*s[1]-g[1]],s=[h[y][1],h[y][2]]}else if("t"===h[y][0]){if(y>0&&["Q","q","T","t"].indexOf(h[y-1][0])>-1)e=new j(s[0],s[1],2*s[0]-g[0],2*s[1]-g[1],s[0]+h[y][1],s[1]+h[y][2],void 0,void 0),this.length+=e.getTotalLength(),this.functions.push(e);else{var d=new l(s[0],s[0]+h[y][1],s[1],s[1]+h[y][2]);this.length+=d.getTotalLength(),this.functions.push(d)}g=[2*s[0]-g[0],2*s[1]-g[1]],s=[h[y][1]+s[0],h[y][2]+s[1]]}else if("A"===h[y][0]){var A=new c(s[0],s[1],h[y][1],h[y][2],h[y][3],1===h[y][4],1===h[y][5],h[y][6],h[y][7]);this.length+=A.getTotalLength(),s=[h[y][6],h[y][7]],this.functions.push(A)}else if("a"===h[y][0]){var b=new c(s[0],s[1],h[y][1],h[y][2],h[y][3],1===h[y][4],1===h[y][5],s[0]+h[y][6],s[1]+h[y][7]);this.length+=b.getTotalLength(),s=[s[0]+h[y][6],s[1]+h[y][7]],this.functions.push(b)}this.partial_lengths.push(this.length)}})),E=e((function(t){var n=this;if(i(this,"inst",void 0),i(this,"getTotalLength",(function(){return n.inst.getTotalLength()})),i(this,"getPointAtLength",(function(t){return n.inst.getPointAtLength(t)})),i(this,"getTangentAtLength",(function(t){return n.inst.getTangentAtLength(t)})),i(this,"getPropertiesAtLength",(function(t){return n.inst.getPropertiesAtLength(t)})),i(this,"getParts",(function(){return n.inst.getParts()})),this.inst=new O(t),!(this instanceof E))return new E(t)}));t.svgPathProperties=E}));
3 |
--------------------------------------------------------------------------------
/yvann_web_async/marked.min.js:
--------------------------------------------------------------------------------
1 | /**
2 | * marked v12.0.1 - a markdown parser
3 | * Copyright (c) 2011-2024, Christopher Jeffrey. (MIT Licensed)
4 | * https://github.com/markedjs/marked
5 | */
6 | !function(e,t){"object"==typeof exports&&"undefined"!=typeof module?t(exports):"function"==typeof define&&define.amd?define(["exports"],t):t((e="undefined"!=typeof globalThis?globalThis:e||self).marked={})}(this,(function(e){"use strict";function t(){return{async:!1,breaks:!1,extensions:null,gfm:!0,hooks:null,pedantic:!1,renderer:null,silent:!1,tokenizer:null,walkTokens:null}}function n(t){e.defaults=t}e.defaults={async:!1,breaks:!1,extensions:null,gfm:!0,hooks:null,pedantic:!1,renderer:null,silent:!1,tokenizer:null,walkTokens:null};const s=/[&<>"']/,r=new RegExp(s.source,"g"),i=/[<>"']|&(?!(#\d{1,7}|#[Xx][a-fA-F0-9]{1,6}|\w+);)/,l=new RegExp(i.source,"g"),o={"&":"&","<":"<",">":">",'"':""","'":"'"},a=e=>o[e];function c(e,t){if(t){if(s.test(e))return e.replace(r,a)}else if(i.test(e))return e.replace(l,a);return e}const h=/&(#(?:\d+)|(?:#x[0-9A-Fa-f]+)|(?:\w+));?/gi;function p(e){return e.replace(h,((e,t)=>"colon"===(t=t.toLowerCase())?":":"#"===t.charAt(0)?"x"===t.charAt(1)?String.fromCharCode(parseInt(t.substring(2),16)):String.fromCharCode(+t.substring(1)):""))}const u=/(^|[^\[])\^/g;function k(e,t){let n="string"==typeof e?e:e.source;t=t||"";const s={replace:(e,t)=>{let r="string"==typeof t?t:t.source;return r=r.replace(u,"$1"),n=n.replace(e,r),s},getRegex:()=>new RegExp(n,t)};return s}function g(e){try{e=encodeURI(e).replace(/%25/g,"%")}catch(e){return null}return e}const f={exec:()=>null};function d(e,t){const n=e.replace(/\|/g,((e,t,n)=>{let s=!1,r=t;for(;--r>=0&&"\\"===n[r];)s=!s;return s?"|":" |"})).split(/ \|/);let s=0;if(n[0].trim()||n.shift(),n.length>0&&!n[n.length-1].trim()&&n.pop(),t)if(n.length>t)n.splice(t);else for(;n.length0)return{type:"space",raw:t[0]}}code(e){const t=this.rules.block.code.exec(e);if(t){const e=t[0].replace(/^ {1,4}/gm,"");return{type:"code",raw:t[0],codeBlockStyle:"indented",text:this.options.pedantic?e:x(e,"\n")}}}fences(e){const t=this.rules.block.fences.exec(e);if(t){const e=t[0],n=function(e,t){const n=e.match(/^(\s+)(?:```)/);if(null===n)return t;const s=n[1];return t.split("\n").map((e=>{const t=e.match(/^\s+/);if(null===t)return e;const[n]=t;return n.length>=s.length?e.slice(s.length):e})).join("\n")}(e,t[3]||"");return{type:"code",raw:e,lang:t[2]?t[2].trim().replace(this.rules.inline.anyPunctuation,"$1"):t[2],text:n}}}heading(e){const t=this.rules.block.heading.exec(e);if(t){let e=t[2].trim();if(/#$/.test(e)){const t=x(e,"#");this.options.pedantic?e=t.trim():t&&!/ $/.test(t)||(e=t.trim())}return{type:"heading",raw:t[0],depth:t[1].length,text:e,tokens:this.lexer.inline(e)}}}hr(e){const t=this.rules.block.hr.exec(e);if(t)return{type:"hr",raw:t[0]}}blockquote(e){const t=this.rules.block.blockquote.exec(e);if(t){const e=x(t[0].replace(/^ *>[ \t]?/gm,""),"\n"),n=this.lexer.state.top;this.lexer.state.top=!0;const s=this.lexer.blockTokens(e);return this.lexer.state.top=n,{type:"blockquote",raw:t[0],tokens:s,text:e}}}list(e){let t=this.rules.block.list.exec(e);if(t){let n=t[1].trim();const s=n.length>1,r={type:"list",raw:"",ordered:s,start:s?+n.slice(0,-1):"",loose:!1,items:[]};n=s?`\\d{1,9}\\${n.slice(-1)}`:`\\${n}`,this.options.pedantic&&(n=s?n:"[*+-]");const i=new RegExp(`^( {0,3}${n})((?:[\t ][^\\n]*)?(?:\\n|$))`);let l="",o="",a=!1;for(;e;){let n=!1;if(!(t=i.exec(e)))break;if(this.rules.block.hr.test(e))break;l=t[0],e=e.substring(l.length);let s=t[2].split("\n",1)[0].replace(/^\t+/,(e=>" ".repeat(3*e.length))),c=e.split("\n",1)[0],h=0;this.options.pedantic?(h=2,o=s.trimStart()):(h=t[2].search(/[^ ]/),h=h>4?1:h,o=s.slice(h),h+=t[1].length);let 
p=!1;if(!s&&/^ *$/.test(c)&&(l+=c+"\n",e=e.substring(c.length+1),n=!0),!n){const t=new RegExp(`^ {0,${Math.min(3,h-1)}}(?:[*+-]|\\d{1,9}[.)])((?:[ \t][^\\n]*)?(?:\\n|$))`),n=new RegExp(`^ {0,${Math.min(3,h-1)}}((?:- *){3,}|(?:_ *){3,}|(?:\\* *){3,})(?:\\n+|$)`),r=new RegExp(`^ {0,${Math.min(3,h-1)}}(?:\`\`\`|~~~)`),i=new RegExp(`^ {0,${Math.min(3,h-1)}}#`);for(;e;){const a=e.split("\n",1)[0];if(c=a,this.options.pedantic&&(c=c.replace(/^ {1,4}(?=( {4})*[^ ])/g," ")),r.test(c))break;if(i.test(c))break;if(t.test(c))break;if(n.test(e))break;if(c.search(/[^ ]/)>=h||!c.trim())o+="\n"+c.slice(h);else{if(p)break;if(s.search(/[^ ]/)>=4)break;if(r.test(s))break;if(i.test(s))break;if(n.test(s))break;o+="\n"+c}p||c.trim()||(p=!0),l+=a+"\n",e=e.substring(a.length+1),s=c.slice(h)}}r.loose||(a?r.loose=!0:/\n *\n *$/.test(l)&&(a=!0));let u,k=null;this.options.gfm&&(k=/^\[[ xX]\] /.exec(o),k&&(u="[ ] "!==k[0],o=o.replace(/^\[[ xX]\] +/,""))),r.items.push({type:"list_item",raw:l,task:!!k,checked:u,loose:!1,text:o,tokens:[]}),r.raw+=l}r.items[r.items.length-1].raw=l.trimEnd(),r.items[r.items.length-1].text=o.trimEnd(),r.raw=r.raw.trimEnd();for(let e=0;e"space"===e.type)),n=t.length>0&&t.some((e=>/\n.*\n/.test(e.raw)));r.loose=n}if(r.loose)for(let e=0;e$/,"$1").replace(this.rules.inline.anyPunctuation,"$1"):"",s=t[3]?t[3].substring(1,t[3].length-1).replace(this.rules.inline.anyPunctuation,"$1"):t[3];return{type:"def",tag:e,raw:t[0],href:n,title:s}}}table(e){const t=this.rules.block.table.exec(e);if(!t)return;if(!/[:|]/.test(t[2]))return;const n=d(t[1]),s=t[2].replace(/^\||\| *$/g,"").split("|"),r=t[3]&&t[3].trim()?t[3].replace(/\n[ \t]*$/,"").split("\n"):[],i={type:"table",raw:t[0],header:[],align:[],rows:[]};if(n.length===s.length){for(const e of s)/^ *-+: *$/.test(e)?i.align.push("right"):/^ *:-+: *$/.test(e)?i.align.push("center"):/^ *:-+ *$/.test(e)?i.align.push("left"):i.align.push(null);for(const e of n)i.header.push({text:e,tokens:this.lexer.inline(e)});for(const e of r)i.rows.push(d(e,i.header.length).map((e=>({text:e,tokens:this.lexer.inline(e)}))));return i}}lheading(e){const t=this.rules.block.lheading.exec(e);if(t)return{type:"heading",raw:t[0],depth:"="===t[2].charAt(0)?1:2,text:t[1],tokens:this.lexer.inline(t[1])}}paragraph(e){const t=this.rules.block.paragraph.exec(e);if(t){const e="\n"===t[1].charAt(t[1].length-1)?t[1].slice(0,-1):t[1];return{type:"paragraph",raw:t[0],text:e,tokens:this.lexer.inline(e)}}}text(e){const t=this.rules.block.text.exec(e);if(t)return{type:"text",raw:t[0],text:t[0],tokens:this.lexer.inline(t[0])}}escape(e){const t=this.rules.inline.escape.exec(e);if(t)return{type:"escape",raw:t[0],text:c(t[1])}}tag(e){const t=this.rules.inline.tag.exec(e);if(t)return!this.lexer.state.inLink&&/^/i.test(t[0])&&(this.lexer.state.inLink=!1),!this.lexer.state.inRawBlock&&/^<(pre|code|kbd|script)(\s|>)/i.test(t[0])?this.lexer.state.inRawBlock=!0:this.lexer.state.inRawBlock&&/^<\/(pre|code|kbd|script)(\s|>)/i.test(t[0])&&(this.lexer.state.inRawBlock=!1),{type:"html",raw:t[0],inLink:this.lexer.state.inLink,inRawBlock:this.lexer.state.inRawBlock,block:!1,text:t[0]}}link(e){const t=this.rules.inline.link.exec(e);if(t){const e=t[2].trim();if(!this.options.pedantic&&/^$/.test(e))return;const t=x(e.slice(0,-1),"\\");if((e.length-t.length)%2==0)return}else{const e=function(e,t){if(-1===e.indexOf(t[1]))return-1;let n=0;for(let s=0;s-1){const n=(0===t[0].indexOf("!")?5:4)+t[1].length+e;t[2]=t[2].substring(0,e),t[0]=t[0].substring(0,n).trim(),t[3]=""}}let n=t[2],s="";if(this.options.pedantic){const 
e=/^([^'"]*[^\s])\s+(['"])(.*)\2/.exec(n);e&&(n=e[1],s=e[3])}else s=t[3]?t[3].slice(1,-1):"";return n=n.trim(),/^$/.test(e)?n.slice(1):n.slice(1,-1)),b(t,{href:n?n.replace(this.rules.inline.anyPunctuation,"$1"):n,title:s?s.replace(this.rules.inline.anyPunctuation,"$1"):s},t[0],this.lexer)}}reflink(e,t){let n;if((n=this.rules.inline.reflink.exec(e))||(n=this.rules.inline.nolink.exec(e))){const e=t[(n[2]||n[1]).replace(/\s+/g," ").toLowerCase()];if(!e){const e=n[0].charAt(0);return{type:"text",raw:e,text:e}}return b(n,e,n[0],this.lexer)}}emStrong(e,t,n=""){let s=this.rules.inline.emStrongLDelim.exec(e);if(!s)return;if(s[3]&&n.match(/[\p{L}\p{N}]/u))return;if(!(s[1]||s[2]||"")||!n||this.rules.inline.punctuation.exec(n)){const n=[...s[0]].length-1;let r,i,l=n,o=0;const a="*"===s[0][0]?this.rules.inline.emStrongRDelimAst:this.rules.inline.emStrongRDelimUnd;for(a.lastIndex=0,t=t.slice(-1*e.length+n);null!=(s=a.exec(t));){if(r=s[1]||s[2]||s[3]||s[4]||s[5]||s[6],!r)continue;if(i=[...r].length,s[3]||s[4]){l+=i;continue}if((s[5]||s[6])&&n%3&&!((n+i)%3)){o+=i;continue}if(l-=i,l>0)continue;i=Math.min(i,i+l+o);const t=[...s[0]][0].length,a=e.slice(0,n+s.index+t+i);if(Math.min(n,i)%2){const e=a.slice(1,-1);return{type:"em",raw:a,text:e,tokens:this.lexer.inlineTokens(e)}}const c=a.slice(2,-2);return{type:"strong",raw:a,text:c,tokens:this.lexer.inlineTokens(c)}}}}codespan(e){const t=this.rules.inline.code.exec(e);if(t){let e=t[2].replace(/\n/g," ");const n=/[^ ]/.test(e),s=/^ /.test(e)&&/ $/.test(e);return n&&s&&(e=e.substring(1,e.length-1)),e=c(e,!0),{type:"codespan",raw:t[0],text:e}}}br(e){const t=this.rules.inline.br.exec(e);if(t)return{type:"br",raw:t[0]}}del(e){const t=this.rules.inline.del.exec(e);if(t)return{type:"del",raw:t[0],text:t[2],tokens:this.lexer.inlineTokens(t[2])}}autolink(e){const t=this.rules.inline.autolink.exec(e);if(t){let e,n;return"@"===t[2]?(e=c(t[1]),n="mailto:"+e):(e=c(t[1]),n=e),{type:"link",raw:t[0],text:e,href:n,tokens:[{type:"text",raw:e,text:e}]}}}url(e){let t;if(t=this.rules.inline.url.exec(e)){let e,n;if("@"===t[2])e=c(t[0]),n="mailto:"+e;else{let s;do{s=t[0],t[0]=this.rules.inline._backpedal.exec(t[0])?.[0]??""}while(s!==t[0]);e=c(t[0]),n="www."===t[1]?"http://"+t[0]:t[0]}return{type:"link",raw:t[0],text:e,href:n,tokens:[{type:"text",raw:e,text:e}]}}}inlineText(e){const t=this.rules.inline.text.exec(e);if(t){let e;return e=this.lexer.state.inRawBlock?t[0]:c(t[0]),{type:"text",raw:t[0],text:e}}}}const m=/^ {0,3}((?:-[\t ]*){3,}|(?:_[ \t]*){3,}|(?:\*[ \t]*){3,})(?:\n+|$)/,y=/(?:[*+-]|\d{1,9}[.)])/,$=k(/^(?!bull |blockCode|fences|blockquote|heading|html)((?:.|\n(?!\s*?\n|bull |blockCode|fences|blockquote|heading|html))+?)\n {0,3}(=+|-+) *(?:\n+|$)/).replace(/bull/g,y).replace(/blockCode/g,/ {4}/).replace(/fences/g,/ {0,3}(?:`{3,}|~{3,})/).replace(/blockquote/g,/ {0,3}>/).replace(/heading/g,/ {0,3}#{1,6}/).replace(/html/g,/ {0,3}<[^\n>]+>\n/).getRegex(),z=/^([^\n]+(?:\n(?!hr|heading|lheading|blockquote|fences|list|html|table| +\n)[^\n]+)*)/,T=/(?!\s*\])(?:\\.|[^\[\]\\])+/,R=k(/^ {0,3}\[(label)\]: *(?:\n *)?([^<\s][^\s]*|<.*?>)(?:(?: +(?:\n *)?| *\n *)(title))? 
*(?:\n+|$)/).replace("label",T).replace("title",/(?:"(?:\\"?|[^"\\])*"|'[^'\n]*(?:\n[^'\n]+)*\n?'|\([^()]*\))/).getRegex(),_=k(/^( {0,3}bull)([ \t][^\n]+?)?(?:\n|$)/).replace(/bull/g,y).getRegex(),A="address|article|aside|base|basefont|blockquote|body|caption|center|col|colgroup|dd|details|dialog|dir|div|dl|dt|fieldset|figcaption|figure|footer|form|frame|frameset|h[1-6]|head|header|hr|html|iframe|legend|li|link|main|menu|menuitem|meta|nav|noframes|ol|optgroup|option|p|param|search|section|summary|table|tbody|td|tfoot|th|thead|title|tr|track|ul",S=/|$))/,I=k("^ {0,3}(?:<(script|pre|style|textarea)[\\s>][\\s\\S]*?(?:\\1>[^\\n]*\\n+|$)|comment[^\\n]*(\\n+|$)|<\\?[\\s\\S]*?(?:\\?>\\n*|$)|\\n*|$)|\\n*|$)|?(tag)(?: +|\\n|/?>)[\\s\\S]*?(?:(?:\\n *)+\\n|$)|<(?!script|pre|style|textarea)([a-z][\\w-]*)(?:attribute)*? */?>(?=[ \\t]*(?:\\n|$))[\\s\\S]*?(?:(?:\\n *)+\\n|$)|(?!script|pre|style|textarea)[a-z][\\w-]*\\s*>(?=[ \\t]*(?:\\n|$))[\\s\\S]*?(?:(?:\\n *)+\\n|$))","i").replace("comment",S).replace("tag",A).replace("attribute",/ +[a-zA-Z:_][\w.:-]*(?: *= *"[^"\n]*"| *= *'[^'\n]*'| *= *[^\s"'=<>`]+)?/).getRegex(),E=k(z).replace("hr",m).replace("heading"," {0,3}#{1,6}(?:\\s|$)").replace("|lheading","").replace("|table","").replace("blockquote"," {0,3}>").replace("fences"," {0,3}(?:`{3,}(?=[^`\\n]*\\n)|~{3,})[^\\n]*\\n").replace("list"," {0,3}(?:[*+-]|1[.)]) ").replace("html","?(?:tag)(?: +|\\n|/?>)|<(?:script|pre|style|textarea|!--)").replace("tag",A).getRegex(),q={blockquote:k(/^( {0,3}> ?(paragraph|[^\n]*)(?:\n|$))+/).replace("paragraph",E).getRegex(),code:/^( {4}[^\n]+(?:\n(?: *(?:\n|$))*)?)+/,def:R,fences:/^ {0,3}(`{3,}(?=[^`\n]*(?:\n|$))|~{3,})([^\n]*)(?:\n|$)(?:|([\s\S]*?)(?:\n|$))(?: {0,3}\1[~`]* *(?=\n|$)|$)/,heading:/^ {0,3}(#{1,6})(?=\s|$)(.*)(?:\n+|$)/,hr:m,html:I,lheading:$,list:_,newline:/^(?: *(?:\n|$))+/,paragraph:E,table:f,text:/^[^\n]+/},Z=k("^ *([^\\n ].*)\\n {0,3}((?:\\| *)?:?-+:? *(?:\\| *:?-+:? *)*(?:\\| *)?)(?:\\n((?:(?! *\\n|hr|heading|blockquote|code|fences|list|html).*(?:\\n|$))*)\\n*|$)").replace("hr",m).replace("heading"," {0,3}#{1,6}(?:\\s|$)").replace("blockquote"," {0,3}>").replace("code"," {4}[^\\n]").replace("fences"," {0,3}(?:`{3,}(?=[^`\\n]*\\n)|~{3,})[^\\n]*\\n").replace("list"," {0,3}(?:[*+-]|1[.)]) ").replace("html","?(?:tag)(?: +|\\n|/?>)|<(?:script|pre|style|textarea|!--)").replace("tag",A).getRegex(),L={...q,table:Z,paragraph:k(z).replace("hr",m).replace("heading"," {0,3}#{1,6}(?:\\s|$)").replace("|lheading","").replace("table",Z).replace("blockquote"," {0,3}>").replace("fences"," {0,3}(?:`{3,}(?=[^`\\n]*\\n)|~{3,})[^\\n]*\\n").replace("list"," {0,3}(?:[*+-]|1[.)]) ").replace("html","?(?:tag)(?: +|\\n|/?>)|<(?:script|pre|style|textarea|!--)").replace("tag",A).getRegex()},P={...q,html:k("^ *(?:comment *(?:\\n|\\s*$)|<(tag)[\\s\\S]+?\\1> *(?:\\n{2,}|\\s*$)| \\s]*)*?/?> *(?:\\n{2,}|\\s*$))").replace("comment",S).replace(/tag/g,"(?!(?:a|em|strong|small|s|cite|q|dfn|abbr|data|time|code|var|samp|kbd|sub|sup|i|b|u|mark|ruby|rt|rp|bdi|bdo|span|br|wbr|ins|del|img)\\b)\\w+(?!:|[^\\w\\s@]*@)\\b").getRegex(),def:/^ *\[([^\]]+)\]: *([^\s>]+)>?(?: +(["(][^\n]+[")]))? 
*(?:\n+|$)/,heading:/^(#{1,6})(.*)(?:\n+|$)/,fences:f,lheading:/^(.+?)\n {0,3}(=+|-+) *(?:\n+|$)/,paragraph:k(z).replace("hr",m).replace("heading"," *#{1,6} *[^\n]").replace("lheading",$).replace("|table","").replace("blockquote"," {0,3}>").replace("|fences","").replace("|list","").replace("|html","").replace("|tag","").getRegex()},Q=/^\\([!"#$%&'()*+,\-./:;<=>?@\[\]\\^_`{|}~])/,v=/^( {2,}|\\)\n(?!\s*$)/,B="\\p{P}\\p{S}",C=k(/^((?![*_])[\spunctuation])/,"u").replace(/punctuation/g,B).getRegex(),M=k(/^(?:\*+(?:((?!\*)[punct])|[^\s*]))|^_+(?:((?!_)[punct])|([^\s_]))/,"u").replace(/punct/g,B).getRegex(),O=k("^[^_*]*?__[^_*]*?\\*[^_*]*?(?=__)|[^*]+(?=[^*])|(?!\\*)[punct](\\*+)(?=[\\s]|$)|[^punct\\s](\\*+)(?!\\*)(?=[punct\\s]|$)|(?!\\*)[punct\\s](\\*+)(?=[^punct\\s])|[\\s](\\*+)(?!\\*)(?=[punct])|(?!\\*)[punct](\\*+)(?!\\*)(?=[punct])|[^punct\\s](\\*+)(?=[^punct\\s])","gu").replace(/punct/g,B).getRegex(),D=k("^[^_*]*?\\*\\*[^_*]*?_[^_*]*?(?=\\*\\*)|[^_]+(?=[^_])|(?!_)[punct](_+)(?=[\\s]|$)|[^punct\\s](_+)(?!_)(?=[punct\\s]|$)|(?!_)[punct\\s](_+)(?=[^punct\\s])|[\\s](_+)(?!_)(?=[punct])|(?!_)[punct](_+)(?!_)(?=[punct])","gu").replace(/punct/g,B).getRegex(),j=k(/\\([punct])/,"gu").replace(/punct/g,B).getRegex(),H=k(/^<(scheme:[^\s\x00-\x1f<>]*|email)>/).replace("scheme",/[a-zA-Z][a-zA-Z0-9+.-]{1,31}/).replace("email",/[a-zA-Z0-9.!#$%&'*+/=?^_`{|}~-]+(@)[a-zA-Z0-9](?:[a-zA-Z0-9-]{0,61}[a-zA-Z0-9])?(?:\.[a-zA-Z0-9](?:[a-zA-Z0-9-]{0,61}[a-zA-Z0-9])?)+(?![-_])/).getRegex(),U=k(S).replace("(?:--\x3e|$)","--\x3e").getRegex(),X=k("^comment|^[a-zA-Z][\\w:-]*\\s*>|^<[a-zA-Z][\\w-]*(?:attribute)*?\\s*/?>|^<\\?[\\s\\S]*?\\?>|^|^").replace("comment",U).replace("attribute",/\s+[a-zA-Z:_][\w.:-]*(?:\s*=\s*"[^"]*"|\s*=\s*'[^']*'|\s*=\s*[^\s"'=<>`]+)?/).getRegex(),F=/(?:\[(?:\\.|[^\[\]\\])*\]|\\.|`[^`]*`|[^\[\]\\`])*?/,N=k(/^!?\[(label)\]\(\s*(href)(?:\s+(title))?\s*\)/).replace("label",F).replace("href",/<(?:\\.|[^\n<>\\])+>|[^\s\x00-\x1f]*/).replace("title",/"(?:\\"?|[^"\\])*"|'(?:\\'?|[^'\\])*'|\((?:\\\)?|[^)\\])*\)/).getRegex(),G=k(/^!?\[(label)\]\[(ref)\]/).replace("label",F).replace("ref",T).getRegex(),J=k(/^!?\[(ref)\](?:\[\])?/).replace("ref",T).getRegex(),K={_backpedal:f,anyPunctuation:j,autolink:H,blockSkip:/\[[^[\]]*?\]\([^\(\)]*?\)|`[^`]*?`|<[^<>]*?>/g,br:v,code:/^(`+)([^`]|[^`][\s\S]*?[^`])\1(?!`)/,del:f,emStrongLDelim:M,emStrongRDelimAst:O,emStrongRDelimUnd:D,escape:Q,link:N,nolink:J,punctuation:C,reflink:G,reflinkSearch:k("reflink|nolink(?!\\()","g").replace("reflink",G).replace("nolink",J).getRegex(),tag:X,text:/^(`+|[^`])(?:(?= {2,}\n)|[\s\S]*?(?:(?=[\\t+" ".repeat(n.length)));e;)if(!(this.options.extensions&&this.options.extensions.block&&this.options.extensions.block.some((s=>!!(n=s.call({lexer:this},e,t))&&(e=e.substring(n.raw.length),t.push(n),!0)))))if(n=this.tokenizer.space(e))e=e.substring(n.raw.length),1===n.raw.length&&t.length>0?t[t.length-1].raw+="\n":t.push(n);else if(n=this.tokenizer.code(e))e=e.substring(n.raw.length),s=t[t.length-1],!s||"paragraph"!==s.type&&"text"!==s.type?t.push(n):(s.raw+="\n"+n.raw,s.text+="\n"+n.text,this.inlineQueue[this.inlineQueue.length-1].src=s.text);else if(n=this.tokenizer.fences(e))e=e.substring(n.raw.length),t.push(n);else if(n=this.tokenizer.heading(e))e=e.substring(n.raw.length),t.push(n);else if(n=this.tokenizer.hr(e))e=e.substring(n.raw.length),t.push(n);else if(n=this.tokenizer.blockquote(e))e=e.substring(n.raw.length),t.push(n);else if(n=this.tokenizer.list(e))e=e.substring(n.raw.length),t.push(n);else 
if(n=this.tokenizer.html(e))e=e.substring(n.raw.length),t.push(n);else if(n=this.tokenizer.def(e))e=e.substring(n.raw.length),s=t[t.length-1],!s||"paragraph"!==s.type&&"text"!==s.type?this.tokens.links[n.tag]||(this.tokens.links[n.tag]={href:n.href,title:n.title}):(s.raw+="\n"+n.raw,s.text+="\n"+n.raw,this.inlineQueue[this.inlineQueue.length-1].src=s.text);else if(n=this.tokenizer.table(e))e=e.substring(n.raw.length),t.push(n);else if(n=this.tokenizer.lheading(e))e=e.substring(n.raw.length),t.push(n);else{if(r=e,this.options.extensions&&this.options.extensions.startBlock){let t=1/0;const n=e.slice(1);let s;this.options.extensions.startBlock.forEach((e=>{s=e.call({lexer:this},n),"number"==typeof s&&s>=0&&(t=Math.min(t,s))})),t<1/0&&t>=0&&(r=e.substring(0,t+1))}if(this.state.top&&(n=this.tokenizer.paragraph(r)))s=t[t.length-1],i&&"paragraph"===s.type?(s.raw+="\n"+n.raw,s.text+="\n"+n.text,this.inlineQueue.pop(),this.inlineQueue[this.inlineQueue.length-1].src=s.text):t.push(n),i=r.length!==e.length,e=e.substring(n.raw.length);else if(n=this.tokenizer.text(e))e=e.substring(n.raw.length),s=t[t.length-1],s&&"text"===s.type?(s.raw+="\n"+n.raw,s.text+="\n"+n.text,this.inlineQueue.pop(),this.inlineQueue[this.inlineQueue.length-1].src=s.text):t.push(n);else if(e){const t="Infinite loop on byte: "+e.charCodeAt(0);if(this.options.silent){console.error(t);break}throw new Error(t)}}return this.state.top=!0,t}inline(e,t=[]){return this.inlineQueue.push({src:e,tokens:t}),t}inlineTokens(e,t=[]){let n,s,r,i,l,o,a=e;if(this.tokens.links){const e=Object.keys(this.tokens.links);if(e.length>0)for(;null!=(i=this.tokenizer.rules.inline.reflinkSearch.exec(a));)e.includes(i[0].slice(i[0].lastIndexOf("[")+1,-1))&&(a=a.slice(0,i.index)+"["+"a".repeat(i[0].length-2)+"]"+a.slice(this.tokenizer.rules.inline.reflinkSearch.lastIndex))}for(;null!=(i=this.tokenizer.rules.inline.blockSkip.exec(a));)a=a.slice(0,i.index)+"["+"a".repeat(i[0].length-2)+"]"+a.slice(this.tokenizer.rules.inline.blockSkip.lastIndex);for(;null!=(i=this.tokenizer.rules.inline.anyPunctuation.exec(a));)a=a.slice(0,i.index)+"++"+a.slice(this.tokenizer.rules.inline.anyPunctuation.lastIndex);for(;e;)if(l||(o=""),l=!1,!(this.options.extensions&&this.options.extensions.inline&&this.options.extensions.inline.some((s=>!!(n=s.call({lexer:this},e,t))&&(e=e.substring(n.raw.length),t.push(n),!0)))))if(n=this.tokenizer.escape(e))e=e.substring(n.raw.length),t.push(n);else if(n=this.tokenizer.tag(e))e=e.substring(n.raw.length),s=t[t.length-1],s&&"text"===n.type&&"text"===s.type?(s.raw+=n.raw,s.text+=n.text):t.push(n);else if(n=this.tokenizer.link(e))e=e.substring(n.raw.length),t.push(n);else if(n=this.tokenizer.reflink(e,this.tokens.links))e=e.substring(n.raw.length),s=t[t.length-1],s&&"text"===n.type&&"text"===s.type?(s.raw+=n.raw,s.text+=n.text):t.push(n);else if(n=this.tokenizer.emStrong(e,a,o))e=e.substring(n.raw.length),t.push(n);else if(n=this.tokenizer.codespan(e))e=e.substring(n.raw.length),t.push(n);else if(n=this.tokenizer.br(e))e=e.substring(n.raw.length),t.push(n);else if(n=this.tokenizer.del(e))e=e.substring(n.raw.length),t.push(n);else if(n=this.tokenizer.autolink(e))e=e.substring(n.raw.length),t.push(n);else if(this.state.inLink||!(n=this.tokenizer.url(e))){if(r=e,this.options.extensions&&this.options.extensions.startInline){let t=1/0;const n=e.slice(1);let s;this.options.extensions.startInline.forEach((e=>{s=e.call({lexer:this},n),"number"==typeof 
s&&s>=0&&(t=Math.min(t,s))})),t<1/0&&t>=0&&(r=e.substring(0,t+1))}if(n=this.tokenizer.inlineText(r))e=e.substring(n.raw.length),"_"!==n.raw.slice(-1)&&(o=n.raw.slice(-1)),l=!0,s=t[t.length-1],s&&"text"===s.type?(s.raw+=n.raw,s.text+=n.text):t.push(n);else if(e){const t="Infinite loop on byte: "+e.charCodeAt(0);if(this.options.silent){console.error(t);break}throw new Error(t)}}else e=e.substring(n.raw.length),t.push(n);return t}}class se{options;constructor(t){this.options=t||e.defaults}code(e,t,n){const s=(t||"").match(/^\S*/)?.[0];return e=e.replace(/\n$/,"")+"\n",s?''+(n?e:c(e,!0))+" \n":""+(n?e:c(e,!0))+" \n"}blockquote(e){return`\n${e} \n`}html(e,t){return e}heading(e,t,n){return`${e} \n`}hr(){return" \n"}list(e,t,n){const s=t?"ol":"ul";return"<"+s+(t&&1!==n?' start="'+n+'"':"")+">\n"+e+""+s+">\n"}listitem(e,t,n){return`${e} \n`}checkbox(e){return" '}paragraph(e){return`${e}
\n`}table(e,t){return t&&(t=`${t} `),"\n"}tablerow(e){return`\n${e} \n`}tablecell(e,t){const n=t.header?"th":"td";return(t.align?`<${n} align="${t.align}">`:`<${n}>`)+e+`${n}>\n`}strong(e){return`${e} `}em(e){return`${e} `}codespan(e){return`${e}`}br(){return" "}del(e){return`${e}`}link(e,t,n){const s=g(e);if(null===s)return n;let r='"+n+" ",r}image(e,t,n){const s=g(e);if(null===s)return n;let r=` ",r}text(e){return e}}class re{strong(e){return e}em(e){return e}codespan(e){return e}del(e){return e}html(e){return e}text(e){return e}link(e,t,n){return""+n}image(e,t,n){return""+n}br(){return""}}class ie{options;renderer;textRenderer;constructor(t){this.options=t||e.defaults,this.options.renderer=this.options.renderer||new se,this.renderer=this.options.renderer,this.renderer.options=this.options,this.textRenderer=new re}static parse(e,t){return new ie(t).parse(e)}static parseInline(e,t){return new ie(t).parseInline(e)}parse(e,t=!0){let n="";for(let s=0;s0&&"paragraph"===n.tokens[0].type?(n.tokens[0].text=e+" "+n.tokens[0].text,n.tokens[0].tokens&&n.tokens[0].tokens.length>0&&"text"===n.tokens[0].tokens[0].type&&(n.tokens[0].tokens[0].text=e+" "+n.tokens[0].tokens[0].text)):n.tokens.unshift({type:"text",text:e+" "}):o+=e+" "}o+=this.parse(n.tokens,i),l+=this.renderer.listitem(o,r,!!s)}n+=this.renderer.list(l,t,s);continue}case"html":{const e=r;n+=this.renderer.html(e.text,e.block);continue}case"paragraph":{const e=r;n+=this.renderer.paragraph(this.parseInline(e.tokens));continue}case"text":{let i=r,l=i.tokens?this.parseInline(i.tokens):i.text;for(;s+1{const r=e[s].flat(1/0);n=n.concat(this.walkTokens(r,t))})):e.tokens&&(n=n.concat(this.walkTokens(e.tokens,t)))}}return n}use(...e){const t=this.defaults.extensions||{renderers:{},childTokens:{}};return e.forEach((e=>{const n={...e};if(n.async=this.defaults.async||n.async||!1,e.extensions&&(e.extensions.forEach((e=>{if(!e.name)throw new Error("extension name required");if("renderer"in e){const n=t.renderers[e.name];t.renderers[e.name]=n?function(...t){let s=e.renderer.apply(this,t);return!1===s&&(s=n.apply(this,t)),s}:e.renderer}if("tokenizer"in e){if(!e.level||"block"!==e.level&&"inline"!==e.level)throw new Error("extension level must be 'block' or 'inline'");const n=t[e.level];n?n.unshift(e.tokenizer):t[e.level]=[e.tokenizer],e.start&&("block"===e.level?t.startBlock?t.startBlock.push(e.start):t.startBlock=[e.start]:"inline"===e.level&&(t.startInline?t.startInline.push(e.start):t.startInline=[e.start]))}"childTokens"in e&&e.childTokens&&(t.childTokens[e.name]=e.childTokens)})),n.extensions=t),e.renderer){const t=this.defaults.renderer||new se(this.defaults);for(const n in e.renderer){if(!(n in t))throw new Error(`renderer '${n}' does not exist`);if("options"===n)continue;const s=n,r=e.renderer[s],i=t[s];t[s]=(...e)=>{let n=r.apply(t,e);return!1===n&&(n=i.apply(t,e)),n||""}}n.renderer=t}if(e.tokenizer){const t=this.defaults.tokenizer||new w(this.defaults);for(const n in e.tokenizer){if(!(n in t))throw new Error(`tokenizer '${n}' does not exist`);if(["options","rules","lexer"].includes(n))continue;const s=n,r=e.tokenizer[s],i=t[s];t[s]=(...e)=>{let n=r.apply(t,e);return!1===n&&(n=i.apply(t,e)),n}}n.tokenizer=t}if(e.hooks){const t=this.defaults.hooks||new le;for(const n in e.hooks){if(!(n in t))throw new Error(`hook '${n}' does not exist`);if("options"===n)continue;const s=n,r=e.hooks[s],i=t[s];le.passThroughHooks.has(n)?t[s]=e=>{if(this.defaults.async)return Promise.resolve(r.call(t,e)).then((e=>i.call(t,e)));const n=r.call(t,e);return 
i.call(t,n)}:t[s]=(...e)=>{let n=r.apply(t,e);return!1===n&&(n=i.apply(t,e)),n}}n.hooks=t}if(e.walkTokens){const t=this.defaults.walkTokens,s=e.walkTokens;n.walkTokens=function(e){let n=[];return n.push(s.call(this,e)),t&&(n=n.concat(t.call(this,e))),n}}this.defaults={...this.defaults,...n}})),this}setOptions(e){return this.defaults={...this.defaults,...e},this}lexer(e,t){return ne.lex(e,t??this.defaults)}parser(e,t){return ie.parse(e,t??this.defaults)}#e(e,t){return(n,s)=>{const r={...s},i={...this.defaults,...r};!0===this.defaults.async&&!1===r.async&&(i.silent||console.warn("marked(): The async option was set to true by an extension. The async: false option sent to parse will be ignored."),i.async=!0);const l=this.#t(!!i.silent,!!i.async);if(null==n)return l(new Error("marked(): input parameter is undefined or null"));if("string"!=typeof n)return l(new Error("marked(): input parameter is of type "+Object.prototype.toString.call(n)+", string expected"));if(i.hooks&&(i.hooks.options=i),i.async)return Promise.resolve(i.hooks?i.hooks.preprocess(n):n).then((t=>e(t,i))).then((e=>i.hooks?i.hooks.processAllTokens(e):e)).then((e=>i.walkTokens?Promise.all(this.walkTokens(e,i.walkTokens)).then((()=>e)):e)).then((e=>t(e,i))).then((e=>i.hooks?i.hooks.postprocess(e):e)).catch(l);try{i.hooks&&(n=i.hooks.preprocess(n));let s=e(n,i);i.hooks&&(s=i.hooks.processAllTokens(s)),i.walkTokens&&this.walkTokens(s,i.walkTokens);let r=t(s,i);return i.hooks&&(r=i.hooks.postprocess(r)),r}catch(e){return l(e)}}}#t(e,t){return n=>{if(n.message+="\nPlease report this to https://github.com/markedjs/marked.",e){const e="An error occurred:
"+c(n.message+"",!0)+" ";return t?Promise.resolve(e):e}if(t)return Promise.reject(n);throw n}}}const ae=new oe;function ce(e,t){return ae.parse(e,t)}ce.options=ce.setOptions=function(e){return ae.setOptions(e),ce.defaults=ae.defaults,n(ce.defaults),ce},ce.getDefaults=t,ce.defaults=e.defaults,ce.use=function(...e){return ae.use(...e),ce.defaults=ae.defaults,n(ce.defaults),ce},ce.walkTokens=function(e,t){return ae.walkTokens(e,t)},ce.parseInline=ae.parseInline,ce.Parser=ie,ce.parser=ie.parse,ce.Renderer=se,ce.TextRenderer=re,ce.Lexer=ne,ce.lexer=ne.lex,ce.Tokenizer=w,ce.Hooks=le,ce.parse=ce;const he=ce.options,pe=ce.setOptions,ue=ce.use,ke=ce.walkTokens,ge=ce.parseInline,fe=ce,de=ie.parse,xe=ne.lex;e.Hooks=le,e.Lexer=ne,e.Marked=oe,e.Parser=ie,e.Renderer=se,e.TextRenderer=re,e.Tokenizer=w,e.getDefaults=t,e.lexer=xe,e.marked=ce,e.options=he,e.parse=fe,e.parseInline=ge,e.parser=de,e.setOptions=pe,e.use=ue,e.walkTokens=ke}));
7 |
--------------------------------------------------------------------------------
/LICENCE:
--------------------------------------------------------------------------------
1 | GNU GENERAL PUBLIC LICENSE
2 | Version 3, 29 June 2007
3 |
4 | Copyright (C) 2007 Free Software Foundation, Inc. <https://fsf.org/>
5 | Everyone is permitted to copy and distribute verbatim copies
6 | of this license document, but changing it is not allowed.
7 |
8 | Preamble
9 |
10 | The GNU General Public License is a free, copyleft license for
11 | software and other kinds of works.
12 |
13 | The licenses for most software and other practical works are designed
14 | to take away your freedom to share and change the works. By contrast,
15 | the GNU General Public License is intended to guarantee your freedom to
16 | share and change all versions of a program--to make sure it remains free
17 | software for all its users. We, the Free Software Foundation, use the
18 | GNU General Public License for most of our software; it applies also to
19 | any other work released this way by its authors. You can apply it to
20 | your programs, too.
21 |
22 | When we speak of free software, we are referring to freedom, not
23 | price. Our General Public Licenses are designed to make sure that you
24 | have the freedom to distribute copies of free software (and charge for
25 | them if you wish), that you receive source code or can get it if you
26 | want it, that you can change the software or use pieces of it in new
27 | free programs, and that you know you can do these things.
28 |
29 | To protect your rights, we need to prevent others from denying you
30 | these rights or asking you to surrender the rights. Therefore, you have
31 | certain responsibilities if you distribute copies of the software, or if
32 | you modify it: responsibilities to respect the freedom of others.
33 |
34 | For example, if you distribute copies of such a program, whether
35 | gratis or for a fee, you must pass on to the recipients the same
36 | freedoms that you received. You must make sure that they, too, receive
37 | or can get the source code. And you must show them these terms so they
38 | know their rights.
39 |
40 | Developers that use the GNU GPL protect your rights with two steps:
41 | (1) assert copyright on the software, and (2) offer you this License
42 | giving you legal permission to copy, distribute and/or modify it.
43 |
44 | For the developers' and authors' protection, the GPL clearly explains
45 | that there is no warranty for this free software. For both users' and
46 | authors' sake, the GPL requires that modified versions be marked as
47 | changed, so that their problems will not be attributed erroneously to
48 | authors of previous versions.
49 |
50 | Some devices are designed to deny users access to install or run
51 | modified versions of the software inside them, although the manufacturer
52 | can do so. This is fundamentally incompatible with the aim of
53 | protecting users' freedom to change the software. The systematic
54 | pattern of such abuse occurs in the area of products for individuals to
55 | use, which is precisely where it is most unacceptable. Therefore, we
56 | have designed this version of the GPL to prohibit the practice for those
57 | products. If such problems arise substantially in other domains, we
58 | stand ready to extend this provision to those domains in future versions
59 | of the GPL, as needed to protect the freedom of users.
60 |
61 | Finally, every program is threatened constantly by software patents.
62 | States should not allow patents to restrict development and use of
63 | software on general-purpose computers, but in those that do, we wish to
64 | avoid the special danger that patents applied to a free program could
65 | make it effectively proprietary. To prevent this, the GPL assures that
66 | patents cannot be used to render the program non-free.
67 |
68 | The precise terms and conditions for copying, distribution and
69 | modification follow.
70 |
71 | TERMS AND CONDITIONS
72 |
73 | 0. Definitions.
74 |
75 | "This License" refers to version 3 of the GNU General Public License.
76 |
77 | "Copyright" also means copyright-like laws that apply to other kinds of
78 | works, such as semiconductor masks.
79 |
80 | "The Program" refers to any copyrightable work licensed under this
81 | License. Each licensee is addressed as "you". "Licensees" and
82 | "recipients" may be individuals or organizations.
83 |
84 | To "modify" a work means to copy from or adapt all or part of the work
85 | in a fashion requiring copyright permission, other than the making of an
86 | exact copy. The resulting work is called a "modified version" of the
87 | earlier work or a work "based on" the earlier work.
88 |
89 | A "covered work" means either the unmodified Program or a work based
90 | on the Program.
91 |
92 | To "propagate" a work means to do anything with it that, without
93 | permission, would make you directly or secondarily liable for
94 | infringement under applicable copyright law, except executing it on a
95 | computer or modifying a private copy. Propagation includes copying,
96 | distribution (with or without modification), making available to the
97 | public, and in some countries other activities as well.
98 |
99 | To "convey" a work means any kind of propagation that enables other
100 | parties to make or receive copies. Mere interaction with a user through
101 | a computer network, with no transfer of a copy, is not conveying.
102 |
103 | An interactive user interface displays "Appropriate Legal Notices"
104 | to the extent that it includes a convenient and prominently visible
105 | feature that (1) displays an appropriate copyright notice, and (2)
106 | tells the user that there is no warranty for the work (except to the
107 | extent that warranties are provided), that licensees may convey the
108 | work under this License, and how to view a copy of this License. If
109 | the interface presents a list of user commands or options, such as a
110 | menu, a prominent item in the list meets this criterion.
111 |
112 | 1. Source Code.
113 |
114 | The "source code" for a work means the preferred form of the work
115 | for making modifications to it. "Object code" means any non-source
116 | form of a work.
117 |
118 | A "Standard Interface" means an interface that either is an official
119 | standard defined by a recognized standards body, or, in the case of
120 | interfaces specified for a particular programming language, one that
121 | is widely used among developers working in that language.
122 |
123 | The "System Libraries" of an executable work include anything, other
124 | than the work as a whole, that (a) is included in the normal form of
125 | packaging a Major Component, but which is not part of that Major
126 | Component, and (b) serves only to enable use of the work with that
127 | Major Component, or to implement a Standard Interface for which an
128 | implementation is available to the public in source code form. A
129 | "Major Component", in this context, means a major essential component
130 | (kernel, window system, and so on) of the specific operating system
131 | (if any) on which the executable work runs, or a compiler used to
132 | produce the work, or an object code interpreter used to run it.
133 |
134 | The "Corresponding Source" for a work in object code form means all
135 | the source code needed to generate, install, and (for an executable
136 | work) run the object code and to modify the work, including scripts to
137 | control those activities. However, it does not include the work's
138 | System Libraries, or general-purpose tools or generally available free
139 | programs which are used unmodified in performing those activities but
140 | which are not part of the work. For example, Corresponding Source
141 | includes interface definition files associated with source files for
142 | the work, and the source code for shared libraries and dynamically
143 | linked subprograms that the work is specifically designed to require,
144 | such as by intimate data communication or control flow between those
145 | subprograms and other parts of the work.
146 |
147 | The Corresponding Source need not include anything that users
148 | can regenerate automatically from other parts of the Corresponding
149 | Source.
150 |
151 | The Corresponding Source for a work in source code form is that
152 | same work.
153 |
154 | 2. Basic Permissions.
155 |
156 | All rights granted under this License are granted for the term of
157 | copyright on the Program, and are irrevocable provided the stated
158 | conditions are met. This License explicitly affirms your unlimited
159 | permission to run the unmodified Program. The output from running a
160 | covered work is covered by this License only if the output, given its
161 | content, constitutes a covered work. This License acknowledges your
162 | rights of fair use or other equivalent, as provided by copyright law.
163 |
164 | You may make, run and propagate covered works that you do not
165 | convey, without conditions so long as your license otherwise remains
166 | in force. You may convey covered works to others for the sole purpose
167 | of having them make modifications exclusively for you, or provide you
168 | with facilities for running those works, provided that you comply with
169 | the terms of this License in conveying all material for which you do
170 | not control copyright. Those thus making or running the covered works
171 | for you must do so exclusively on your behalf, under your direction
172 | and control, on terms that prohibit them from making any copies of
173 | your copyrighted material outside their relationship with you.
174 |
175 | Conveying under any other circumstances is permitted solely under
176 | the conditions stated below. Sublicensing is not allowed; section 10
177 | makes it unnecessary.
178 |
179 | 3. Protecting Users' Legal Rights From Anti-Circumvention Law.
180 |
181 | No covered work shall be deemed part of an effective technological
182 | measure under any applicable law fulfilling obligations under article
183 | 11 of the WIPO copyright treaty adopted on 20 December 1996, or
184 | similar laws prohibiting or restricting circumvention of such
185 | measures.
186 |
187 | When you convey a covered work, you waive any legal power to forbid
188 | circumvention of technological measures to the extent such circumvention
189 | is effected by exercising rights under this License with respect to
190 | the covered work, and you disclaim any intention to limit operation or
191 | modification of the work as a means of enforcing, against the work's
192 | users, your or third parties' legal rights to forbid circumvention of
193 | technological measures.
194 |
195 | 4. Conveying Verbatim Copies.
196 |
197 | You may convey verbatim copies of the Program's source code as you
198 | receive it, in any medium, provided that you conspicuously and
199 | appropriately publish on each copy an appropriate copyright notice;
200 | keep intact all notices stating that this License and any
201 | non-permissive terms added in accord with section 7 apply to the code;
202 | keep intact all notices of the absence of any warranty; and give all
203 | recipients a copy of this License along with the Program.
204 |
205 | You may charge any price or no price for each copy that you convey,
206 | and you may offer support or warranty protection for a fee.
207 |
208 | 5. Conveying Modified Source Versions.
209 |
210 | You may convey a work based on the Program, or the modifications to
211 | produce it from the Program, in the form of source code under the
212 | terms of section 4, provided that you also meet all of these conditions:
213 |
214 | a) The work must carry prominent notices stating that you modified
215 | it, and giving a relevant date.
216 |
217 | b) The work must carry prominent notices stating that it is
218 | released under this License and any conditions added under section
219 | 7. This requirement modifies the requirement in section 4 to
220 | "keep intact all notices".
221 |
222 | c) You must license the entire work, as a whole, under this
223 | License to anyone who comes into possession of a copy. This
224 | License will therefore apply, along with any applicable section 7
225 | additional terms, to the whole of the work, and all its parts,
226 | regardless of how they are packaged. This License gives no
227 | permission to license the work in any other way, but it does not
228 | invalidate such permission if you have separately received it.
229 |
230 | d) If the work has interactive user interfaces, each must display
231 | Appropriate Legal Notices; however, if the Program has interactive
232 | interfaces that do not display Appropriate Legal Notices, your
233 | work need not make them do so.
234 |
235 | A compilation of a covered work with other separate and independent
236 | works, which are not by their nature extensions of the covered work,
237 | and which are not combined with it such as to form a larger program,
238 | in or on a volume of a storage or distribution medium, is called an
239 | "aggregate" if the compilation and its resulting copyright are not
240 | used to limit the access or legal rights of the compilation's users
241 | beyond what the individual works permit. Inclusion of a covered work
242 | in an aggregate does not cause this License to apply to the other
243 | parts of the aggregate.
244 |
245 | 6. Conveying Non-Source Forms.
246 |
247 | You may convey a covered work in object code form under the terms
248 | of sections 4 and 5, provided that you also convey the
249 | machine-readable Corresponding Source under the terms of this License,
250 | in one of these ways:
251 |
252 | a) Convey the object code in, or embodied in, a physical product
253 | (including a physical distribution medium), accompanied by the
254 | Corresponding Source fixed on a durable physical medium
255 | customarily used for software interchange.
256 |
257 | b) Convey the object code in, or embodied in, a physical product
258 | (including a physical distribution medium), accompanied by a
259 | written offer, valid for at least three years and valid for as
260 | long as you offer spare parts or customer support for that product
261 | model, to give anyone who possesses the object code either (1) a
262 | copy of the Corresponding Source for all the software in the
263 | product that is covered by this License, on a durable physical
264 | medium customarily used for software interchange, for a price no
265 | more than your reasonable cost of physically performing this
266 | conveying of source, or (2) access to copy the
267 | Corresponding Source from a network server at no charge.
268 |
269 | c) Convey individual copies of the object code with a copy of the
270 | written offer to provide the Corresponding Source. This
271 | alternative is allowed only occasionally and noncommercially, and
272 | only if you received the object code with such an offer, in accord
273 | with subsection 6b.
274 |
275 | d) Convey the object code by offering access from a designated
276 | place (gratis or for a charge), and offer equivalent access to the
277 | Corresponding Source in the same way through the same place at no
278 | further charge. You need not require recipients to copy the
279 | Corresponding Source along with the object code. If the place to
280 | copy the object code is a network server, the Corresponding Source
281 | may be on a different server (operated by you or a third party)
282 | that supports equivalent copying facilities, provided you maintain
283 | clear directions next to the object code saying where to find the
284 | Corresponding Source. Regardless of what server hosts the
285 | Corresponding Source, you remain obligated to ensure that it is
286 | available for as long as needed to satisfy these requirements.
287 |
288 | e) Convey the object code using peer-to-peer transmission, provided
289 | you inform other peers where the object code and Corresponding
290 | Source of the work are being offered to the general public at no
291 | charge under subsection 6d.
292 |
293 | A separable portion of the object code, whose source code is excluded
294 | from the Corresponding Source as a System Library, need not be
295 | included in conveying the object code work.
296 |
297 | A "User Product" is either (1) a "consumer product", which means any
298 | tangible personal property which is normally used for personal, family,
299 | or household purposes, or (2) anything designed or sold for incorporation
300 | into a dwelling. In determining whether a product is a consumer product,
301 | doubtful cases shall be resolved in favor of coverage. For a particular
302 | product received by a particular user, "normally used" refers to a
303 | typical or common use of that class of product, regardless of the status
304 | of the particular user or of the way in which the particular user
305 | actually uses, or expects or is expected to use, the product. A product
306 | is a consumer product regardless of whether the product has substantial
307 | commercial, industrial or non-consumer uses, unless such uses represent
308 | the only significant mode of use of the product.
309 |
310 | "Installation Information" for a User Product means any methods,
311 | procedures, authorization keys, or other information required to install
312 | and execute modified versions of a covered work in that User Product from
313 | a modified version of its Corresponding Source. The information must
314 | suffice to ensure that the continued functioning of the modified object
315 | code is in no case prevented or interfered with solely because
316 | modification has been made.
317 |
318 | If you convey an object code work under this section in, or with, or
319 | specifically for use in, a User Product, and the conveying occurs as
320 | part of a transaction in which the right of possession and use of the
321 | User Product is transferred to the recipient in perpetuity or for a
322 | fixed term (regardless of how the transaction is characterized), the
323 | Corresponding Source conveyed under this section must be accompanied
324 | by the Installation Information. But this requirement does not apply
325 | if neither you nor any third party retains the ability to install
326 | modified object code on the User Product (for example, the work has
327 | been installed in ROM).
328 |
329 | The requirement to provide Installation Information does not include a
330 | requirement to continue to provide support service, warranty, or updates
331 | for a work that has been modified or installed by the recipient, or for
332 | the User Product in which it has been modified or installed. Access to a
333 | network may be denied when the modification itself materially and
334 | adversely affects the operation of the network or violates the rules and
335 | protocols for communication across the network.
336 |
337 | Corresponding Source conveyed, and Installation Information provided,
338 | in accord with this section must be in a format that is publicly
339 | documented (and with an implementation available to the public in
340 | source code form), and must require no special password or key for
341 | unpacking, reading or copying.
342 |
343 | 7. Additional Terms.
344 |
345 | "Additional permissions" are terms that supplement the terms of this
346 | License by making exceptions from one or more of its conditions.
347 | Additional permissions that are applicable to the entire Program shall
348 | be treated as though they were included in this License, to the extent
349 | that they are valid under applicable law. If additional permissions
350 | apply only to part of the Program, that part may be used separately
351 | under those permissions, but the entire Program remains governed by
352 | this License without regard to the additional permissions.
353 |
354 | When you convey a copy of a covered work, you may at your option
355 | remove any additional permissions from that copy, or from any part of
356 | it. (Additional permissions may be written to require their own
357 | removal in certain cases when you modify the work.) You may place
358 | additional permissions on material, added by you to a covered work,
359 | for which you have or can give appropriate copyright permission.
360 |
361 | Notwithstanding any other provision of this License, for material you
362 | add to a covered work, you may (if authorized by the copyright holders of
363 | that material) supplement the terms of this License with terms:
364 |
365 | a) Disclaiming warranty or limiting liability differently from the
366 | terms of sections 15 and 16 of this License; or
367 |
368 | b) Requiring preservation of specified reasonable legal notices or
369 | author attributions in that material or in the Appropriate Legal
370 | Notices displayed by works containing it; or
371 |
372 | c) Prohibiting misrepresentation of the origin of that material, or
373 | requiring that modified versions of such material be marked in
374 | reasonable ways as different from the original version; or
375 |
376 | d) Limiting the use for publicity purposes of names of licensors or
377 | authors of the material; or
378 |
379 | e) Declining to grant rights under trademark law for use of some
380 | trade names, trademarks, or service marks; or
381 |
382 | f) Requiring indemnification of licensors and authors of that
383 | material by anyone who conveys the material (or modified versions of
384 | it) with contractual assumptions of liability to the recipient, for
385 | any liability that these contractual assumptions directly impose on
386 | those licensors and authors.
387 |
388 | All other non-permissive additional terms are considered "further
389 | restrictions" within the meaning of section 10. If the Program as you
390 | received it, or any part of it, contains a notice stating that it is
391 | governed by this License along with a term that is a further
392 | restriction, you may remove that term. If a license document contains
393 | a further restriction but permits relicensing or conveying under this
394 | License, you may add to a covered work material governed by the terms
395 | of that license document, provided that the further restriction does
396 | not survive such relicensing or conveying.
397 |
398 | If you add terms to a covered work in accord with this section, you
399 | must place, in the relevant source files, a statement of the
400 | additional terms that apply to those files, or a notice indicating
401 | where to find the applicable terms.
402 |
403 | Additional terms, permissive or non-permissive, may be stated in the
404 | form of a separately written license, or stated as exceptions;
405 | the above requirements apply either way.
406 |
407 | 8. Termination.
408 |
409 | You may not propagate or modify a covered work except as expressly
410 | provided under this License. Any attempt otherwise to propagate or
411 | modify it is void, and will automatically terminate your rights under
412 | this License (including any patent licenses granted under the third
413 | paragraph of section 11).
414 |
415 | However, if you cease all violation of this License, then your
416 | license from a particular copyright holder is reinstated (a)
417 | provisionally, unless and until the copyright holder explicitly and
418 | finally terminates your license, and (b) permanently, if the copyright
419 | holder fails to notify you of the violation by some reasonable means
420 | prior to 60 days after the cessation.
421 |
422 | Moreover, your license from a particular copyright holder is
423 | reinstated permanently if the copyright holder notifies you of the
424 | violation by some reasonable means, this is the first time you have
425 | received notice of violation of this License (for any work) from that
426 | copyright holder, and you cure the violation prior to 30 days after
427 | your receipt of the notice.
428 |
429 | Termination of your rights under this section does not terminate the
430 | licenses of parties who have received copies or rights from you under
431 | this License. If your rights have been terminated and not permanently
432 | reinstated, you do not qualify to receive new licenses for the same
433 | material under section 10.
434 |
435 | 9. Acceptance Not Required for Having Copies.
436 |
437 | You are not required to accept this License in order to receive or
438 | run a copy of the Program. Ancillary propagation of a covered work
439 | occurring solely as a consequence of using peer-to-peer transmission
440 | to receive a copy likewise does not require acceptance. However,
441 | nothing other than this License grants you permission to propagate or
442 | modify any covered work. These actions infringe copyright if you do
443 | not accept this License. Therefore, by modifying or propagating a
444 | covered work, you indicate your acceptance of this License to do so.
445 |
446 | 10. Automatic Licensing of Downstream Recipients.
447 |
448 | Each time you convey a covered work, the recipient automatically
449 | receives a license from the original licensors, to run, modify and
450 | propagate that work, subject to this License. You are not responsible
451 | for enforcing compliance by third parties with this License.
452 |
453 | An "entity transaction" is a transaction transferring control of an
454 | organization, or substantially all assets of one, or subdividing an
455 | organization, or merging organizations. If propagation of a covered
456 | work results from an entity transaction, each party to that
457 | transaction who receives a copy of the work also receives whatever
458 | licenses to the work the party's predecessor in interest had or could
459 | give under the previous paragraph, plus a right to possession of the
460 | Corresponding Source of the work from the predecessor in interest, if
461 | the predecessor has it or can get it with reasonable efforts.
462 |
463 | You may not impose any further restrictions on the exercise of the
464 | rights granted or affirmed under this License. For example, you may
465 | not impose a license fee, royalty, or other charge for exercise of
466 | rights granted under this License, and you may not initiate litigation
467 | (including a cross-claim or counterclaim in a lawsuit) alleging that
468 | any patent claim is infringed by making, using, selling, offering for
469 | sale, or importing the Program or any portion of it.
470 |
471 | 11. Patents.
472 |
473 | A "contributor" is a copyright holder who authorizes use under this
474 | License of the Program or a work on which the Program is based. The
475 | work thus licensed is called the contributor's "contributor version".
476 |
477 | A contributor's "essential patent claims" are all patent claims
478 | owned or controlled by the contributor, whether already acquired or
479 | hereafter acquired, that would be infringed by some manner, permitted
480 | by this License, of making, using, or selling its contributor version,
481 | but do not include claims that would be infringed only as a
482 | consequence of further modification of the contributor version. For
483 | purposes of this definition, "control" includes the right to grant
484 | patent sublicenses in a manner consistent with the requirements of
485 | this License.
486 |
487 | Each contributor grants you a non-exclusive, worldwide, royalty-free
488 | patent license under the contributor's essential patent claims, to
489 | make, use, sell, offer for sale, import and otherwise run, modify and
490 | propagate the contents of its contributor version.
491 |
492 | In the following three paragraphs, a "patent license" is any express
493 | agreement or commitment, however denominated, not to enforce a patent
494 | (such as an express permission to practice a patent or covenant not to
495 | sue for patent infringement). To "grant" such a patent license to a
496 | party means to make such an agreement or commitment not to enforce a
497 | patent against the party.
498 |
499 | If you convey a covered work, knowingly relying on a patent license,
500 | and the Corresponding Source of the work is not available for anyone
501 | to copy, free of charge and under the terms of this License, through a
502 | publicly available network server or other readily accessible means,
503 | then you must either (1) cause the Corresponding Source to be so
504 | available, or (2) arrange to deprive yourself of the benefit of the
505 | patent license for this particular work, or (3) arrange, in a manner
506 | consistent with the requirements of this License, to extend the patent
507 | license to downstream recipients. "Knowingly relying" means you have
508 | actual knowledge that, but for the patent license, your conveying the
509 | covered work in a country, or your recipient's use of the covered work
510 | in a country, would infringe one or more identifiable patents in that
511 | country that you have reason to believe are valid.
512 |
513 | If, pursuant to or in connection with a single transaction or
514 | arrangement, you convey, or propagate by procuring conveyance of, a
515 | covered work, and grant a patent license to some of the parties
516 | receiving the covered work authorizing them to use, propagate, modify
517 | or convey a specific copy of the covered work, then the patent license
518 | you grant is automatically extended to all recipients of the covered
519 | work and works based on it.
520 |
521 | A patent license is "discriminatory" if it does not include within
522 | the scope of its coverage, prohibits the exercise of, or is
523 | conditioned on the non-exercise of one or more of the rights that are
524 | specifically granted under this License. You may not convey a covered
525 | work if you are a party to an arrangement with a third party that is
526 | in the business of distributing software, under which you make payment
527 | to the third party based on the extent of your activity of conveying
528 | the work, and under which the third party grants, to any of the
529 | parties who would receive the covered work from you, a discriminatory
530 | patent license (a) in connection with copies of the covered work
531 | conveyed by you (or copies made from those copies), or (b) primarily
532 | for and in connection with specific products or compilations that
533 | contain the covered work, unless you entered into that arrangement,
534 | or that patent license was granted, prior to 28 March 2007.
535 |
536 | Nothing in this License shall be construed as excluding or limiting
537 | any implied license or other defenses to infringement that may
538 | otherwise be available to you under applicable patent law.
539 |
540 | 12. No Surrender of Others' Freedom.
541 |
542 | If conditions are imposed on you (whether by court order, agreement or
543 | otherwise) that contradict the conditions of this License, they do not
544 | excuse you from the conditions of this License. If you cannot convey a
545 | covered work so as to satisfy simultaneously your obligations under this
546 | License and any other pertinent obligations, then as a consequence you may
547 | not convey it at all. For example, if you agree to terms that obligate you
548 | to collect a royalty for further conveying from those to whom you convey
549 | the Program, the only way you could satisfy both those terms and this
550 | License would be to refrain entirely from conveying the Program.
551 |
552 | 13. Use with the GNU Affero General Public License.
553 |
554 | Notwithstanding any other provision of this License, you have
555 | permission to link or combine any covered work with a work licensed
556 | under version 3 of the GNU Affero General Public License into a single
557 | combined work, and to convey the resulting work. The terms of this
558 | License will continue to apply to the part which is the covered work,
559 | but the special requirements of the GNU Affero General Public License,
560 | section 13, concerning interaction through a network will apply to the
561 | combination as such.
562 |
563 | 14. Revised Versions of this License.
564 |
565 | The Free Software Foundation may publish revised and/or new versions of
566 | the GNU General Public License from time to time. Such new versions will
567 | be similar in spirit to the present version, but may differ in detail to
568 | address new problems or concerns.
569 |
570 | Each version is given a distinguishing version number. If the
571 | Program specifies that a certain numbered version of the GNU General
572 | Public License "or any later version" applies to it, you have the
573 | option of following the terms and conditions either of that numbered
574 | version or of any later version published by the Free Software
575 | Foundation. If the Program does not specify a version number of the
576 | GNU General Public License, you may choose any version ever published
577 | by the Free Software Foundation.
578 |
579 | If the Program specifies that a proxy can decide which future
580 | versions of the GNU General Public License can be used, that proxy's
581 | public statement of acceptance of a version permanently authorizes you
582 | to choose that version for the Program.
583 |
584 | Later license versions may give you additional or different
585 | permissions. However, no additional obligations are imposed on any
586 | author or copyright holder as a result of your choosing to follow a
587 | later version.
588 |
589 | 15. Disclaimer of Warranty.
590 |
591 | THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
592 | APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
593 | HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY
594 | OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
595 | THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
596 | PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
597 | IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
598 | ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
599 |
600 | 16. Limitation of Liability.
601 |
602 | IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
603 | WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
604 | THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
605 | GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
606 | USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
607 | DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
608 | PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
609 | EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
610 | SUCH DAMAGES.
611 |
612 | 17. Interpretation of Sections 15 and 16.
613 |
614 | If the disclaimer of warranty and limitation of liability provided
615 | above cannot be given local legal effect according to their terms,
616 | reviewing courts shall apply local law that most closely approximates
617 | an absolute waiver of all civil liability in connection with the
618 | Program, unless a warranty or assumption of liability accompanies a
619 | copy of the Program in return for a fee.
620 |
621 | END OF TERMS AND CONDITIONS
622 |
623 | How to Apply These Terms to Your New Programs
624 |
625 | If you develop a new program, and you want it to be of the greatest
626 | possible use to the public, the best way to achieve this is to make it
627 | free software which everyone can redistribute and change under these terms.
628 |
629 | To do so, attach the following notices to the program. It is safest
630 | to attach them to the start of each source file to most effectively
631 | state the exclusion of warranty; and each file should have at least
632 | the "copyright" line and a pointer to where the full notice is found.
633 |
634 | <one line to give the program's name and a brief idea of what it does.>
635 | Copyright (C) <year>  <name of author>
636 |
637 | This program is free software: you can redistribute it and/or modify
638 | it under the terms of the GNU General Public License as published by
639 | the Free Software Foundation, either version 3 of the License, or
640 | (at your option) any later version.
641 |
642 | This program is distributed in the hope that it will be useful,
643 | but WITHOUT ANY WARRANTY; without even the implied warranty of
644 | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
645 | GNU General Public License for more details.
646 |
647 | You should have received a copy of the GNU General Public License
648 | along with this program. If not, see <https://www.gnu.org/licenses/>.
649 |
650 | Also add information on how to contact you by electronic and paper mail.
651 |
652 | If the program does terminal interaction, make it output a short
653 | notice like this when it starts in an interactive mode:
654 |
655 | <program> Copyright (C) <year>  <name of author>
656 | This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
657 | This is free software, and you are welcome to redistribute it
658 | under certain conditions; type `show c' for details.
659 |
660 | The hypothetical commands `show w' and `show c' should show the appropriate
661 | parts of the General Public License. Of course, your program's commands
662 | might be different; for a GUI interface, you would use an "about box".
663 |
664 | You should also get your employer (if you work as a programmer) or school,
665 | if any, to sign a "copyright disclaimer" for the program, if necessary.
666 | For more information on this, and how to apply and follow the GNU GPL, see
667 | <https://www.gnu.org/licenses/>.
668 |
669 | The GNU General Public License does not permit incorporating your program
670 | into proprietary programs. If your program is a subroutine library, you
671 | may consider it more useful to permit linking proprietary applications with
672 | the library. If this is what you want to do, use the GNU Lesser General
673 | Public License instead of this License. But first, please read
674 | <https://www.gnu.org/licenses/why-not-lgpl.html>.
--------------------------------------------------------------------------------