├── __init__.py
├── GreatConditioningModifier.jpg
├── workflow
├── 🍄Great_Conditioning_node.png
└── 🍄Great Conditioning Modifier.json
├── web
├── NDDG_Great_Nodes.js
├── docs
│ └── GreatConditioningModifier
│ │ ├── fr.md
│ │ └── en.md
├── great_random_gradient_colorpicker.js
├── interactive_organic_gradient.js
├── Great_thick_border.js
└── great_conditioning_modifier_slider.js
├── README.md
└── NDDG_Great_Nodes.py
/__init__.py:
--------------------------------------------------------------------------------
1 | from .NDDG_Great_Nodes import NODE_CLASS_MAPPINGS, NODE_DISPLAY_NAME_MAPPINGS
2 | WEB_DIRECTORY = "./web"
3 |
--------------------------------------------------------------------------------
/GreatConditioningModifier.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NeoDroleDeGueule/NDDG_Great_Nodes/HEAD/GreatConditioningModifier.jpg
--------------------------------------------------------------------------------
/workflow/🍄Great_Conditioning_node.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NeoDroleDeGueule/NDDG_Great_Nodes/HEAD/workflow/🍄Great_Conditioning_node.png
--------------------------------------------------------------------------------
/web/NDDG_Great_Nodes.js:
--------------------------------------------------------------------------------
1 | import { app } from "../../scripts/app.js";
2 |
// Color scheme shared by every node of this pack.
const GREAT_NODE_THEME = {
  nodeColor: "#353535",
  nodeBgColor: "#080808",
  titleTextColor: "#ffffff"
};

// Per-node color configuration: each listed node gets its own copy of the theme.
const NODE_COLORS = Object.fromEntries(
  [
    "KSamplerQwenRandomNoise",
    "QwenToSDLatent",
    "GreatConditioningModifier",
    "ImageBlendNode",
    "InteractiveOrganicGradientNode"
  ].map((nodeName) => [nodeName, { ...GREAT_NODE_THEME }])
);
31 |
app.registerExtension({
  name: "great.custom.node.colors",

  // Apply the NODE_COLORS palette to every freshly created node that matches.
  async nodeCreated(node) {
    // ComfyUI exposes the registered node class name on `comfyClass`;
    // `constructor.name` is unreliable for dynamically built LiteGraph node
    // classes, so it (and `type`) are kept only as fallbacks.
    const nodeName = node.comfyClass || node.constructor.name || node.type;
    const colors = NODE_COLORS[nodeName];

    if (!colors) return;

    console.log(`[GreatNodes] Applying colors to: ${nodeName}`);

    // Modern ComfyUI reads these directly off the node instance.
    node.color = colors.nodeColor;
    node.bgcolor = colors.nodeBgColor;

    // Fallback for frontends that read colors from node.properties instead.
    if (node.properties) {
      node.properties.color = colors.nodeColor;
      node.properties.bgcolor = colors.nodeBgColor;
    }

    // Wrap onDrawForeground so the canvas fill/stroke styles are restored
    // after the original draw runs.
    // NOTE(review): this wrapper never sets new styles before delegating, so
    // it is currently a no-op; kept for behavioral parity with the original.
    const originalOnDrawForeground = node.onDrawForeground;
    node.onDrawForeground = function (ctx) {
      const oldFillStyle = ctx.fillStyle;
      const oldStrokeStyle = ctx.strokeStyle;

      if (originalOnDrawForeground) {
        originalOnDrawForeground.call(this, ctx);
      }

      ctx.fillStyle = oldFillStyle;
      ctx.strokeStyle = oldStrokeStyle;
    };

    // Request a redraw so the new colors show up immediately.
    node.setDirtyCanvas?.(true, true);
  }
});
74 |
75 |
76 |
--------------------------------------------------------------------------------
/web/docs/GreatConditioningModifier/fr.md:
--------------------------------------------------------------------------------
1 | # Great Conditioning Modifier
2 |
3 | 💡 Conseils généraux d'utilisation
4 |
5 | • Débutants : commencez avec guided_noise (0,2--0,4) et
6 | temperature_scale (0,5--0,7)
7 | • Variations subtiles : perlin_noise (0,1--0,3), semantic_drift (0,2)
8 | • Exploration créative : style_shift (0,5--0,8), spherical_rotation
9 | (0,6--1,0)
10 | • Stabilisation : valeurs négatives sur temperature_scale (--0,3 à
11 | --0,5)
12 | • Effets artistiques : quantize (0,7--1,0), block_shuffle (0,5--0,8)
13 |
14 | N'oubliez pas : changez le seed du nœud pour obtenir différentes
15 | variations avec les mêmes paramètres !
16 |
17 | 📚 Guide des Modificateurs
18 |
19 | 🔹 > degré d'importance pour les valeurs POSITIVES
20 | 🔸 > degré d'importance pour les valeurs NÉGATIVES
21 | ❌ > aucune utilité en positif
22 |
23 | 🔸 semantic_drift 🔹
24 | Dérive sémantique progressive
25 | Ce modificateur mélange progressivement votre prompt original avec une
26 | version plus bruitée, comme si vous ajoutiez un flou artistique à vos
27 | instructions. Avec des valeurs positives, l'image s'éloigne doucement du
28 | prompt initial tout en conservant sa cohérence globale --- imaginez un
29 | concept qui « dérive » vers des interprétations voisines. Avec des
30 | valeurs négatives, l'effet inverse renforce le prompt et réduit la
31 | variabilité. Parfait pour obtenir des variations créatives sans perdre
32 | le sens central.
33 |
34 | 🔸🔸🔸 token_dropout 🔹🔹 *(ne fonctionne pas avec Flux)*
35 | Suppression sélective de tokens
36 | Ce modificateur ignore aléatoirement certaines parties de votre prompt,
37 | comme si vous changiez brièvement de sujet. Avec des valeurs positives,
38 | certaines informations sont omises, produisant des images plus
39 | abstraites ou surprenantes. Avec des valeurs négatives, le modèle se
40 | concentre davantage sur quelques tokens clés.
41 |
42 | 🔸🔸🔸 gradient_amplify 🔹🔹
43 | Amplification des transitions conceptuelles
44 | Il agit comme un contrôle de contraste conceptuel : valeurs positives →
45 | transitions accentuées et rendu dramatique ; valeurs négatives →
46 | transitions adoucies et rendu harmonieux.
47 |
48 | 🔸🔸🔸 guided_noise 🔹🔹🔹
49 | Bruit guidé proportionnel
50 | Ajoute un « bruit créatif » naturel comparable au grain d'une photo.
51 | Valeurs positives (0,2--0,5) → variations naturelles du rendu. Valeurs
52 | négatives → stabilisation et images plus prévisibles. C'est l'un des
53 | modificateurs les plus fiables.
54 |
55 | 🔸 quantize 🔹🔹🔹🔹
56 | Quantification et stabilisation
57 | Réduit la précision des instructions, comme passer d'un large spectre de
58 | couleurs à une palette limitée. Valeurs positives (0,5--1,0) → rendu
59 | stylisé et graphique. Valeurs négatives → ajout de dithering, détails
60 | enrichis et textures organiques.
61 |
62 | 🔸🔸🔸 perlin_noise 🔹🔹🔹🔹
63 | Bruit structuré cohérent
64 | Produit des variations organiques proches de motifs naturels (nuages,
65 | bois, etc.). Positif → transformations progressives et naturelles.
66 | Négatif → fragmentation des motifs.
67 |
68 | 🔸🔸🔸 fourier_filter ❌
69 | Filtrage fréquentiel (non fonctionnel en positif)
70 | Agit comme un filtre passe-bas conceptuel : seules les grandes formes et
71 | idées générales sont conservées.
72 |
73 | 🔸 style_shift 🔹
74 | Changement directionnel de style
75 | Modifie de manière cohérente le style global tout en gardant le sujet.
76 | Utile pour explorer divers rendus stylistiques.
77 |
78 | 🔸 temperature_scale 🔹
79 | Contrôle de créativité
80 | Positif (0,5--1,0) → plus de liberté créative et surprises.
81 | Négatif → interprétation stricte et cohérente.
82 |
83 | 🔸 embedding_mix 🔹 *(ne fonctionne pas avec Flux)*
84 | Mélange et réorganisation interne des concepts.
85 |
86 | 🔸 svd_filter 🔹
87 | Filtrage basé sur la complexité
88 | Positif → enrichit les détails.
89 | Négatif → simplifie l'image.
90 |
91 | 🔸 spherical_rotation 🔹
92 | Rotation conceptuelle (avancé)
93 | Conserve l'intensité du prompt mais change l'interprétation de manière
94 | radicale.
95 |
96 | 🔸 principal_component 🔹
97 | Modification des axes principaux du prompt.
98 |
99 | 🔸 block_shuffle 🔹
100 | Réorganisation en blocs
101 | Crée des compositions inattendues tout en préservant la cohérence
102 | locale.
103 |
--------------------------------------------------------------------------------
/web/great_random_gradient_colorpicker.js:
--------------------------------------------------------------------------------
1 | import { app } from "../../scripts/app.js";
2 | import { ComfyWidgets } from "../../scripts/widgets.js";
3 |
// Create a custom color-picker widget: a colored swatch drawn on the node
// canvas; clicking the swatch opens the browser's native color dialog.
//
// @param {Object} node       owning LiteGraph node (used to redraw its graph)
// @param {string} inputName  label / widget name
// @param {Array}  inputData  ComfyUI input spec; inputData[1].default seeds the value
// @param {Object} app        ComfyUI app instance (unused; kept for widget API parity)
// @returns {Object} the widget object — the caller attaches it manually
function colorWidget(node, inputName, inputData, app) {
    const widget = {
        type: "JOVI_COLOR",
        name: inputName,
        // FIX: the original fell back to `widget.value` inside this very
        // initializer, which throws a ReferenceError (temporal dead zone on
        // the `const`) whenever the input spec carries no default. Fall
        // straight through to the hard-coded grey instead.
        value: inputData[1]?.default || "#c7c7c7",
        options: {},

        // Draw the label plus a clickable color swatch; the swatch rectangle
        // is remembered in `_clickArea` so mouse() can hit-test it.
        draw: function(ctx, node, width, y) {
            const margin = 15;
            const labelWidth = ctx.measureText(inputName).width + 5;
            const colorX = margin + labelWidth;
            const colorWidth = width - colorX - margin;

            if (colorWidth < 10) {
                // Node too narrow for a full-width bar: draw a small swatch
                // on the right instead.
                const colorBoxSize = 20;
                const x = width - colorBoxSize - margin;
                ctx.fillStyle = "#AAA";
                ctx.font = "12px Arial";
                ctx.fillText(inputName, margin, y + 14);
                ctx.fillStyle = this.value || "#c7c7c7";
                ctx.fillRect(x, y + 2, colorBoxSize, colorBoxSize);
                ctx.strokeStyle = "#ffffff";
                ctx.strokeRect(x, y + 2, colorBoxSize, colorBoxSize);
                this._clickArea = { x, width: colorBoxSize, y: y + 2, height: 20 };
                return;
            }

            ctx.fillStyle = "#AAA";
            ctx.font = "12px Arial";
            ctx.fillText(inputName, margin, y + 14);

            ctx.fillStyle = this.value || "#c7c7c7";
            ctx.fillRect(colorX, y + 2, colorWidth, 20);

            ctx.strokeStyle = "#ffffff";
            ctx.strokeRect(colorX, y + 2, colorWidth, 20);

            this._clickArea = {
                x: colorX,
                width: colorWidth,
                y: y + 2,
                height: 20
            };
        },

        // Open the hidden <input type="color"> when the swatch is clicked.
        // Returns true when the event was consumed.
        mouse: function(event, pos, node) {
            const area = this._clickArea || {};
            if (event.type === "pointerdown" &&
                pos[0] >= area.x &&
                pos[0] <= area.x + area.width &&
                pos[1] >= area.y &&
                pos[1] <= area.y + area.height) {

                if (!this.inputEl) {
                    // Lazily build an invisible native color input, reused on
                    // subsequent clicks.
                    this.inputEl = document.createElement("input");
                    this.inputEl.type = "color";
                    this.inputEl.style.position = "absolute";
                    this.inputEl.style.opacity = "0";
                    this.inputEl.style.pointerEvents = "none";
                    this.inputEl.style.zIndex = "-1";
                    document.body.appendChild(this.inputEl);

                    this.inputEl.addEventListener("change", (e) => {
                        this.value = e.target.value;
                        // FIX: guard — a node detached from its graph has no
                        // canvas to mark dirty.
                        node.graph?.setDirtyCanvas(true, false);
                    });
                }

                // Re-attach in case something removed the element from the DOM.
                if (!this.inputEl.parentNode) {
                    document.body.appendChild(this.inputEl);
                }

                this.inputEl.value = this.value;
                this.inputEl.focus();
                this.inputEl.click();

                return true;
            }
            return false;
        },

        computeSize: function(width) {
            return [width, 25];
        }
    };

    // ⚠️ Do NOT attach here — attachment is handled manually in
    // beforeRegisterNodeDef (see the registerExtension call below).
    // node.addCustomWidget(widget);

    return widget;
}
97 |
// Register the custom widget type with ComfyUI's widget registry.
ComfyWidgets.JOVI_COLOR = colorWidget;

app.registerExtension({
    name: "great.random.gradient.colorpicker",

    async beforeRegisterNodeDef(nodeType, nodeData, app) {
        if (nodeData.name !== "GreatRandomOrganicGradientNode") return;

        const previousOnNodeCreated = nodeType.prototype.onNodeCreated;
        nodeType.prototype.onNodeCreated = function() {
            const ret = previousOnNodeCreated?.apply(this, arguments);

            // Inputs whose stock widgets get swapped for the color picker.
            const COLOR_INPUT_NAMES = [
                "color1", "color2", "color3", "color4",
                "color5", "color6", "color7", "color8",
                "background_color"
            ];

            // Rebuild the widget list, replacing each color input with a
            // picker widget that keeps the current value.
            this.widgets = this.widgets.map((existing) => {
                if (!COLOR_INPUT_NAMES.includes(existing.name)) {
                    return existing;
                }
                const picker = colorWidget(this, existing.name, [null, { default: existing.value }], app);
                picker.value = existing.value;
                return picker;
            });

            this.setSize(this.computeSize());

            return ret;
        };
    }
});
--------------------------------------------------------------------------------
/web/docs/GreatConditioningModifier/en.md:
--------------------------------------------------------------------------------
1 | # Great Conditioning Modifier
2 |
3 | 💡 General Usage Tips
4 |
5 | • Beginners: Start with guided_noise (0.2–0.4) and temperature_scale (0.5–0.7) • Subtle variations: perlin_noise (0.1–0.3), semantic_drift (0.2) • Creative exploration: style_shift (0.5–0.8), spherical_rotation (0.6–1.0) • Stabilization: Negative values on temperature_scale (–0.3 to –0.5) • Artistic effects: quantize (0.7–1.0), block_shuffle (0.5–0.8)
6 |
7 | Don't forget: Change the seed of the node to get different variations with the same parameters!
8 |
9 | 📚 Modifier Guide
10 |
11 | 🔹 > degree of importance for POSITIVE value modifications
12 |
13 | 🔸 > degree of importance for NEGATIVE value modifications
14 |
15 | ❌ > no use in Positive
16 |
17 | 🔸 semantic_drift 🔹
18 | Progressive semantic drift
19 | This modifier gradually blends your original prompt with a noisier version of itself, as if adding artistic blur to your instructions. With positive values, the image gently drifts away from the initial prompt while keeping overall coherence — imagine a concept "drifting" into neighboring interpretations. With negative values, the opposite occurs: the prompt is reinforced and becomes less prone to variation. Perfect for achieving creative variations without losing the original meaning.
20 |
21 | 🔸🔸🔸 token_dropout 🔹🔹 (Does not work with Flux)
22 | Selective token removal
23 | This modifier Imagine your prompt is composed of several keywords the model "listens to." This modifier randomly ignores some of them, as if you temporarily changed the subject mid-generation. With positive values, some elements of your description are skipped, creating more abstract or surprising images because the model must "guess" the missing parts. With negative values, the opposite effect forces the model to concentrate on only a few specific tokens, producing cleaner, more focused images.
24 |
25 | 🔸🔸🔸 gradient_amplify 🔹🔹
26 | Amplification of conceptual transitions
27 | This modifier acts on the "transitions" between different elements of your prompt. Think of it as a contrast control for concepts: with positive values, the differences between parts of your description are exaggerated, creating more dramatic images with sharper contrasts between elements. With negative values, transitions are smoothed out, resulting in more harmonious, blended images where everything merges gently. Useful for controlling the dramatic intensity of your generations.
28 |
29 | 🔸🔸🔸 guided_noise 🔹🔹🔹
30 | Proportional guided noise
31 | This is the most universal and predictable modifier. It adds "creative noise" proportional to the intensity of your prompt — like adding film grain to a photo. With positive values (0.2–0.5), you get natural variations of your base image, perfect for generating several similar but unique versions. With negative values, you subtract this noise, stabilizing the image and making it more predictable. It's the ideal starting tool because its effects are progressive and controllable.
32 |
33 | 🔸 quantize 🔹🔹🔹🔹
34 | Quantization and stabilization
35 | This modifier reduces the “precision” of the instructions given to the model, like switching from millions of colors to a limited palette. With high positive values (0.5–1.0), the image becomes more stylized and graphic, with stronger choices and fewer subtle nuances — ideal for a simplified artistic rendering. With negative values, the opposite effect adds dithering (fine grain) that enriches details and micro-variations, creating more organic and textured images.
36 |
37 | 🔸🔸🔸 perlin_noise 🔹🔹🔹🔹
38 | Coherent structured noise
39 | Unlike classic random noise, Perlin noise creates smooth, “natural” variations, like cloud patterns or wood grain. With positive values, your images gain an organic, flowing quality, with soft variations that look natural rather than chaotic. Elements transform gradually instead of changing abruptly. With negative values, you get the opposite effect, which “de-structures” these patterns, creating more fragmented images. Excellent for natural or fluid abstract renderings.
40 |
41 | 🔸🔸🔸 fourier_filter ❌
42 | NON-FUNCTIONAL frequency filtering
43 | This modifier analyzes your prompt like a sound wave and filters certain conceptual “frequencies.” It only works with negative values: it's a low-pass filter that smooths the image by keeping only large shapes and general concepts (like keeping only bass tones). Think of it as an equalizer for your visual concepts.
44 |
45 | 🔸 style_shift 🔹
46 | Directional style shift
47 | This modifier pushes your prompt in a random but coherent “direction” in concept space, like turning a knob that gradually changes the global style. With positive values, you explore significant stylistic variations while keeping the subject — the image may shift from photorealistic to painterly, or from one lighting style to another. With negative values, the direction is reversed. Perfect for discovering unexpected stylistic interpretations of your prompt.
48 |
49 | 🔸 temperature_scale 🔹
50 | Creativity control
51 | This modifier controls the model’s “creative freedom,” exactly like the temperature parameter in text-based AIs. With positive values (0.5–1.0), the model becomes bolder and more unpredictable, taking artistic liberties with your prompt — ideal for creative exploration. With negative values, the model becomes conservative and predictable, following your prompt strictly with few variations — perfect for consistency and replication. It's the slider between “surprise me” and “do exactly what I say.”
52 |
53 | 🔸 embedding_mix 🔹 (Does not work with Flux)
54 | Mixing and reorganization
55 | This modifier rearranges the internal order of elements in your prompt, like shuffling a deck of cards. With positive values, different parts of your description are “mixed,” creating unexpected combinations — a character might inherit attributes intended for the background. With negative values, the effect “unmixes” by accentuating separations, making each element more distinct. Useful for creative hybridizations or, on the contrary, clearly separating concepts.
56 |
57 | 🔸 svd_filter 🔹
58 | Complexity-based filtering (Advanced)
59 | This modifier mathematically decomposes your prompt into “complexity components” and selectively modifies them. With positive values, it amplifies mid-level details, enriching nuances and visual sophistication. With negative values, it simplifies the concept by reducing those components, producing more minimalistic, clean images. Think of it as a filter that controls the “conceptual richness” of your generation.
60 |
61 | 🔸 spherical_rotation 🔹
62 | Conceptual rotation (Advanced)
63 | This modifier “rotates” your prompt in the multidimensional concept space while preserving its overall intensity, like rotating a 3D object. With high positive values, you get radical variations that keep the “weight” of the original prompt but explore entirely different angles. Results can be very surprising because the subject remains, but its interpretation changes dramatically. Excellent for extreme creative exploration.
64 |
65 | 🔸 principal_component 🔹
66 | Modification of principal axes (Advanced)
67 | This modifier identifies the “principal axes” of your prompt (the most important directions of variation) and alters them. With positive values, it amplifies these dominant axes, pushing the main features of your description to the extreme. With negative values, it attenuates them, simplifying the image by reducing conceptual dimensionality. It’s like choosing between “emphasize what matters most” and “flatten to simplify.”
68 |
69 | 🔸 block_shuffle 🔹
70 | Block-based reorganization
71 | This modifier cuts your prompt into conceptual “blocks” and rearranges them randomly while preserving coherence inside each block. With increasing positive values, the blocks become smaller and the shuffle more chaotic, creating surreal images where elements appear in unexpected order. It’s less radical than embedding_mix because local structure is preserved. Perfect for creating unusual compositions while keeping recognizable elements.
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | NDDG Great Nodes
2 |
3 | ________________________________________
4 | ________________________________________
5 |
6 | 🍄Great Conditioning Modifier
7 |
8 |
9 | 
10 |
11 |
12 |
13 | For Qwen-image, Z-image, Flux1 and Flux2.
14 |
15 | 📚 Modifier Guide
16 |
17 | 🔹 > degree of importance for POSITIVE value modifications
18 |
19 | 🔸 > degree of importance for NEGATIVE value modifications
20 |
21 | ❌ > no use in Positive
22 |
23 | 🔸 semantic_drift 🔹
24 |
25 | Progressive semantic drift
26 | This modifier gradually blends your original prompt with a noisier version of itself, as if adding artistic blur to your instructions. With positive values, the image gently drifts away from the initial prompt while keeping overall coherence — imagine a concept "drifting" into neighboring interpretations. With negative values, the opposite occurs: the prompt is reinforced and becomes less prone to variation. Perfect for achieving creative variations without losing the original meaning.
27 |
28 | 🔸🔸🔸 token_dropout 🔹🔹
29 |
30 | Selective token removal
31 | Imagine your prompt is composed of several keywords the model "listens to." This modifier randomly ignores some of them, as if you temporarily changed the subject mid-generation. With positive values, some elements of your description are skipped, creating more abstract or surprising images because the model must "guess" the missing parts. With negative values, the opposite effect forces the model to concentrate on only a few specific tokens, producing cleaner, more focused images.
32 |
33 | 🔸🔸🔸 gradient_amplify 🔹🔹
34 |
35 | Amplification of conceptual transitions
36 | This modifier acts on the "transitions" between different elements of your prompt. Think of it as a contrast control for concepts: with positive values, the differences between parts of your description are exaggerated, creating more dramatic images with sharper contrasts between elements. With negative values, transitions are smoothed out, resulting in more harmonious, blended images where everything merges gently. Useful for controlling the dramatic intensity of your generations.
37 |
38 | 🔸🔸🔸 guided_noise 🔹🔹🔹
39 |
40 | Proportional guided noise
41 | This is the most universal and predictable modifier. It adds "creative noise" proportional to the intensity of your prompt — like adding film grain to a photo. With positive values (0.2–0.5), you get natural variations of your base image, perfect for generating several similar but unique versions. With negative values, you subtract this noise, stabilizing the image and making it more predictable. It's the ideal starting tool because its effects are progressive and controllable.
42 |
43 | 🔸 quantize 🔹🔹🔹🔹
44 |
45 | Quantization and stabilization
46 | This modifier reduces the “precision” of the instructions given to the model, like switching from millions of colors to a limited palette. With high positive values (0.5–1.0), the image becomes more stylized and graphic, with stronger choices and fewer subtle nuances — ideal for a simplified artistic rendering. With negative values, the opposite effect adds dithering (fine grain) that enriches details and micro-variations, creating more organic and textured images.
47 |
48 | 🔸🔸🔸 perlin_noise 🔹🔹🔹🔹
49 |
50 | Coherent structured noise
51 | Unlike classic random noise, Perlin noise creates smooth, “natural” variations, like cloud patterns or wood grain. With positive values, your images gain an organic, flowing quality, with soft variations that look natural rather than chaotic. Elements transform gradually instead of changing abruptly. With negative values, you get the opposite effect, which “de-structures” these patterns, creating more fragmented images. Excellent for natural or fluid abstract renderings.
52 |
53 | 🔸🔸🔸 fourier_filter ❌
54 |
55 | NON-FUNCTIONAL frequency filtering
56 | This modifier analyzes your prompt like a sound wave and filters certain conceptual “frequencies.” It only works with negative values: it's a low-pass filter that smooths the image by keeping only large shapes and general concepts (like keeping only bass tones). Think of it as an equalizer for your visual concepts.
57 |
58 | 🔸 style_shift 🔹
59 |
60 | Directional style shift
61 | This modifier pushes your prompt in a random but coherent “direction” in concept space, like turning a knob that gradually changes the global style. With positive values, you explore significant stylistic variations while keeping the subject — the image may shift from photorealistic to painterly, or from one lighting style to another. With negative values, the direction is reversed. Perfect for discovering unexpected stylistic interpretations of your prompt.
62 |
63 | 🔸 temperature_scale 🔹
64 |
65 | Creativity control
66 | This modifier controls the model’s “creative freedom,” exactly like the temperature parameter in text-based AIs. With positive values (0.5–1.0), the model becomes bolder and more unpredictable, taking artistic liberties with your prompt — ideal for creative exploration. With negative values, the model becomes conservative and predictable, following your prompt strictly with few variations — perfect for consistency and replication. It's the slider between “surprise me” and “do exactly what I say.”
67 |
68 | 🔸 embedding_mix 🔹
69 |
70 | Mixing and reorganization
71 | This modifier rearranges the internal order of elements in your prompt, like shuffling a deck of cards. With positive values, different parts of your description are “mixed,” creating unexpected combinations — a character might inherit attributes intended for the background. With negative values, the effect “unmixes” by accentuating separations, making each element more distinct. Useful for creative hybridizations or, on the contrary, clearly separating concepts.
72 |
73 | 🔸 svd_filter 🔹
74 |
75 | Complexity-based filtering (Advanced)
76 | This modifier mathematically decomposes your prompt into “complexity components” and selectively modifies them. With positive values, it amplifies mid-level details, enriching nuances and visual sophistication. With negative values, it simplifies the concept by reducing those components, producing more minimalistic, clean images. Think of it as a filter that controls the “conceptual richness” of your generation.
77 |
78 | 🔸 spherical_rotation 🔹
79 |
80 | Conceptual rotation (Advanced)
81 | This modifier “rotates” your prompt in the multidimensional concept space while preserving its overall intensity, like rotating a 3D object. With high positive values, you get radical variations that keep the “weight” of the original prompt but explore entirely different angles. Results can be very surprising because the subject remains, but its interpretation changes dramatically. Excellent for extreme creative exploration.
82 |
83 | 🔸 principal_component 🔹
84 |
85 | Modification of principal axes (Advanced)
86 | This modifier identifies the “principal axes” of your prompt (the most important directions of variation) and alters them. With positive values, it amplifies these dominant axes, pushing the main features of your description to the extreme. With negative values, it attenuates them, simplifying the image by reducing conceptual dimensionality. It’s like choosing between “emphasize what matters most” and “flatten to simplify.”
87 |
88 | 🔸 block_shuffle 🔹
89 |
90 | Block-based reorganization
91 | This modifier cuts your prompt into conceptual “blocks” and rearranges them randomly while preserving coherence inside each block. With increasing positive values, the blocks become smaller and the shuffle more chaotic, creating surreal images where elements appear in unexpected order. It’s less radical than embedding_mix because local structure is preserved. Perfect for creating unusual compositions while keeping recognizable elements.
92 |
93 | 💡 General Usage Tips
94 |
95 | • Beginners: Start with guided_noise (0.2–0.4) and temperature_scale (0.5–0.7)
96 | • Subtle variations: perlin_noise (0.1–0.3), semantic_drift (0.2)
97 | • Creative exploration: style_shift (0.5–0.8), spherical_rotation (0.6–1.0)
98 | • Stabilization: Negative values on temperature_scale (–0.3 to –0.5)
99 | • Artistic effects: quantize (0.7–1.0), block_shuffle (0.5–0.8)
100 |
101 | Don't forget: Change the seed of the node to get different variations with the same parameters!
102 |
103 |
104 |
105 |
106 |
107 |
108 | ________________________________________
109 | ________________________________________
110 |
111 | 🍄Great Interactive Gradient Node
112 | 
113 |
114 | ________________________________________
115 | ________________________________________
116 |
117 | 🍄Great Random Organic Gradient Node
118 |
119 |
120 |
121 | ________________________________________
122 | ________________________________________
123 |
124 | 🍄Great_thick_border.js
125 |
126 | To see immediately which node is currently running!!
127 |
128 |
129 |
130 |
131 |
132 | ________________________________________
133 | ________________________________________
134 |
135 | 🍄Great Multiply Sigmas
136 |
137 | This node adds functionality to the Jonseed node (https://github.com/Jonseed/ComfyUI-Detail-Daemon) with the additional option "s_curve" to apply the changes.
138 | You can now also choose different values for the start and end of the affected area.
139 | An optional display is also available, allowing you to visualize the curve before and after the changes.
140 | You can also chain these nodes for better control.
141 |
142 | 
143 |
144 | ________________________________________
145 | ________________________________________
146 |
--------------------------------------------------------------------------------
/web/interactive_organic_gradient.js:
--------------------------------------------------------------------------------
1 | import { app } from "/scripts/app.js";
2 |
app.registerExtension({
  name: "ImageBlendEditor",
  async beforeRegisterNodeDef(nodeType, nodeData, app) {

    // Only restyle the node this extension targets.
    if (nodeData.name !== "ImageBlendNode") return;

    const previousOnNodeCreated = nodeType.prototype.onNodeCreated;

    nodeType.prototype.onNodeCreated = function () {
      const ret = previousOnNodeCreated
        ? previousOnNodeCreated.apply(this, arguments)
        : undefined;

      // 🎨 Custom colors for the "ImageBlendNode" node.
      this.color = "#080808";      // title bar
      this.bgcolor = "#353535";    // node background
      this.groupcolor = "#c41c30"; // vertical group stripe

      // Example: override the displayed title (optional)
      //this.title = "🍄Image Blend Node";

      return ret;
    };
  },
});
28 |
29 |
// Interactive gradient editor for "InteractiveOrganicGradientNode":
// embeds a 512x512 canvas in the node where colour stops can be added
// (click on empty area), dragged, recoloured (double-click) or deleted
// (click the small red circle), and serialises the stop list as JSON
// into the node's "gradient_data" widget.
app.registerExtension({
    name: "InteractiveGradientEditor",
    async beforeRegisterNodeDef(nodeType, nodeData, app) {

        if (nodeData.name !== "InteractiveOrganicGradientNode") return;

        const origOnNodeCreated = nodeType.prototype.onNodeCreated;
        nodeType.prototype.onNodeCreated = function () {
            const result = origOnNodeCreated ? origOnNodeCreated.apply(this, arguments) : undefined;
            const nodeRef = this;

            // 🎨 Node colour customisation
            nodeRef.color = "#080808";      // title banner (#d97517 > orange-yellow)
            nodeRef.bgcolor = "#353535";    // node background
            nodeRef.groupcolor = "#c41c30"; // vertical group stripe

            // 🔧 Clean adjustment of the node height
            const adjustNodeSize = () => {
                try {
                    // Current node height
                    const baseHeight = nodeRef.size[1];

                    // Minimum height so the 512px canvas plus the button fit
                    const minHeight = 630;

                    // Grow the node if it is currently shorter
                    if (baseHeight < minHeight) {
                        nodeRef.size[1] = minHeight;
                        nodeRef.setDirtyCanvas(true, true);
                    }
                } catch (e) {
                    console.warn("⚠️ Impossible d’ajuster la taille du node :", e);
                }
            };

            // Adjust on creation + again after a short delay (DOM widget layout)
            adjustNodeSize();
            setTimeout(adjustNodeSize, 800);


            // --- Gradient data widget ---
            const gradientWidget = nodeRef.widgets.find(w => w.name === "gradient_data");
            if (!gradientWidget) {
                console.warn("Widget 'gradient_data' non trouvé !");
                return result;
            }

            // --- Container ---
            const container = document.createElement("div");
            container.style.display = "flex";
            container.style.flexDirection = "column";
            container.style.alignItems = "center";
            container.style.gap = "6px";
            container.style.margin = "6px 0";

            // --- Canvas ---
            const canvas = document.createElement("canvas");
            canvas.width = 512;
            canvas.height = 512;
            canvas.style.width = "100%";
            canvas.style.height = "auto";
            canvas.style.aspectRatio = "1";
            canvas.style.display = "block";
            canvas.style.border = "2px solid " + nodeRef.color;
            canvas.style.cursor = "pointer";
            canvas.style.borderRadius = "6px";
            canvas.style.boxShadow = "0 0 5px rgba(0,0,0,0.4)";
            container.appendChild(canvas);

            // --- Reset button ---
            const resetBtn = document.createElement("button");
            resetBtn.textContent = "Reset Gradient";
            resetBtn.style.padding = "6px 10px";
            resetBtn.style.fontSize = "13px";
            // FIX: the missing space after "solid" produced invalid CSS
            // ("2px solid#080808") and the border was silently dropped.
            resetBtn.style.border = "2px solid " + nodeRef.color;
            resetBtn.style.borderRadius = "6px";
            resetBtn.style.background = nodeRef.bgcolor;//"#333";
            resetBtn.style.color = "#dddddd";//"white";
            resetBtn.style.cursor = "pointer";
            resetBtn.style.transition = "background 0.2s";
            resetBtn.onmouseenter = () => (resetBtn.style.background = nodeRef.color);
            resetBtn.onmouseleave = () => (resetBtn.style.background = nodeRef.bgcolor);
            container.appendChild(resetBtn);

            // add DOM widget (drawFn is a hoisted function declaration, so the
            // onDraw callback is safe even though drawFn is defined below)
            nodeRef.addDOMWidget("gradient_editor", "custom", container, {
                onDraw: () => {
                    drawFn();
                }
            });

            const ctx = canvas.getContext("2d");

            // --- Data handling ---
            let data = [];
            function defaultGradient() {
                return [
                    { x: 0.2, y: 0.5, color: "#ff3300" },
                    { x: 0.8, y: 0.5, color: "#00ffe1" }
                ];
            }

            // Parse the widget JSON; fall back to the default gradient on bad data.
            function loadData() {
                try {
                    const v = gradientWidget.value ?? "";
                    const parsed = v ? JSON.parse(v) : [];
                    if (Array.isArray(parsed)) data = parsed;
                    else data = defaultGradient();
                } catch (e) {
                    data = defaultGradient();
                }
            }
            loadData();

            // Serialise the stops back into the widget and request a redraw.
            function saveData() {
                try {
                    gradientWidget.value = JSON.stringify(data);
                } catch (e) {
                    console.error("Impossible de sauvegarder le gradient:", e);
                }
                try { nodeRef.setDirtyCanvas(true, true); } catch (e) {}
            }

            // Reset handler
            resetBtn.onclick = () => {
                data = defaultGradient();
                saveData();
                drawFn();
            };

            // --- Drawing ---
            function drawFn() {
                if (!Array.isArray(data) || data.length === 0) data = defaultGradient();

                const w = canvas.width;
                const h = canvas.height;
                ctx.clearRect(0, 0, w, h);

                // checkerboard background (visible through transparent areas)
                const tile = 32;
                for (let yy = 0; yy < h; yy += tile) {
                    for (let xx = 0; xx < w; xx += tile) {
                        ctx.fillStyle = ((xx / tile + yy / tile) % 2 === 0) ? "#cccccc" : "#eeeeee";
                        ctx.fillRect(xx, yy, tile, tile);
                    }
                }

                // radial blobs, one per colour stop
                for (const stop of data) {
                    const x = stop.x * w;
                    const y = stop.y * h;
                    const g = ctx.createRadialGradient(x, y, 0, x, y, Math.max(w, h) / 2);
                    g.addColorStop(0, stop.color);
                    g.addColorStop(1, "transparent");
                    ctx.fillStyle = g;
                    ctx.fillRect(0, 0, w, h);
                }

                // drag handles
                for (const stop of data) {
                    const x = stop.x * w;
                    const y = stop.y * h;
                    ctx.beginPath();
                    ctx.arc(x, y, 20, 0, Math.PI * 2); // handle radius: 20px
                    ctx.fillStyle = stop.color;
                    ctx.fill();
                    ctx.strokeStyle = nodeRef.color;
                    ctx.lineWidth = 3;
                    ctx.stroke();

                    // delete button: small red circle, radius 12px, offset (+15, -15)
                    // from the handle centre (the hit-test below uses the same values)
                    const delX = x + 15;
                    const delY = y - 15;
                    ctx.beginPath();
                    ctx.arc(delX, delY, 12, 0, Math.PI * 2);
                    ctx.fillStyle = "red";
                    ctx.fill();
                    ctx.lineWidth = 1;
                    ctx.strokeStyle = "#000000";
                    ctx.stroke();
                }
            }

            // --- Helpers ---
            // Convert client (mouse) coordinates to canvas pixel coordinates,
            // compensating for the CSS scaling of the canvas element.
            function clientToCanvas(eClientX, eClientY) {
                const rect = canvas.getBoundingClientRect();
                const scaleX = canvas.width / rect.width;
                const scaleY = canvas.height / rect.height;
                return { x: (eClientX - rect.left) * scaleX, y: (eClientY - rect.top) * scaleY };
            }

            // --- Interaction ---
            let dragging = false, selected = null, activePointerId = null;

            const onPointerDown = (ev) => {
                if (ev.button && ev.button !== 0) return; // left button only
                ev.preventDefault(); ev.stopPropagation();
                loadData();

                const { x, y } = clientToCanvas(ev.clientX, ev.clientY);

                // delete: hit-test the red circle (same centre as drawn: x+15, y-15;
                // hit radius equals the drawn radius, 12px)
                for (let i = 0; i < data.length; i++) {
                    const s = data[i];
                    const px = s.x * canvas.width, py = s.y * canvas.height;
                    const cx = px + 15;
                    const cy = py - 15;
                    if (Math.hypot(x - cx, y - cy) <= 12) {
                        data.splice(i, 1);
                        saveData();
                        drawFn();
                        return;
                    }
                }

                // drag handle (hit radius 20px, matching the drawn handle)
                for (const s of data) {
                    const px = s.x * canvas.width, py = s.y * canvas.height;
                    if (Math.hypot(x - px, y - py) <= 20) {
                        dragging = true; selected = s; activePointerId = ev.pointerId;
                        try { canvas.setPointerCapture(ev.pointerId); } catch {}
                        return;
                    }
                }

                // empty area: add a new white stop at the click position
                data.push({ x: x / canvas.width, y: y / canvas.height, color: "#ffffff" });
                saveData(); drawFn();
            };

            const onPointerMove = (ev) => {
                if (!dragging || !selected || ev.pointerId !== activePointerId) return;
                ev.preventDefault(); ev.stopPropagation();
                const { x, y } = clientToCanvas(ev.clientX, ev.clientY);
                // keep the stop inside the unit square
                selected.x = Math.max(0, Math.min(1, x / canvas.width));
                selected.y = Math.max(0, Math.min(1, y / canvas.height));
                saveData(); drawFn();
            };

            const onPointerUp = (ev) => {
                if (ev.pointerId !== activePointerId) return;
                dragging = false; selected = null;
                try { canvas.releasePointerCapture(ev.pointerId); } catch {}
                activePointerId = null;
            };

            // double-click on a handle opens a native colour picker for that stop
            const onDblClick = (ev) => {
                ev.preventDefault(); ev.stopPropagation();
                loadData();
                const { x, y } = clientToCanvas(ev.clientX, ev.clientY);
                for (const s of data) {
                    const px = s.x * canvas.width, py = s.y * canvas.height;
                    if (Math.hypot(x - px, y - py) <= 10) {
                        const input = document.createElement("input");
                        input.type = "color"; input.value = s.color;
                        input.style.position = "fixed";
                        input.style.left = `${ev.clientX}px`;
                        input.style.top = `${ev.clientY}px`;
                        input.style.zIndex = 99999;
                        document.body.appendChild(input);
                        input.addEventListener("input", e => { s.color = e.target.value; saveData(); drawFn(); });
                        input.addEventListener("change", () => input.remove());
                        input.addEventListener("blur", () => input.remove());
                        input.focus(); input.click();
                        return;
                    }
                }
            };

            // attach events
            canvas.addEventListener("pointerdown", onPointerDown, { passive: false });
            canvas.addEventListener("pointermove", onPointerMove, { passive: false });
            canvas.addEventListener("pointerup", onPointerUp, { passive: false });
            canvas.addEventListener("dblclick", onDblClick, { passive: false });

            // initial draw
            saveData(); drawFn();

            return result;
        };
    },
});
313 |
--------------------------------------------------------------------------------
/web/Great_thick_border.js:
--------------------------------------------------------------------------------
1 | /**
2 | * Extension ComfyUI - Bordure Épaisse FINALE avec support COMPLET des Subgraphs
3 | * ✅ Détecte les nodes parents quand leurs enfants sont exécutés (format parent_id:child_id)
4 | * ✅ Suit parfaitement le contour de TOUS les types de nodes
5 | *
 6 |  * Installation: ComfyUI/custom_nodes/NDDG_Great_Nodes/web/Great_thick_border.js
7 | *
8 | * 🍄NDDG Great Nodes
9 | *
10 | */
11 |
12 | import { app } from "../../scripts/app.js";
13 |
// Draws a thick glowing border around the node currently being executed,
// including the PARENT node of a subgraph when one of its children (id
// format "parent_id:child_id") is the node actually running.
app.registerExtension({
    name: "thick.executing.border.final",

    async setup() {
        console.log("🎨 Extension Thick Border FINAL chargée !");

        // ============ CONFIGURATION ============
        const CONFIG = {
            borderWidth: 6,             // Border thickness (3-10)
            borderColor: "#00FF00",     // Colour
            borderOffset: 6,            // Distance from the node (4-10)
            addGlow: true,              // Glow effect
            glowIntensity: 15,          // Glow strength
            opacity: 0.95,              // Border opacity
            showSubgraphChildren: true  // Also highlight child nodes
        };
        // ========================================

        // Wait for LGraphCanvas and LiteGraph to exist (polled every 100 ms)
        await new Promise((resolve) => {
            if (typeof LGraphCanvas !== 'undefined' && typeof LiteGraph !== 'undefined') {
                resolve();
            } else {
                const check = setInterval(() => {
                    if (typeof LGraphCanvas !== 'undefined' && typeof LiteGraph !== 'undefined') {
                        clearInterval(check);
                        resolve();
                    }
                }, 100);
            }
        });

        console.log("✅ LGraphCanvas détecté !");

        // ===== HELPER: extract the parent id of a subgraph child node =====
        // Child node ids use the "parent_id:child_id" format; returns null
        // for plain (non-subgraph) ids.
        function getParentNodeId(nodeId) {
            if (typeof nodeId === 'string' && nodeId.includes(':')) {
                return parseInt(nodeId.split(':')[0], 10);
            }
            return null;
        }

        // ===== HELPER: trace a rounded-rectangle path =====
        // Shared by the collapsed, ROUND/CARD and default branches below
        // (the original code duplicated this sequence three times).
        function traceRoundedRect(ctx, x, y, w, h, r) {
            ctx.moveTo(x + r, y);
            ctx.lineTo(x + w - r, y);
            ctx.arcTo(x + w, y, x + w, y + r, r);
            ctx.lineTo(x + w, y + h - r);
            ctx.arcTo(x + w, y + h, x + w - r, y + h, r);
            ctx.lineTo(x + r, y + h);
            ctx.arcTo(x, y + h, x, y + h - r, r);
            ctx.lineTo(x, y + r);
            ctx.arcTo(x, y, x + r, y, r);
        }

        // ===== Draw the highlight border following the node outline =====
        // `canvas` is the LGraphCanvas instance (provides round_radius).
        function drawPerfectBorder(ctx, node, canvas) {
            ctx.save();

            // Stroke style
            ctx.lineWidth = CONFIG.borderWidth;
            ctx.strokeStyle = CONFIG.borderColor;
            ctx.globalAlpha = CONFIG.opacity;
            ctx.lineCap = "round";
            ctx.lineJoin = "round";

            // Glow effect
            if (CONFIG.addGlow) {
                ctx.shadowBlur = CONFIG.glowIntensity;
                ctx.shadowColor = CONFIG.borderColor;
            }

            // Node dimensions and shape
            const shape = node._shape || node.constructor.shape || LiteGraph.ROUND_SHAPE;
            const size = node.size;
            const titleHeight = LiteGraph.NODE_TITLE_HEIGHT || 30;
            const isCollapsed = node.flags && node.flags.collapsed;
            const offset = CONFIG.borderOffset;

            // Corner radius
            const radius = canvas.round_radius || 10;

            ctx.beginPath();

            if (isCollapsed) {
                // ===== COLLAPSED NODE: only the title bar is visible =====
                const collapsedWidth = node._collapsed_width || LiteGraph.NODE_COLLAPSED_WIDTH || 80;
                const collapsedRadius = LiteGraph.NODE_COLLAPSED_RADIUS || 10;

                traceRoundedRect(
                    ctx,
                    -offset,
                    -titleHeight - offset,
                    collapsedWidth + offset * 2,
                    titleHeight + offset * 2,
                    collapsedRadius + offset / 2
                );

            } else if (shape === LiteGraph.BOX_SHAPE) {
                // Rectangular shape WITHOUT rounded corners
                ctx.rect(
                    -offset,
                    -titleHeight - offset,
                    size[0] + offset * 2,
                    size[1] + titleHeight + offset * 2
                );

            } else if (shape === LiteGraph.CIRCLE_SHAPE) {
                // Circular shape
                const centerX = size[0] * 0.5;
                const centerY = size[1] * 0.5;
                const circleRadius = Math.max(size[0], size[1]) * 0.5 + offset;

                ctx.arc(centerX, centerY, circleRadius, 0, Math.PI * 2);

            } else {
                // ROUND_SHAPE / CARD_SHAPE / default: rounded rectangle
                // (the most common case)
                traceRoundedRect(
                    ctx,
                    -offset,
                    -titleHeight - offset,
                    size[0] + offset * 2,
                    size[1] + titleHeight + offset * 2,
                    radius + offset / 2
                );
                ctx.closePath();
            }

            // Stroke the border
            ctx.stroke();
            ctx.restore();
        }

        // ===== Intercept drawNode =====
        const originalDrawNode = LGraphCanvas.prototype.drawNode;

        if (!originalDrawNode) {
            console.error("❌ Erreur: drawNode introuvable");
            return;
        }

        LGraphCanvas.prototype.drawNode = function(node, ctx) {
            // Draw the node normally first
            originalDrawNode.call(this, node, ctx);

            // A node is highlighted when it is executing itself, executing as
            // the parent of a subgraph child, or matches app.runningNodeId.
            const isExecuting = node.isExecuting ||
                               node.isExecutingAsParent ||
                               (app.runningNodeId && app.runningNodeId == node.id);

            if (isExecuting) {
                drawPerfectBorder(ctx, node, this);
            }
        };

        // ===== Listen to execution events over the ComfyUI WebSocket =====
        if (app.api) {
            app.api.addEventListener("executing", ({ detail }) => {
                const nodeId = detail;

                // Clear every execution flag before marking the current node
                if (app.graph && app.graph._nodes) {
                    app.graph._nodes.forEach(node => {
                        node.isExecuting = false;
                        node.isExecutingAsParent = false;
                    });
                }

                if (nodeId && app.graph) {
                    // Is this a subgraph child ("parent_id:child_id" format)?
                    const parentId = getParentNodeId(nodeId);

                    if (parentId !== null) {
                        // Subgraph child: highlight the PARENT node instead
                        const parentNode = app.graph.getNodeById(parentId);

                        if (parentNode) {
                            parentNode.isExecutingAsParent = true;
                            console.log(`⚡ Subgraph parent "${parentNode.title || parentId}" en exécution (enfant: ${nodeId})`);

                            if (CONFIG.showSubgraphChildren) {
                                // Note: child nodes live inside the sub-graph and
                                // are not reachable through getNodeById from here.
                            }
                        } else {
                            console.warn(`⚠️ Parent node ${parentId} introuvable pour l'enfant ${nodeId}`);
                        }
                    } else {
                        // Plain (non-subgraph) node
                        const executingNode = app.graph.getNodeById(nodeId);
                        if (executingNode) {
                            executingNode.isExecuting = true;
                            console.log(`⚡ Node "${executingNode.title || nodeId}" en exécution`);
                        }
                    }

                    // Force a redraw so the border appears immediately
                    if (app.canvas) {
                        app.canvas.setDirty(true, true);
                    }
                }

                // A null/empty nodeId means the whole prompt finished
                if (!nodeId) {
                    console.log("✅ Exécution terminée");
                    if (app.canvas) {
                        app.canvas.setDirty(true, true);
                    }
                }
            });

            console.log("📡 Écoute WebSocket activée");
        }

        console.log(`✨ Configuration:`);
        console.log(`   • Épaisseur: ${CONFIG.borderWidth}px`);
        console.log(`   • Distance: ${CONFIG.borderOffset}px`);
        console.log(`   • Couleur: ${CONFIG.borderColor}`);
        console.log(`   • Effet lumineux: ${CONFIG.addGlow ? 'OUI' : 'NON'}`);
        console.log(`   • Support subgraphs: ACTIVÉ (détection parent:child)`);
    }
});
--------------------------------------------------------------------------------
/web/great_conditioning_modifier_slider.js:
--------------------------------------------------------------------------------
1 | // d'après > ComfyUI.mxToolkit.Slider v.0.9.92 - Max Smirnov 2025
2 | // Great Conditioning Modifier - Custom Slider v1.0 - Fixed
3 | import { app } from "../../scripts/app.js";
4 |
/**
 * Custom horizontal slider painted directly onto a LiteGraph node canvas.
 * Keeps its value in sync with the node widget named `widgetName` (so the
 * Python side receives it) and supports click/drag interaction plus a
 * clickable numeric readout.
 *
 * Based on ComfyUI.mxToolkit.Slider v.0.9.92 - Max Smirnov 2025.
 */
class GreatSlider {
    /**
     * @param {Object} node - LiteGraph node the slider belongs to.
     * @param {string} widgetName - Name of the widget kept in sync.
     * @param {Object} [config] - Optional min/max/step/decimals/default overrides.
     */
    constructor(node, widgetName, config = {}) {
        this.node = node;
        this.widgetName = widgetName;
        // FIX: use ?? instead of || so explicit falsy settings
        // (min: 0, decimals: 0, ...) are honoured instead of being
        // silently replaced by the fallback.
        this.config = {
            min: config.min ?? -10.0,
            max: config.max ?? 10.0,
            step: config.step ?? 0.01,
            decimals: config.decimals ?? 2,
            default: config.default ?? 0.0
        };

        // FIX: read the normalised default; the raw config.default may be
        // undefined, which previously left this.value undefined.
        this.value = this.config.default;
        this.normalizedPos = this.valueToPosition(this.value);

        this.isDragging = false;
    }

    /** Map a value in [min, max] to a normalised position in [0, 1]. */
    valueToPosition(value) {
        return (value - this.config.min) / (this.config.max - this.config.min);
    }

    /** Map a normalised position in [0, 1] back to a value snapped to `step`. */
    positionToValue(pos) {
        const value = this.config.min + (this.config.max - this.config.min) * pos;
        return Math.round(value / this.config.step) * this.config.step;
    }

    /** Clamp `value` into [min, max] and update the cached cursor position. */
    setValue(value) {
        this.value = Math.max(this.config.min, Math.min(this.config.max, value));
        this.normalizedPos = this.valueToPosition(this.value);
    }

    /**
     * Paint the slider track, fill, zero marker, cursor and value readout.
     * @param {CanvasRenderingContext2D} ctx - Node foreground context.
     * @param {number} y - Top y coordinate of the slider strip.
     * @param {number} width - Current node width.
     */
    draw(ctx, y, width) {
        const margin = 10;
        const sliderWidth = width - 75; // leave room for the value readout
        const sliderHeight = 10;
        const centerY = y + 15;

        // track
        ctx.fillStyle = "rgba(30, 30, 30, 0.8)";
        ctx.beginPath();
        ctx.roundRect(margin, centerY - sliderHeight/2, sliderWidth, sliderHeight, 5);
        ctx.fill();

        const fillWidth = sliderWidth * this.normalizedPos;
        if (fillWidth >= 0) {
            // gradient depends on the sign of the value
            const gradient = ctx.createLinearGradient(margin, 0, margin + sliderWidth, 0);

            if (this.value < 0) {
                gradient.addColorStop(0, "#f0db1f");
                gradient.addColorStop(0.5, "#944212");
                gradient.addColorStop(1, "#000");
            } else if (this.value > 0) {
                gradient.addColorStop(0, "#000");
                gradient.addColorStop(0.5, "#114e80");
                gradient.addColorStop(1, "#21eff3");
            } else {
                gradient.addColorStop(0, "#a2a2a2");
                gradient.addColorStop(1, "#a2a2a2");
            }

            ctx.fillStyle = gradient;
            ctx.beginPath();

            if (this.value < 0) {
                // negative: fill from the cursor to the right edge
                // (negative width draws leftwards from the track's right end)
                ctx.roundRect(
                    margin + sliderWidth,
                    centerY - sliderHeight/2,
                    (-sliderWidth /2) + ((-sliderWidth / 2) + ((sliderWidth) * this.normalizedPos)),
                    sliderHeight, 5
                );
            } else {
                // positive/zero: fill from the left edge to the cursor
                ctx.roundRect(
                    margin, centerY - sliderHeight/2,
                    fillWidth, sliderHeight, 5
                );
            }

            ctx.fill();
        }

        // zero marker (vertical tick at value 0)
        const zeroPos = margin + sliderWidth * this.valueToPosition(0);
        ctx.strokeStyle = "rgba(255, 255, 255, 0.3)";
        ctx.lineWidth = 3;
        ctx.beginPath();
        ctx.moveTo(zeroPos, centerY - sliderHeight);
        ctx.lineTo(zeroPos, centerY + sliderHeight);
        ctx.stroke();

        // cursor
        const cursorX = margin + sliderWidth * this.normalizedPos;
        ctx.fillStyle = "#a2a2a2";
        ctx.beginPath();
        ctx.arc(cursorX, centerY, 8, 0, 2 * Math.PI);
        ctx.fill();

        ctx.strokeStyle = this.node.bgcolor || "#333333";
        ctx.lineWidth = 2;
        ctx.beginPath();
        ctx.arc(cursorX, centerY, 6, 0, 2 * Math.PI);
        ctx.stroke();

        // numeric readout on the right
        ctx.fillStyle = "#ffffff";
        ctx.font = "18px monospace";
        ctx.textAlign = "right";
        ctx.fillText(this.value.toFixed(this.config.decimals), width - 6, centerY + 4);
    }

    /**
     * Handle a left-button press in node-local coordinates.
     * @returns {"prompt"|boolean} "prompt" when the readout was clicked,
     *          true when a drag started, false otherwise.
     */
    handleMouseDown(e, localX, localY, width) {
        // Left button only
        if (e.button !== 0) return false;

        const margin = 10;
        const sliderWidth = width - 75;

        if (localY >= 5 && localY <= 25) {
            // Value readout zone (click opens a text prompt)
            if (localX >= width - 65 && localX <= width - 5) {
                return "prompt";
            }

            // Slider track zone: start dragging
            if (localX >= margin && localX <= margin + sliderWidth) {
                this.isDragging = true;
                this.updateFromMouse(localX, width);
                return true;
            }
        }
        return false;
    }

    /** Handle mouse movement; only acts while a drag is in progress. */
    handleMouseMove(e, localX, localY, width) {
        if (this.isDragging) {
            this.updateFromMouse(localX, width);
            return true;
        }
        return false;
    }

    /** End the current drag; returns true if a drag was active. */
    handleMouseUp(e) {
        if (this.isDragging) {
            this.isDragging = false;
            return true;
        }
        return false;
    }

    /**
     * Recompute the value from a local x coordinate and mirror it into the
     * synced widget. Returns true when the value actually changed.
     */
    updateFromMouse(localX, width) {
        const margin = 10;
        const sliderWidth = width - 75;

        let pos = (localX - margin) / sliderWidth;
        pos = Math.max(0, Math.min(1, pos));

        const newValue = this.positionToValue(pos);

        if (newValue !== this.value) {
            this.setValue(newValue);

            // Mirror into the hidden widget so the backend sees the value.
            const strengthWidget = this.node.widgets?.find(w => w.name === this.widgetName);
            if (strengthWidget) strengthWidget.value = this.value;

            return true;
        }
        return false;
    }

    /** Force-stop dragging (safety hatch used by mouse-leave handlers). */
    stopDragging() {
        this.isDragging = false;
    }
}
177 |
// Replaces the "modification_strength" number widget of the
// GreatConditioningModifier node with a custom painted GreatSlider and
// wires up mouse handling, sizing and value round-tripping.
app.registerExtension({
    name: "great.conditioning.modifier.slider",

    async beforeRegisterNodeDef(nodeType, nodeData, app) {
        if (nodeData.name !== "GreatConditioningModifier") return;

        const onNodeCreated = nodeType.prototype.onNodeCreated;
        nodeType.prototype.onNodeCreated = function() {

            const result = onNodeCreated?.apply(this, arguments);

            const strengthWidget = this.widgets?.find(w => w.name === "modification_strength");

            // Value restored from the workflow (if any) before hiding the widget
            const savedValue = strengthWidget ? strengthWidget.value : 0.0;

            // Hide the stock widget; the painted slider becomes the visible control
            if (strengthWidget) {
                strengthWidget.hidden = true;
                strengthWidget.type = "hidden";
            }

            this.strengthSlider = new GreatSlider(this, "modification_strength", {
                min: -10.0,
                max: 10.0,
                step: 0.01,
                decimals: 2,
                default: savedValue
            });

            this.strengthSlider.setValue(savedValue);

            // Vertical space reserved at the bottom of the node for the slider
            this.sliderHeight = 40;

            const originalComputeSize = this.computeSize;
            this.computeSize = function(out) {
                const size = originalComputeSize ? originalComputeSize.call(this, out) : [this.size[0], this.size[1]];
                // FIX: dropped the dead "|| 0" — operator precedence made it
                // apply to the whole sum (always truthy), so it was a no-op.
                size[1] += this.sliderHeight + 10;
                return size;
            };

            this.size = this.computeSize();

            // Re-sync the slider with the widget value after a workflow load
            const originalOnConfigure = this.onConfigure;
            this.onConfigure = function(info) {
                const r = originalOnConfigure?.apply(this, arguments);

                const w = this.widgets?.find(w => w.name === "modification_strength");
                if (w && this.strengthSlider) {
                    this.strengthSlider.setValue(w.value);
                }

                return r;
            };

            return result;
        };

        const onDrawForeground = nodeType.prototype.onDrawForeground;
        nodeType.prototype.onDrawForeground = function(ctx) {
            const result = onDrawForeground?.apply(this, arguments);

            if (this.flags.collapsed) return result;

            if (this.strengthSlider) {
                const sliderY = this.size[1] - this.sliderHeight + 5;

                ctx.save();
                this.strengthSlider.draw(ctx, sliderY, this.size[0]);
                ctx.restore();

                // Push the slider value back into the hidden widget so the
                // Python side receives it when the prompt is queued
                const strengthWidget = this.widgets?.find(w => w.name === "modification_strength");
                if (strengthWidget && strengthWidget.value !== this.strengthSlider.value) {
                    strengthWidget.value = this.strengthSlider.value;
                    if (this.onPropertyChanged) {
                        this.onPropertyChanged("modification_strength", this.strengthSlider.value);
                    }
                }
            }

            return result;
        };

        const onMouseDown = nodeType.prototype.onMouseDown;
        nodeType.prototype.onMouseDown = function(e, localPos, canvas) {
            if (this.strengthSlider) {
                const sliderY = this.size[1] - this.sliderHeight + 5;
                const localX = localPos[0];
                const localY = localPos[1] - sliderY;

                const result = this.strengthSlider.handleMouseDown(e, localX, localY, this.size[0]);

                // Click on the numeric readout: open a text prompt for direct entry
                if (result === "prompt") {
                    canvas.prompt(
                        "Modification Strength",
                        this.strengthSlider.value,
                        (v) => {
                            const num = parseFloat(v);
                            if (!isNaN(num)) {
                                this.strengthSlider.setValue(num);
                                this.setDirtyCanvas(true, true);
                            }
                        },
                        e
                    );
                    return true;
                }

                if (result) {
                    this.setDirtyCanvas(true, true);
                    return true;
                }
            }

            return onMouseDown?.apply(this, arguments);
        };

        const onMouseMove = nodeType.prototype.onMouseMove;
        nodeType.prototype.onMouseMove = function(e, localPos, canvas) {
            // Only track movement while dragging AND while the left button is held
            if (this.strengthSlider && this.strengthSlider.isDragging) {
                // The button may have been released without onMouseUp firing
                if (!(e.buttons & 1)) {
                    this.strengthSlider.stopDragging();
                    this.setDirtyCanvas(true, true);
                    return false;
                }

                const sliderY = this.size[1] - this.sliderHeight + 5;
                const localX = localPos[0];
                const localY = localPos[1] - sliderY;

                if (this.strengthSlider.handleMouseMove(e, localX, localY, this.size[0])) {
                    this.setDirtyCanvas(true, true);
                    return true;
                }
            }

            return onMouseMove?.apply(this, arguments);
        };

        const onMouseUp = nodeType.prototype.onMouseUp;
        nodeType.prototype.onMouseUp = function(e, localPos, canvas) {
            if (this.strengthSlider) {
                if (this.strengthSlider.handleMouseUp(e)) {
                    this.setDirtyCanvas(true, true);
                    return true;
                }
            }

            return onMouseUp?.apply(this, arguments);
        };

        // Extra safety: stop dragging if the mouse leaves the canvas
        const onMouseLeave = nodeType.prototype.onMouseLeave;
        nodeType.prototype.onMouseLeave = function(e) {
            if (this.strengthSlider && this.strengthSlider.isDragging) {
                this.strengthSlider.stopDragging();
                this.setDirtyCanvas(true, true);
            }

            return onMouseLeave?.apply(this, arguments);
        };
    }
});
--------------------------------------------------------------------------------
/workflow/🍄Great Conditioning Modifier.json:
--------------------------------------------------------------------------------
1 | {"id":"86b77360-cab7-4091-a953-97043c2aacee","revision":0,"last_node_id":19,"last_link_id":19,"nodes":[{"id":3,"type":"CLIPLoader","pos":[-470,-213.67208497244863],"size":[330,106],"flags":{},"order":0,"mode":0,"inputs":[{"localized_name":"clip_name","name":"clip_name","type":"COMBO","widget":{"name":"clip_name"},"link":null},{"localized_name":"type","name":"type","type":"COMBO","widget":{"name":"type"},"link":null},{"localized_name":"device","name":"device","shape":7,"type":"COMBO","widget":{"name":"device"},"link":null}],"outputs":[{"localized_name":"CLIP","name":"CLIP","type":"CLIP","links":[1]}],"properties":{"cnr_id":"comfy-core","ver":"0.3.59","Node name for S&R":"CLIPLoader","ue_properties":{"widget_ue_connectable":{},"version":"7.1","input_ue_unconnectable":{}}},"widgets_values":["qwen_2.5_vl_7b_fp8_scaled.safetensors","qwen_image","default"],"color":"#3c4a54","bgcolor":"#283640"},{"id":7,"type":"CR Aspect Ratio","pos":[-110,-333.508127458673],"size":[210,322],"flags":{},"order":1,"mode":0,"inputs":[{"localized_name":"width","name":"width","type":"INT","widget":{"name":"width"},"link":null},{"localized_name":"height","name":"height","type":"INT","widget":{"name":"height"},"link":null},{"localized_name":"aspect_ratio","name":"aspect_ratio","type":"COMBO","widget":{"name":"aspect_ratio"},"link":null},{"localized_name":"swap_dimensions","name":"swap_dimensions","type":"COMBO","widget":{"name":"swap_dimensions"},"link":null},{"localized_name":"upscale_factor","name":"upscale_factor","type":"FLOAT","widget":{"name":"upscale_factor"},"link":null},{"localized_name":"prescale_factor","name":"prescale_factor","type":"FLOAT","widget":{"name":"prescale_factor"},"link":null},{"localized_name":"batch_size","name":"batch_size","type":"INT","widget":{"name":"batch_size"},"link":null}],"outputs":[{"localized_name":"width","name":"width","type":"INT","links":[3]},{"localized_name":"height","name":"height","type":"INT","links":[4]},{"localized_name":"upscale_factor","name
":"upscale_factor","type":"FLOAT","links":null},{"localized_name":"prescale_factor","name":"prescale_factor","type":"FLOAT","links":null},{"localized_name":"batch_size","name":"batch_size","type":"INT","links":null},{"localized_name":"empty_latent","name":"empty_latent","type":"LATENT","links":null},{"localized_name":"show_help","name":"show_help","type":"STRING","links":null}],"properties":{"cnr_id":"ComfyUI_Comfyroll_CustomNodes","ver":"d78b780ae43fcf8c6b7c6505e6ffb4584281ceca","Node name for S&R":"CR Aspect Ratio","ue_properties":{"widget_ue_connectable":{},"version":"7.1","input_ue_unconnectable":{}}},"widgets_values":[1344,768,"SDXL - 3:4 portrait 896x1152","Off",1,1,1],"color":"#3c4a54","bgcolor":"#283640"},{"id":2,"type":"OverrideCLIPDevice","pos":[-470,-315.8360424862243],"size":[330,58],"flags":{},"order":5,"mode":0,"inputs":[{"localized_name":"clip","name":"clip","type":"CLIP","link":1},{"localized_name":"device","name":"device","type":"COMBO","widget":{"name":"device"},"link":null}],"outputs":[{"localized_name":"CLIP","name":"CLIP","type":"CLIP","links":[6,8]}],"properties":{"cnr_id":"ComfyUI_ExtraModels","ver":"92f556ed4d3bec1a3f16117d2de10f195c36d68e","Node name for S&R":"OverrideCLIPDevice","ue_properties":{"widget_ue_connectable":{},"input_ue_unconnectable":{},"version":"7.4.1"}},"widgets_values":["cuda:0"],"color":"#3c4a54","bgcolor":"#283640"},{"id":8,"type":"EmptySD3LatentImage","pos":[120,-330],"size":[210,106],"flags":{},"order":6,"mode":0,"inputs":[{"localized_name":"width","name":"width","type":"INT","widget":{"name":"width"},"link":3},{"localized_name":"height","name":"height","type":"INT","widget":{"name":"height"},"link":4},{"localized_name":"batch_size","name":"batch_size","type":"INT","widget":{"name":"batch_size"},"link":null}],"outputs":[{"localized_name":"LATENT","name":"LATENT","type":"LATENT","links":[15]}],"properties":{"cnr_id":"comfy-core","ver":"0.3.48","Node name for 
S&R":"EmptySD3LatentImage","ue_properties":{"widget_ue_connectable":{"width":true,"height":true},"version":"7.1","input_ue_unconnectable":{}}},"widgets_values":[896,1152,1],"color":"#3c4a54","bgcolor":"#283640"},{"id":4,"type":"VAELoader","pos":[-470,-63.50812745867296],"size":[330,58],"flags":{},"order":2,"mode":0,"inputs":[{"localized_name":"vae_name","name":"vae_name","type":"COMBO","widget":{"name":"vae_name"},"link":null}],"outputs":[{"localized_name":"VAE","name":"VAE","type":"VAE","links":[18]}],"properties":{"cnr_id":"comfy-core","ver":"0.3.59","Node name for S&R":"VAELoader","ue_properties":{"widget_ue_connectable":{},"version":"7.1","input_ue_unconnectable":{}}},"widgets_values":["qwen_image_vae.safetensors"],"color":"#3c4a54","bgcolor":"#283640"},{"id":11,"type":"CLIPTextEncode","pos":[120,170],"size":[213.0927734375,93],"flags":{"collapsed":true},"order":8,"mode":0,"inputs":[{"localized_name":"clip","name":"clip","type":"CLIP","link":6},{"localized_name":"text","name":"text","type":"STRING","widget":{"name":"text"},"link":null}],"outputs":[{"localized_name":"CONDITIONING","name":"CONDITIONING","type":"CONDITIONING","links":[12]}],"properties":{"cnr_id":"comfy-core","ver":"0.3.47","Node name for S&R":"CLIPTextEncode","ue_properties":{"version":"7.0.1","widget_ue_connectable":{}},"enableTabs":false,"tabWidth":65,"tabXOffset":10,"hasSecondTab":false,"secondTabText":"Send Back","secondTabOffset":80,"secondTabWidth":65},"widgets_values":["Bad things."],"color":"#332922","bgcolor":"#593930"},{"id":12,"type":"CLIPTextEncode","pos":[120,110],"size":[210.2994140625,90],"flags":{"collapsed":true},"order":9,"mode":0,"inputs":[{"localized_name":"clip","name":"clip","type":"CLIP","link":8},{"localized_name":"text","name":"text","type":"STRING","widget":{"name":"text"},"link":7}],"outputs":[{"localized_name":"CONDITIONING","name":"CONDITIONING","type":"CONDITIONING","slot_index":0,"links":[11]}],"properties":{"cnr_id":"comfy-core","ver":"0.3.7-1-g1e21f4c","Node name 
for S&R":"CLIPTextEncode","ue_properties":{"version":"7.0.1","widget_ue_connectable":{"text":true}},"enableTabs":false,"tabWidth":65,"tabXOffset":10,"hasSecondTab":false,"secondTabText":"Send Back","secondTabOffset":80,"secondTabWidth":65,"widget_ue_connectable":{"text":true}},"widgets_values":[""],"color":"#232","bgcolor":"#353"},{"id":17,"type":"VAEDecode","pos":[860,-450],"size":[140,46],"flags":{"collapsed":true},"order":14,"mode":0,"inputs":[{"localized_name":"samples","name":"samples","type":"LATENT","link":16},{"localized_name":"vae","name":"vae","type":"VAE","link":18}],"outputs":[{"localized_name":"IMAGE","name":"IMAGE","type":"IMAGE","links":[19]}],"properties":{"cnr_id":"comfy-core","ver":"0.3.68","Node name for S&R":"VAEDecode","ue_properties":{"widget_ue_connectable":{},"input_ue_unconnectable":{},"version":"7.4.1"}},"widgets_values":[],"color":"#323","bgcolor":"#535"},{"id":1,"type":"UNETLoader","pos":[-470,-450],"size":[330,90],"flags":{},"order":3,"mode":0,"inputs":[{"localized_name":"unet_name","name":"unet_name","type":"COMBO","widget":{"name":"unet_name"},"link":null},{"localized_name":"weight_dtype","name":"weight_dtype","type":"COMBO","widget":{"name":"weight_dtype"},"link":null}],"outputs":[{"localized_name":"MODEL","name":"MODEL","type":"MODEL","slot_index":0,"links":[5]}],"properties":{"cnr_id":"comfy-core","ver":"0.3.48","Node name for S&R":"UNETLoader","ue_properties":{"version":"7.4.1","widget_ue_connectable":{},"input_ue_unconnectable":{}},"models":[{"name":"qwen_image_fp8_e4m3fn.safetensors","url":"https://huggingface.co/Comfy-Org/Qwen-Image_ComfyUI/resolve/main/split_files/diffusion_models/qwen_image_fp8_e4m3fn.safetensors","directory":"diffusion_models"}],"enableTabs":false,"tabWidth":65,"tabXOffset":10,"hasSecondTab":false,"secondTabText":"Send 
Back","secondTabOffset":80,"secondTabWidth":65},"widgets_values":["Qwen\\qwenUltimateRealism_v11.safetensors","default"],"color":"#3c4a54","bgcolor":"#283640"},{"id":5,"type":"ModelSamplingAuraFlow","pos":[-110,-450],"size":[210,58],"flags":{},"order":7,"mode":0,"inputs":[{"localized_name":"model","name":"model","type":"MODEL","link":5},{"localized_name":"shift","name":"shift","type":"FLOAT","widget":{"name":"shift"},"link":null}],"outputs":[{"localized_name":"MODEL","name":"MODEL","type":"MODEL","links":[14]}],"properties":{"cnr_id":"comfy-core","ver":"0.3.48","Node name for S&R":"ModelSamplingAuraFlow","ue_properties":{"version":"7.1","widget_ue_connectable":{},"input_ue_unconnectable":{}},"enableTabs":false,"tabWidth":65,"tabXOffset":10,"hasSecondTab":false,"secondTabText":"Send Back","secondTabOffset":80,"secondTabWidth":65},"widgets_values":[3.1],"color":"#3c4a54","bgcolor":"#283640"},{"id":10,"type":"Primitive string multiline [Crystools]","pos":[-470,90],"size":[570,130],"flags":{},"order":4,"mode":0,"inputs":[{"localized_name":"string","name":"string","type":"STRING","widget":{"name":"string"},"link":null}],"outputs":[{"localized_name":"string","name":"string","type":"STRING","slot_index":0,"links":[7]}],"properties":{"cnr_id":"comfyui-crystools","ver":"1.22.0","Node name for S&R":"Primitive string multiline [Crystools]","ue_properties":{"version":"7.0.1","widget_ue_connectable":{}},"enableTabs":false,"tabWidth":65,"tabXOffset":10,"hasSecondTab":false,"secondTabText":"Send Back","secondTabOffset":80,"secondTabWidth":65,"widget_ue_connectable":{"string":true}},"widgets_values":["The wild, contorted face of a madwoman with a glassy stare. She has black hair, her mouth twisted in a cry of extreme suffering, thin, unpainted lips, and wears a silk scarf with delicate floral patterns. The background is a Renaissance painting.\nA red shade illuminates the scene. 
Aluminum, red velvet, wood, drops, sea, ghostly landscape, wrinkles and crevices.\n"],"color":"#232","bgcolor":"#353"},{"id":16,"type":"KSamplerAdvanced (WLSH)","pos":[700,-400],"size":[310,620],"flags":{"collapsed":false},"order":13,"mode":0,"inputs":[{"localized_name":"model","name":"model","type":"MODEL","link":14},{"localized_name":"positive","name":"positive","type":"CONDITIONING","link":13},{"localized_name":"negative","name":"negative","type":"CONDITIONING","link":12},{"localized_name":"latent_image","name":"latent_image","type":"LATENT","link":15},{"localized_name":"add_noise","name":"add_noise","type":"COMBO","widget":{"name":"add_noise"},"link":null},{"localized_name":"seed","name":"seed","type":"INT","widget":{"name":"seed"},"link":null},{"localized_name":"steps","name":"steps","type":"INT","widget":{"name":"steps"},"link":null},{"localized_name":"cfg","name":"cfg","type":"FLOAT","widget":{"name":"cfg"},"link":null},{"localized_name":"sampler_name","name":"sampler_name","type":"COMBO","widget":{"name":"sampler_name"},"link":null},{"localized_name":"scheduler","name":"scheduler","type":"COMBO","widget":{"name":"scheduler"},"link":null},{"localized_name":"start_at_step","name":"start_at_step","type":"INT","widget":{"name":"start_at_step"},"link":null},{"localized_name":"end_at_step","name":"end_at_step","type":"INT","widget":{"name":"end_at_step"},"link":null},{"localized_name":"return_with_leftover_noise","name":"return_with_leftover_noise","type":"COMBO","widget":{"name":"return_with_leftover_noise"},"link":null},{"localized_name":"denoise","name":"denoise","type":"FLOAT","widget":{"name":"denoise"},"link":null}],"outputs":[{"localized_name":"LATENT","name":"LATENT","type":"LATENT","links":[16]},{"localized_name":"INFO","name":"INFO","type":"INFO","links":null}],"properties":{"cnr_id":"wlsh_nodes","ver":"97807467bf7ff4ea01d529fcd6e666758f34e3c1","Node name for S&R":"KSamplerAdvanced 
(WLSH)","ue_properties":{"widget_ue_connectable":{"add_noise":true,"seed":true,"steps":true,"cfg":true,"sampler_name":true,"scheduler":true,"start_at_step":true,"end_at_step":true,"return_with_leftover_noise":true,"denoise":true},"version":"7.1","input_ue_unconnectable":{}}},"widgets_values":["enable",705003845982767,"fixed",24,1,"euler_ancestral","simple",0,10000,"enable",1],"color":"#233","bgcolor":"#355"},{"id":19,"type":"SaveImage","pos":[1030,-440],"size":[610,660],"flags":{"collapsed":false},"order":15,"mode":0,"inputs":[{"localized_name":"images","name":"images","type":"IMAGE","link":19},{"localized_name":"filename_prefix","name":"filename_prefix","type":"STRING","widget":{"name":"filename_prefix"},"link":null}],"outputs":[],"properties":{"cnr_id":"comfy-core","ver":"0.3.7-1-g1e21f4c","Node name for S&R":"SaveImage","ue_properties":{"version":"7.0.1","widget_ue_connectable":{}},"enableTabs":false,"tabWidth":65,"tabXOffset":10,"hasSecondTab":false,"secondTabText":"Send Back","secondTabOffset":80,"secondTabWidth":65,"widget_ue_connectable":{"filename_prefix":true}},"widgets_values":["%date:MM_yy%/%KSamplerAdvanced (WLSH).seed%"],"color":"#222","bgcolor":"#000"},{"id":15,"type":"GreatConditioningModifier","pos":[370,-430],"size":[280,180],"flags":{},"order":10,"mode":0,"inputs":[{"localized_name":"conditioning","name":"conditioning","type":"CONDITIONING","link":11},{"localized_name":"modification_strength","name":"modification_strength","type":"FLOAT","widget":{"name":"modification_strength"},"link":null},{"localized_name":"seed","name":"seed","type":"INT","widget":{"name":"seed"},"link":null},{"localized_name":"modification_method","name":"modification_method","type":"COMBO","widget":{"name":"modification_method"},"link":null},{"localized_name":"debug_mode","name":"debug_mode","type":"BOOLEAN","widget":{"name":"debug_mode"},"link":null}],"outputs":[{"localized_name":"CONDITIONING","name":"CONDITIONING","type":"CONDITIONING","links":[9]}],"properties":{"Node 
name for S&R":"GreatConditioningModifier","ue_properties":{"widget_ue_connectable":{},"input_ue_unconnectable":{},"version":"7.4.1"}},"widgets_values":[-10,2165,"increment","🔸semantic_drift🔹",false]},{"id":13,"type":"GreatConditioningModifier","pos":[370,-200],"size":[290,180],"flags":{},"order":11,"mode":0,"inputs":[{"localized_name":"conditioning","name":"conditioning","type":"CONDITIONING","link":9},{"localized_name":"modification_strength","name":"modification_strength","type":"FLOAT","widget":{"name":"modification_strength"},"link":null},{"localized_name":"seed","name":"seed","type":"INT","widget":{"name":"seed"},"link":null},{"localized_name":"modification_method","name":"modification_method","type":"COMBO","widget":{"name":"modification_method"},"link":null},{"localized_name":"debug_mode","name":"debug_mode","type":"BOOLEAN","widget":{"name":"debug_mode"},"link":null}],"outputs":[{"localized_name":"CONDITIONING","name":"CONDITIONING","type":"CONDITIONING","links":[10]}],"properties":{"Node name for 
S&R":"GreatConditioningModifier","ue_properties":{"widget_ue_connectable":{},"input_ue_unconnectable":{},"version":"7.4.1"}},"widgets_values":[1,1165,"increment","🔸🔸🔸token_dropout🔹🔹",false]},{"id":14,"type":"GreatConditioningModifier","pos":[370,40],"size":[290,180],"flags":{},"order":12,"mode":4,"inputs":[{"localized_name":"conditioning","name":"conditioning","type":"CONDITIONING","link":10},{"localized_name":"modification_strength","name":"modification_strength","type":"FLOAT","widget":{"name":"modification_strength"},"link":null},{"localized_name":"seed","name":"seed","type":"INT","widget":{"name":"seed"},"link":null},{"localized_name":"modification_method","name":"modification_method","type":"COMBO","widget":{"name":"modification_method"},"link":null},{"localized_name":"debug_mode","name":"debug_mode","type":"BOOLEAN","widget":{"name":"debug_mode"},"link":null}],"outputs":[{"localized_name":"CONDITIONING","name":"CONDITIONING","type":"CONDITIONING","links":[13]}],"properties":{"Node name for S&R":"GreatConditioningModifier","ue_properties":{"widget_ue_connectable":{},"input_ue_unconnectable":{},"version":"7.4.1"}},"widgets_values":[1,3199,"increment","🔸temperature_scale🔹",false]}],"links":[[1,3,0,2,0,"CLIP"],[3,7,0,8,0,"INT"],[4,7,1,8,1,"INT"],[5,1,0,5,0,"MODEL"],[6,2,0,11,0,"CLIP"],[7,10,0,12,1,"STRING"],[8,2,0,12,0,"CLIP"],[9,15,0,13,0,"CONDITIONING"],[10,13,0,14,0,"CONDITIONING"],[11,12,0,15,0,"CONDITIONING"],[12,11,0,16,2,"CONDITIONING"],[13,14,0,16,1,"CONDITIONING"],[14,5,0,16,0,"MODEL"],[15,8,0,16,3,"LATENT"],[16,16,0,17,0,"LATENT"],[18,4,0,17,1,"VAE"],[19,17,0,19,0,"IMAGE"]],"groups":[{"id":1,"title":"🍄Conditioning Modifier","bounding":[360,-520,310,750],"color":"#3f789e","font_size":24,"flags":{"pinned":true}},{"id":2,"title":"prompt","bounding":[-480,20,820,210],"color":"#3f789e","font_size":24,"flags":{}},{"id":3,"title":"model + 
latent","bounding":[-480,-520,820,528.091872541327],"color":"#3f789e","font_size":24,"flags":{"pinned":true}},{"id":4,"title":"rendu","bounding":[690,-520,960,750],"color":"#3f789e","font_size":24,"flags":{"pinned":true}}],"config":{},"extra":{"ue_links":[],"ds":{"scale":0.9646149645000164,"offset":[59.572819317322846,505.0402603127604]},"links_added_by_ue":[]},"version":0.4}
--------------------------------------------------------------------------------
/NDDG_Great_Nodes.py:
--------------------------------------------------------------------------------
1 | import os
2 | import torch
3 | import torch.nn as nn
4 | import comfy.sd
5 | import comfy.utils
6 | import comfy.samplers
7 | import comfy.model_management
8 | from nodes import common_ksampler
9 | import latent_preview
10 | import comfy.sample
11 | from typing import List, Dict, Tuple, Any
12 | import numpy as np
13 | from PIL import Image, ImageDraw, ImageFilter, ImageChops
14 | import io
15 | import hashlib
16 | import json
17 | import colorsys
18 | import random
19 | import math
20 | import matplotlib
21 | import matplotlib.pyplot as plt
22 | import base64
23 | from io import BytesIO
24 | import folder_paths
25 |
26 |
27 | # ----------------------------------------------------------------------------------#
28 | # ----------------------------------------------------------------------------------#
29 |
class GreatConditioningModifier:
    """
    Node that modifies Qwen-Image / Flux / Flux2 conditionings.

    Walks the conditioning structure (bare tensors, list/tuple pairs,
    dicts), applies the selected numerical method to every modifiable
    floating-point tensor, and returns a new conditioning of the same
    shape. Non-float tensors (e.g. integer token IDs), near-empty
    tensors and tensors containing inf/nan are passed through untouched.
    """

    @classmethod
    def INPUT_TYPES(cls):
        # Strength is signed: for most methods a negative value inverts
        # or attenuates the effect (see _apply_modification).
        return {
            "required": {
                "conditioning": ("CONDITIONING",),
                "modification_strength": ("FLOAT", {
                    "default": 0,
                    "min": -10.0,
                    "max": 10.0,
                    "step": 0.05
                }),
                "seed": ("INT", {
                    "default": 0,
                    "min": 0,
                    "max": 0xffffffffffffffff
                }),
                # The emoji markers are part of the option strings stored
                # in saved workflows — do not rename them.
                "modification_method": ([
                    "🔸semantic_drift🔹",
                    "🔸🔸🔸token_dropout🔹🔹",
                    "🔸🔸🔸gradient_amplify🔹🔹",
                    "🔸🔸🔸guided_noise🔹🔹🔹",
                    "🔸quantize🔹🔹🔹🔹",
                    "🔸🔸🔸perlin_noise🔹🔹🔹🔹",
                    "🔸🔸🔸fourier_filter🔹🔹🔹🔹",
                    "🔸style_shift🔹",
                    "🔸temperature_scale🔹",
                    "🔸embedding_mix🔹",
                    "🔸svd_filter🔹",
                    "🔸spherical_rotation🔹",
                    "🔸principal_component🔹",
                    "🔸block_shuffle🔹",
                ], {
                    "default": "🔸semantic_drift🔹"
                }),
                "debug_mode": ("BOOLEAN", {
                    "default": False
                })
            }
        }

    RETURN_TYPES = ("CONDITIONING",)
    FUNCTION = "modify"
    CATEGORY = "🍄NDDG/conditioning"

    def __init__(self):
        # Device every eligible tensor is moved to before modification.
        self.device = comfy.model_management.get_torch_device()

    def _is_modifiable_tensor(self, tensor, debug=False):
        """
        Decide whether a tensor can safely be modified.
        Returns (can_modify, reason); reason feeds the debug output.
        """
        # Only floating-point tensors are eligible (ints are likely token IDs).
        if not tensor.dtype.is_floating_point:
            if debug:
                print(f" ⚠️ Skipping tensor: dtype={tensor.dtype} (not float)")
            return False, f"dtype {tensor.dtype} not supported"

        # Need at least 2 elements for the statistics computed downstream.
        if tensor.numel() < 2:
            if debug:
                print(f" ⚠️ Skipping tensor: numel={tensor.numel()} (too small)")
            return False, "tensor too small"

        # Refuse tensors that already contain inf/nan.
        if not torch.isfinite(tensor).all():
            if debug:
                print(f" ⚠️ Skipping tensor: contains inf/nan")
            return False, "contains inf/nan"

        return True, "OK"

    def _safe_convert_to_float(self, tensor, debug=False):
        """
        Safely convert a tensor to float.
        Returns (converted_tensor, success).

        Integer tensors are assumed to be token IDs and are deliberately
        NOT converted (success=False) so callers leave them alone.
        NOTE(review): this helper is not referenced anywhere in this
        class — presumably kept for external/future use; confirm before
        removing.
        """
        try:
            # Already floating point: nothing to do.
            if tensor.dtype.is_floating_point:
                return tensor, True

            # Attempt (or rather, decline) conversion for integer dtypes.
            if tensor.dtype in [torch.int64, torch.int32, torch.int16, torch.int8]:
                if debug:
                    print(f" 🔄 Converting {tensor.dtype} to float32")
                # Do not alter IDs directly — just report them as skipped.
                return tensor, False

            if tensor.dtype == torch.bool:
                if debug:
                    print(f" 🔄 Boolean tensor detected, skipping")
                return tensor, False

            # Any other dtype: try a plain float32 cast.
            converted = tensor.to(torch.float32)
            return converted, True

        except Exception as e:
            if debug:
                print(f" ❌ Conversion failed: {e}")
            return tensor, False

    def _apply_modification(self, tensor, method, strength, seed, debug=False):
        """Apply the selected numerical modification to one tensor, with type guards."""

        # Guard: dtype / size / finiteness check.
        can_modify, reason = self._is_modifiable_tensor(tensor, debug)
        if not can_modify:
            if debug:
                print(f" ⏭️ Tensor non modifiable: {reason}")
            return tensor

        # Seed both CPU and CUDA RNGs so results are reproducible per seed.
        torch.manual_seed(seed)
        if tensor.is_cuda:
            torch.cuda.manual_seed(seed)

        modified = tensor.clone()

        # Robustly compute the statistics several methods rely on.
        try:
            original_std = tensor.std()
            original_mean = tensor.mean()

            # Bail out if the statistics themselves are invalid.
            if not torch.isfinite(original_std) or not torch.isfinite(original_mean):
                if debug:
                    print(f" ⚠️ Invalid statistics, skipping modification")
                return tensor

            # Avoid division by (near) zero std.
            if original_std < 1e-8:
                if debug:
                    print(f" ⚠️ Std too small ({original_std:.2e}), adding epsilon")
                original_std = torch.tensor(1e-6, device=tensor.device)

        except Exception as e:
            if debug:
                print(f" ❌ Statistics calculation failed: {e}")
            return tensor

        abs_strength = abs(strength)
        is_negative = strength < 0

        if debug:
            sign = "négatif" if is_negative else "positif"
            print(f"\n🔧 Modification: {method} (strength={strength:.2f}, {sign})")
            print(f" Shape: {tensor.shape}, dtype: {tensor.dtype}")
            print(f" Avant - Mean: {original_mean.item():.4f}, Std: {original_std.item():.4f}")

        # ========== MODIFICATION METHODS (all guarded by try/except) ==========

        try:
            if method == "🔸🔸🔸guided_noise🔹🔹🔹":
                # Add (positive strength) or subtract (negative) Gaussian
                # noise scaled by the tensor's own std.
                noise = torch.randn_like(modified)
                noise = noise * original_std * abs_strength
                modified = modified + noise if not is_negative else modified - noise

            elif method == "🔸style_shift🔹":
                # Shift all embeddings along a single random unit direction.
                direction = torch.randn(1, 1, modified.shape[-1], device=modified.device)
                direction = direction / (direction.norm() + 1e-8) * original_std
                shift = direction * abs_strength * 10.0
                modified = modified + shift if not is_negative else modified - shift

            elif method == "🔸semantic_drift🔹":
                # Blend the embeddings with a noisy copy of themselves.
                noise = torch.randn_like(modified) * original_std * 0.5
                alpha = min(abs_strength * 0.7, 1.0)
                if is_negative:
                    modified = modified * (1 + alpha * 0.3) - noise * alpha
                else:
                    modified = modified * (1 - alpha) + (modified + noise) * alpha

            elif method == "🔸temperature_scale🔹":
                # Map strength in [-10, 10] to a temperature, then rescale
                # deviations from the mean by that temperature.
                if strength < 0:
                    temperature = max(0.01, 0.1 + (1.0 + strength / 10.0) * 0.9)
                elif strength == 0:
                    temperature = 1.0
                else:
                    temperature = 1.0 + (min(strength, 10.0) / 10.0) * 3.0

                modified = (modified - original_mean) * temperature + original_mean

                # High temperatures also inject a little extra noise.
                if temperature > 1.5:
                    extra_noise = torch.randn_like(modified) * original_std * (temperature - 1.0) * 0.15
                    modified = modified + extra_noise

                if debug:
                    print(f" Temperature: {temperature:.3f}")

            elif method == "🔸🔸🔸token_dropout🔹🔹":
                # Zero whole token positions; negative strength inverts the
                # keep-mask. Assumes dims 0/1 are (batch, seq) — TODO confirm.
                if len(modified.shape) >= 2:
                    dropout_rate = min(abs_strength * 0.5, 0.95)
                    if is_negative:
                        mask = torch.rand(modified.shape[0], modified.shape[1], 1, device=modified.device) < dropout_rate
                    else:
                        mask = torch.rand(modified.shape[0], modified.shape[1], 1, device=modified.device) > dropout_rate
                    modified = modified * mask

            elif method == "🔸embedding_mix🔹":
                # Mix each token with a randomly permuted token sequence.
                perm_indices = torch.randperm(modified.shape[1])
                permuted = modified[:, perm_indices, :]
                alpha = min(abs_strength * 0.6, 1.0)
                if is_negative:
                    modified = modified * (1 + alpha * 0.5) - permuted * alpha * 0.5
                else:
                    modified = modified * (1 - alpha) + permuted * alpha

            elif method == "🔸svd_filter🔹":
                # Rescale the singular values of the (batch*seq, embed)-flattened data.
                if len(modified.shape) == 3:
                    batch, seq, embed = modified.shape
                    reshaped = modified.reshape(batch * seq, embed)

                    U, S, Vh = torch.linalg.svd(reshaped, full_matrices=False)

                    if is_negative:
                        # Uniformly shrink all components.
                        S_modified = S * (1.0 - abs_strength * 0.5)
                    else:
                        # Boost leading components more than trailing ones.
                        filter_curve = torch.exp(-torch.linspace(0, 3, len(S), device=S.device) * abs_strength)
                        S_modified = S * (1.0 + filter_curve)

                    reconstructed = U @ torch.diag_embed(S_modified) @ Vh
                    modified = reconstructed.reshape(batch, seq, embed)

                    if debug:
                        print(f" SVD: {len(S)} composantes, top-3: {S[:3].tolist()}")

            elif method == "🔸🔸🔸perlin_noise🔹🔹🔹🔹":
                # Smooth value-noise along the sequence axis: linear
                # interpolation between random control points. (Not true
                # Perlin gradient noise, despite the option name.)
                if len(modified.shape) == 3:
                    batch, seq, embed = modified.shape
                    freq = max(1, int(5 * (1.0 - abs_strength * 0.5)))

                    control_points = torch.randn(batch, max(2, seq // freq), embed, device=modified.device)

                    indices = torch.linspace(0, control_points.shape[1] - 1, seq, device=modified.device)
                    idx_floor = indices.long().clamp(0, control_points.shape[1] - 2)
                    idx_ceil = (idx_floor + 1).clamp(0, control_points.shape[1] - 1)
                    weight = (indices - idx_floor.float()).unsqueeze(0).unsqueeze(-1)

                    perlin = (control_points[:, idx_floor] * (1 - weight) +
                              control_points[:, idx_ceil] * weight)

                    perlin = perlin * original_std * abs_strength
                    modified = modified + perlin if not is_negative else modified - perlin

                    if debug:
                        print(f" Perlin: freq={freq}, points={control_points.shape[1]}")

            elif method == "🔸spherical_rotation🔹":
                # Rotate the unit-normalized embeddings in random 2-D
                # coordinate planes, then restore the original norms.
                if len(modified.shape) == 3:
                    norms = modified.norm(dim=-1, keepdim=True) + 1e-8
                    normalized = modified / norms

                    num_rotations = min(modified.shape[-1] // 2, int(abs_strength * 100))

                    for _ in range(num_rotations):
                        dim1 = torch.randint(0, modified.shape[-1], (1,)).item()
                        dim2 = torch.randint(0, modified.shape[-1], (1,)).item()
                        if dim1 == dim2:
                            continue

                        angle = abs_strength * 0.1 if not is_negative else -abs_strength * 0.1
                        cos_a, sin_a = torch.cos(torch.tensor(angle)), torch.sin(torch.tensor(angle))

                        x = normalized[:, :, dim1].clone()
                        y = normalized[:, :, dim2].clone()
                        normalized[:, :, dim1] = x * cos_a - y * sin_a
                        normalized[:, :, dim2] = x * sin_a + y * cos_a

                    modified = normalized * norms

                    if debug:
                        print(f" Rotations: {num_rotations} plans")

            elif method == "🔸🔸🔸fourier_filter🔹🔹🔹🔹":
                # Frequency filter along the sequence axis: low-pass for
                # negative strength, high-pass for positive.
                if len(modified.shape) == 3:
                    fft = torch.fft.fft(modified, dim=1)
                    freqs = torch.fft.fftfreq(modified.shape[1], device=modified.device)

                    if is_negative:
                        cutoff = 1.0 - abs_strength * 0.8
                        filter_mask = (freqs.abs() < cutoff).float().unsqueeze(0).unsqueeze(-1)
                    else:
                        cutoff = abs_strength * 0.5
                        filter_mask = (freqs.abs() > cutoff).float().unsqueeze(0).unsqueeze(-1)

                    fft_filtered = fft * filter_mask
                    modified = torch.fft.ifft(fft_filtered, dim=1).real

                    if debug:
                        print(f" Fourier: cutoff={cutoff:.3f}, {'low-pass' if is_negative else 'high-pass'}")

            elif method == "🔸principal_component🔹":
                # Rescale the data along its PCA axes (eigenvectors of the
                # per-batch covariance matrix).
                if len(modified.shape) == 3:
                    batch, seq, embed = modified.shape
                    centered = modified - modified.mean(dim=1, keepdim=True)
                    cov = (centered.transpose(1, 2) @ centered) / seq

                    eigenvalues, eigenvectors = torch.linalg.eigh(cov)
                    projected = centered @ eigenvectors

                    if is_negative:
                        scale = 1.0 - abs_strength * 0.5
                        projected = projected * scale
                    else:
                        # NOTE(review): torch.linalg.eigh returns eigenvalues
                        # in ascending order, so the largest weight here lands
                        # on the smallest-variance component — confirm intended.
                        weights = torch.linspace(1.0 + abs_strength, 1.0, embed, device=modified.device)
                        projected = projected * weights.unsqueeze(0).unsqueeze(1)

                    modified = projected @ eigenvectors.transpose(1, 2) + modified.mean(dim=1, keepdim=True)

                    if debug:
                        print(f" PCA: top eigenvalue={eigenvalues[0, -1].item():.4f}")

            elif method == "🔸block_shuffle🔹":
                # Shuffle contiguous blocks of tokens along the sequence axis;
                # any trailing remainder (< block_size) stays in place.
                if len(modified.shape) == 3:
                    batch, seq, embed = modified.shape
                    block_size = max(1, int(seq * (1.0 - abs_strength * 0.5)))

                    num_blocks = seq // block_size
                    if num_blocks > 1:
                        blocks = modified[:, :num_blocks * block_size].reshape(batch, num_blocks, block_size, embed)
                        perm = torch.randperm(num_blocks)
                        shuffled_blocks = blocks[:, perm]
                        modified[:, :num_blocks * block_size] = shuffled_blocks.reshape(batch, num_blocks * block_size, embed)

                        if debug:
                            print(f" Block shuffle: {num_blocks} blocks de {block_size}")

            elif method == "🔸quantize🔹🔹🔹🔹":
                if is_negative:
                    # Negative strength: add small dither noise instead.
                    dither = torch.randn_like(modified) * original_std * abs_strength * 0.1
                    modified = modified + dither
                else:
                    # Positive strength: reduce the value resolution by
                    # rounding to num_levels evenly spaced values.
                    num_levels = max(2, int(256 * (1.0 - abs_strength * 0.9)))
                    min_val = modified.min()
                    max_val = modified.max()
                    normalized = (modified - min_val) / (max_val - min_val + 1e-8)
                    quantized = torch.round(normalized * (num_levels - 1)) / (num_levels - 1)
                    modified = quantized * (max_val - min_val) + min_val

                    if debug:
                        print(f" Quantize: {num_levels} niveaux")

            elif method == "🔸🔸🔸gradient_amplify🔹🔹":
                # Amplify (positive) or dampen (negative) token-to-token
                # differences, then reintegrate them with a cumulative sum.
                if len(modified.shape) == 3:
                    diff = modified[:, 1:] - modified[:, :-1]

                    if is_negative:
                        diff = diff * (1.0 - abs_strength * 0.5)
                    else:
                        diff = diff * (1.0 + abs_strength * 2.0)

                    modified[:, 1:] = modified[:, :1] + torch.cumsum(diff, dim=1)

                    if debug:
                        grad_strength = diff.abs().mean().item()
                        print(f" Gradient strength: {grad_strength:.4f}")

            # Final sanity check: revert if the method produced inf/nan.
            if not torch.isfinite(modified).all():
                if debug:
                    print(f" ⚠️ Modification produced inf/nan, reverting")
                return tensor

        except Exception as e:
            if debug:
                print(f" ❌ Modification failed: {e}")
            return tensor

        # ========== DEBUG OUTPUT ==========

        if debug:
            new_std = modified.std()
            new_mean = modified.mean()
            print(f" Après - Mean: {new_mean.item():.4f}, Std: {new_std.item():.4f}")
            diff = (modified - tensor).abs().max().item()
            print(f" Max diff: {diff:.4f}")
            if original_std.item() > 0:
                print(f" Relative change: {(diff / original_std.item()):.2%}")

        return modified

    def modify(self, conditioning, modification_strength, seed, modification_method, debug_mode):
        """
        Main entry point: walk the conditioning structure and run
        _apply_modification on every eligible tensor found in it.

        Handles three item layouts: bare tensors, lists/tuples (whose
        elements may be tensors or dicts), and top-level dicts. Anything
        else is copied through unchanged. Container types are preserved.
        """

        if debug_mode:
            print("\n" + "="*80)
            print("🔍 GREAT CONDITIONING MODIFIER (Flux2 Compatible)")
            print("="*80)
            print(f"Strength: {modification_strength:.2f}, Method: {modification_method}")

        # Strength 0 is an explicit no-op: return the input untouched.
        if modification_strength == 0:
            if debug_mode:
                print("Strength = 0, pas de modification")
            return (conditioning,)

        new_conditioning = []
        tensor_count = 0
        modified_count = 0
        skipped_count = 0

        for idx, item in enumerate(conditioning):
            if isinstance(item, torch.Tensor):
                # Bare tensor at the top level.
                tensor_count += 1
                can_modify, _ = self._is_modifiable_tensor(item, debug_mode)

                if can_modify:
                    tensor = item.to(self.device)
                    modified_tensor = self._apply_modification(
                        tensor, modification_method, modification_strength, seed, debug_mode
                    )
                    new_conditioning.append(modified_tensor)
                    modified_count += 1
                else:
                    new_conditioning.append(item)
                    skipped_count += 1

            elif isinstance(item, (list, tuple)):
                # List/tuple item (presumably the usual [tensor, options]
                # ComfyUI pair — handled generically here).
                new_items = []
                for sub_idx, sub_item in enumerate(item):
                    if isinstance(sub_item, torch.Tensor):
                        tensor_count += 1
                        can_modify, _ = self._is_modifiable_tensor(sub_item, debug_mode)

                        if can_modify:
                            if debug_mode:
                                print(f" ✓ Modifying tensor in position [{idx}][{sub_idx}]")
                            tensor = sub_item.to(self.device)
                            modified_tensor = self._apply_modification(
                                tensor, modification_method, modification_strength, seed, debug_mode
                            )
                            new_items.append(modified_tensor)
                            modified_count += 1
                        else:
                            new_items.append(sub_item)
                            skipped_count += 1

                    elif isinstance(sub_item, dict):
                        # Nested dict: modify tensor values, copy the rest.
                        new_dict = {}
                        for key, value in sub_item.items():
                            if isinstance(value, torch.Tensor):
                                tensor_count += 1
                                can_modify, _ = self._is_modifiable_tensor(value, debug_mode)

                                if can_modify:
                                    if debug_mode:
                                        print(f" ✓ Modifying tensor in dict['{key}']")
                                    tensor = value.to(self.device)
                                    modified_tensor = self._apply_modification(
                                        tensor, modification_method, modification_strength, seed, debug_mode
                                    )
                                    new_dict[key] = modified_tensor
                                    modified_count += 1
                                else:
                                    new_dict[key] = value
                                    skipped_count += 1
                            else:
                                new_dict[key] = value
                        new_items.append(new_dict)
                    else:
                        new_items.append(sub_item)

                # Preserve the original container type.
                if isinstance(item, tuple):
                    new_conditioning.append(tuple(new_items))
                else:
                    new_conditioning.append(new_items)

            elif isinstance(item, dict):
                # Top-level dict: same treatment as the nested dicts above.
                new_dict = {}
                for key, value in item.items():
                    if isinstance(value, torch.Tensor):
                        tensor_count += 1
                        can_modify, _ = self._is_modifiable_tensor(value, debug_mode)

                        if can_modify:
                            if debug_mode:
                                print(f" ✓ Modifying tensor in dict['{key}']")
                            tensor = value.to(self.device)
                            modified_tensor = self._apply_modification(
                                tensor, modification_method, modification_strength, seed, debug_mode
                            )
                            new_dict[key] = modified_tensor
                            modified_count += 1
                        else:
                            new_dict[key] = value
                            skipped_count += 1
                    else:
                        new_dict[key] = value
                new_conditioning.append(new_dict)
            else:
                new_conditioning.append(item)

        if debug_mode:
            print(f"\n📊 Résumé:")
            print(f" Total tensors: {tensor_count}")
            print(f" Modified: {modified_count}")
            print(f" Skipped: {skipped_count}")
            print("="*80 + "\n")

        return (new_conditioning,)
536 |
537 | # ----------------------------------------------------------------------------------#
538 | # ----------------------------------------------------------------------------------#
539 |
def smooth_mask(mask, smoothness):
    """Soften a mask falloff: larger smoothness -> gentler transition.

    The smoothness is clamped to a minimum of 0.1, then the mask (a
    scalar or array-like supporting ``**``) is remapped via
    ``1 - (1 - m) ** (1 / s)``.
    """
    clamped = float(smoothness)
    if clamped < 0.1:
        clamped = 0.1
    # Tiny epsilon keeps the exponent finite even at the clamp boundary.
    exponent = 1.0 / (clamped + 1e-8)
    inverted = 1.0 - mask
    return 1.0 - inverted ** exponent
545 |
def hex_to_rgb(hexstr):
    """Convert a '#RRGGBB' string into an (r, g, b) tuple of ints 0-255."""
    # Each channel is two hex digits starting at offsets 1, 3 and 5.
    return tuple(int(hexstr[i:i + 2], 16) for i in (1, 3, 5))
548 |
def interpolate_color_2d_rgb(stops, x, y):
    """
    Inverse-distance-weighted RGB interpolation of color stops at (x, y).

    stops: iterable of {"x": ..., "y": ..., "color": "#RRGGBB"} dicts.
    Returns an (r, g, b) tuple of ints. A stop located exactly at (x, y)
    wins outright; an empty stop list yields black (same behavior as
    interpolate_color_2d_hsv — previously this raised ZeroDivisionError).
    """
    total_weight = 0.0
    r_total = g_total = b_total = 0.0
    for stop in stops:
        sx, sy = stop["x"], stop["y"]
        r, g, b = hex_to_rgb(stop["color"])
        dist_sq = (x - sx) ** 2 + (y - sy) ** 2
        if dist_sq == 0:
            # Exact hit on a stop: return its color unblended.
            return (r, g, b)
        # Weight falls off with squared distance; epsilon avoids blow-up.
        weight = 1.0 / (dist_sq + 1e-6)
        r_total += r * weight
        g_total += g * weight
        b_total += b * weight
        total_weight += weight
    # Guard against an empty stop list, matching the HSV variant.
    if total_weight == 0:
        return (0, 0, 0)
    return (int(r_total / total_weight), int(g_total / total_weight), int(b_total / total_weight))
564 |
def interpolate_color_2d_hsv(stops, x, y):
    """Inverse-distance-weighted blend of colour stops in HSV space.

    Hue is averaged on the colour circle (via sin/cos components) so wrapped
    hues blend sensibly. Returns an (r, g, b) int tuple; a stop exactly at
    (x, y) wins outright, and an empty stop list yields black.
    """
    weight_sum = 0.0
    hue_cos = hue_sin = sat_sum = val_sum = 0.0
    for stop in stops:
        dist_sq = (x - stop["x"]) ** 2 + (y - stop["y"]) ** 2
        r, g, b = hex_to_rgb(stop["color"])
        if dist_sq == 0:
            # Exactly on a stop: short-circuit to its colour.
            return (r, g, b)
        h, s, v = colorsys.rgb_to_hsv(r / 255.0, g / 255.0, b / 255.0)
        w = 1.0 / (dist_sq + 1e-6)
        hue_cos += np.cos(2.0 * np.pi * h) * w
        hue_sin += np.sin(2.0 * np.pi * h) * w
        sat_sum += s * w
        val_sum += v * w
        weight_sum += w
    if weight_sum == 0:
        return (0, 0, 0)
    # Circular mean of the hue; atan2 is scale-invariant in its arguments.
    mean_hue = (np.arctan2(hue_sin / weight_sum, hue_cos / weight_sum) / (2.0 * np.pi)) % 1.0
    rf, gf, bf = colorsys.hsv_to_rgb(mean_hue, sat_sum / weight_sum, val_sum / weight_sum)
    return (int(rf * 255), int(gf * 255), int(bf * 255))
591 |
592 |
class InteractiveOrganicGradientNode:
    """ComfyUI node: paints one soft shape ("blob") per colour stop of a
    user-edited 2D gradient and alpha-composites them into an organic
    gradient image.

    Returns the RGBA image tensor, a small palette-strip image, and the
    palette as a comma-separated string of hex colours.
    """

    @classmethod
    def INPUT_TYPES(cls):
        # gradient_data is a JSON array of stops:
        # {"x": 0..1, "y": 0..1, "color": "#rrggbb"}
        # (presumably edited by the companion JS widget — see web/ — TODO confirm)
        return {
            "required": {
                "width": ("INT", {"default": 512, "min": 64, "max": 2048}),
                "height": ("INT", {"default": 512, "min": 64, "max": 2048}),
                "blob_shape": (["circle", "radial", "donut", "rectangle", "horizontal_stripe", "vertical_stripe", "diamond", "triangle", "star", "blob_random", "spore"],),
                "blur_strength": ("FLOAT", {"default": 0.4, "min": 0.0, "max": 1.0, "step": 0.05}),
                "blob_size": ("FLOAT", {"default": 0.25, "min": 0.01, "max": 1.0, "step": 0.01}),
                "blob_opacity": ("FLOAT", {"default": 0.5, "min": 0.0, "max": 1.0, "step": 0.01}),
                "radial_smoothness": ("FLOAT", {"default": 1.5, "min": 0.1, "max": 10.0, "step": 0.1, "display": "slider"}),
                "gradient_data": (
                    "STRING",
                    {
                        "default": '[{"x":0.2,"y":0.5,"color":"#ff3300"},{"x":0.8,"y":0.5,"color":"#00ffe1"}]',
                        "multiline": False,
                    },
                ),
            }
        }

    RETURN_TYPES = ("IMAGE", "IMAGE", "STRING")
    RETURN_NAMES = ("image", "palette_image", "palette_hex")
    FUNCTION = "generate"
    CATEGORY = "🍄NDDG/Interactive"

    # ------------------------------------------------ #

    def generate(self, width, height, blob_shape, blur_strength, gradient_data, blob_size, blob_opacity, radial_smoothness):
        """Render the gradient image.

        For each stop: choose a colour by inverse-distance interpolation over
        all stops, draw the selected shape into its own transparent layer,
        blur it, scale its alpha by blob_opacity, then composite the layer
        over the accumulated image.
        """

        # Parse the gradient JSON; fall back to a simple red/blue pair on error
        try:
            gradient_points = json.loads(gradient_data)
        except Exception:
            gradient_points = [{"x": 0.1, "y": 0.5, "color": "#ff0000"}, {"x": 0.9, "y": 0.5, "color": "#0000ff"}]

        img = Image.new("RGBA", (width, height), (0, 0, 0, 0))

        def interpolate_color_2d(stops, x, y, smoothness):
            # Inverse-distance-weighted RGB mix; `smoothness` is the falloff
            # exponent (higher -> the nearest stop dominates more).
            total_weight = 0
            r_total = g_total = b_total = 0
            for stop in stops:
                sx, sy = stop["x"], stop["y"]
                color = stop["color"]
                r = int(color[1:3], 16)
                g = int(color[3:5], 16)
                b = int(color[5:7], 16)
                dist_sq = (x - sx) ** 2 + (y - sy) ** 2
                weight = 1.0 / ((dist_sq + 0.001) ** smoothness)
                r_total += r * weight
                g_total += g * weight
                b_total += b * weight
                total_weight += weight
            if total_weight == 0:
                return (0, 0, 0)
            return (
                int(r_total / total_weight),
                int(g_total / total_weight),
                int(b_total / total_weight),
            )

        # --- Base gradient definition ---
        # NOTE(review): dead code — never called; it iterates the raw JSON
        # *string* (not gradient_points) and references an undefined
        # `interpolate_color`, so it would raise if ever invoked.
        def linear_gradient(x):
            stops = sorted(gradient_data, key=lambda s: s["x"])
            for i in range(len(stops) - 1):
                if stops[i]["x"] <= x <= stops[i + 1]["x"]:
                    t = (x - stops[i]["x"]) / (stops[i + 1]["x"] - stops[i]["x"])
                    c1 = tuple(int(stops[i]["color"][j:j + 2], 16) for j in (1, 3, 5))
                    c2 = tuple(int(stops[i + 1]["color"][j:j + 2], 16) for j in (1, 3, 5))
                    return interpolate_color(c1, c2, t)
            c = stops[-1]["color"]
            return [int(c[j:j + 2], 16) for j in (1, 3, 5)]

        # One blob per stop, composited in list order.
        for stop in gradient_points:
            sx, sy = stop["x"], stop["y"]
            x, y = int(sx * width), int(sy * height)
            rel_x, rel_y = x / width, y / height
            color = interpolate_color_2d(gradient_points, rel_x, rel_y, radial_smoothness)

            # Fresh transparent layer for this blob (some shapes rebind it below)
            temp_img = Image.new("RGBA", (width, height), (0, 0, 0, 0))
            draw = ImageDraw.Draw(temp_img, "RGBA")

            w, h = int(blob_size * width), int(blob_size * height)
            shape_color = (*color, 255)

            # ==== SHAPES =====================================================

            if blob_shape == "circle":
                draw.ellipse((x - w, y - h, x + w, y + h), fill=shape_color)

            # -------------------------------------------------------------
            # 🔸 Shape: RADIAL (alpha fades from centre to rim)
            # -------------------------------------------------------------
            elif blob_shape == "radial":
                radius = int(blob_size * min(width, height))
                size = radius * 2

                yy, xx = np.mgrid[-radius:radius, -radius:radius]
                dist = np.sqrt(xx**2 + yy**2) / float(radius)
                dist = np.clip(dist, 0.0, 1.0)

                # raw linear falloff mask
                mask = 1.0 - dist

                # apply the proper smoothing curve
                mask = smooth_mask(mask, radial_smoothness)

                alpha = (mask * 255.0).astype(np.uint8)
                grad = Image.fromarray(alpha, mode="L")

                blob = Image.new("RGBA", (size, size), (*color, 255))
                blob.putalpha(grad)

                # Don't paste onto img here; build temp_img so blur and opacity still apply
                temp_img = Image.new("RGBA", (width, height), (0, 0, 0, 0))
                temp_img.paste(blob, (x - radius, y - radius), blob)

            # -------------------------------------------------------------
            # 🔹 Shape: DONUT (hole in the centre; respects blur_strength and blob_opacity)
            # -------------------------------------------------------------
            elif blob_shape == "donut":
                radius = int(blob_size * min(width, height))
                size = radius * 2

                # Fresh layer for the donut
                temp_img = Image.new("RGBA", (width, height), (0, 0, 0, 0))
                draw = ImageDraw.Draw(temp_img, "RGBA")

                # Opaque outer circle
                draw.ellipse((x - radius, y - radius, x + radius, y + radius), fill=(*color, 255))

                # Transparent inner circle punches the hole
                inner_radius = radius * 0.5  # 50% of the outer circle's size
                draw.ellipse((x - inner_radius, y - inner_radius, x + inner_radius, y + inner_radius), fill=(0, 0, 0, 0))

                # No paste here; the shared blur/opacity code below handles it

            # -------------------------------------------------------------
            elif blob_shape == "rectangle":
                draw.rectangle((x - w, y - h, x + w, y + h), fill=shape_color)
            # -------------------------------------------------------------
            elif blob_shape == "horizontal_stripe":
                # Horizontal band spanning the full canvas width
                band_height = int(blob_size * height)
                y0 = max(0, y - band_height // 2)
                y1 = min(height, y + band_height // 2)
                draw.rectangle((0, y0, width, y1), fill=shape_color)
            # -------------------------------------------------------------
            elif blob_shape == "vertical_stripe":
                # Vertical band spanning the full canvas height
                band_width = int(blob_size * width)
                x0 = max(0, x - band_width // 2)
                x1 = min(width, x + band_width // 2)
                draw.rectangle((x0, 0, x1, height), fill=shape_color)
            # -------------------------------------------------------------
            elif blob_shape == "diamond":
                points = [(x, y - h), (x + w, y), (x, y + h), (x - w, y)]
                draw.polygon(points, fill=shape_color)
            # -------------------------------------------------------------
            elif blob_shape == "triangle":
                points = [(x, y - h), (x + w, y + h), (x - w, y + h)]
                draw.polygon(points, fill=shape_color)
            # -------------------------------------------------------------
            elif blob_shape == "star":
                # Five-pointed star: vertices alternate outer/inner radius
                points = []
                spikes = 5
                outer_r = w
                inner_r = w * 0.4
                for i in range(spikes * 2):
                    angle = math.pi / spikes * i
                    r = outer_r if i % 2 == 0 else inner_r
                    px = x + math.cos(angle) * r
                    py = y + math.sin(angle) * r
                    points.append((px, py))
                draw.polygon(points, fill=shape_color)
            # -------------------------------------------------------------
            elif blob_shape == "blob_random":
                # Irregular polygon: 6-12 vertices with jittered radius.
                # Uses the global `random` module, so the result is not seeded.
                points = []
                num_points = random.randint(6, 12)
                for i in range(num_points):
                    angle = 2 * math.pi * i / num_points
                    r = w * (0.7 + 0.3 * random.random())
                    px = x + math.cos(angle) * r
                    py = y + math.sin(angle) * r
                    points.append((px, py))
                draw.polygon(points, fill=shape_color)
            # -------------------------------------------------------------
            elif blob_shape == "spore":
                # Noisy circle outline whose alpha fades toward the rim
                radius = int(blob_size * min(width, height))
                temp_img = Image.new("RGBA", (width, height), (0, 0, 0, 0))
                draw = ImageDraw.Draw(temp_img, "RGBA")
                cx, cy = x, y
                points = []
                num_points = 120
                noise_strength = 0.25
                for i in range(num_points):
                    angle = 2 * math.pi * i / num_points
                    noise = (random.random() - 0.5) * 2 * noise_strength
                    r = radius * (1 + noise)
                    px = cx + r * math.cos(angle)
                    py = cy + r * math.sin(angle)
                    points.append((px, py))
                draw.polygon(points, fill=shape_color)

                # Multiply the alpha channel by a soft radial falloff
                spore_array = np.array(temp_img)
                alpha_layer = spore_array[..., 3].astype(np.float32) / 255.0
                y_indices, x_indices = np.indices((height, width))
                dist = np.sqrt((x_indices - cx) ** 2 + (y_indices - cy) ** 2)
                dist /= radius
                fade = np.clip(1.0 - dist ** 1.5, 0, 1)
                alpha_layer *= fade
                spore_array[..., 3] = (alpha_layer * 255).astype(np.uint8)
                temp_img = Image.fromarray(spore_array, "RGBA")

            # ================================================================

            # Blur the blob layer; blur radius scales with image size
            if blur_strength > 0:
                blur_val = int((width + height) / 2 * blur_strength / 6)
                if blur_val > 0:
                    temp_img = temp_img.filter(ImageFilter.GaussianBlur(radius=blur_val))

            # Apply the opacity
            temp_np = np.array(temp_img)
            temp_np[..., 3] = (temp_np[..., 3].astype(np.float32) * blob_opacity).astype(np.uint8)
            temp_img = Image.fromarray(temp_np, "RGBA")

            # Merge into the main image (manual "over" alpha compositing)
            base_np = np.array(img).astype(np.float32)
            overlay_np = np.array(temp_img).astype(np.float32)

            alpha_overlay = overlay_np[..., 3:] / 255.0
            alpha_base = base_np[..., 3:] / 255.0

            # Resulting colour
            out_rgb = overlay_np[..., :3] * alpha_overlay + base_np[..., :3] * (1 - alpha_overlay)

            # Resulting alpha
            out_alpha = alpha_overlay + alpha_base * (1 - alpha_overlay)

            # Keep alpha within [0, 1]
            out_alpha = np.clip(out_alpha, 0, 1)

            # Recombine colour and alpha
            out = np.dstack([out_rgb, out_alpha * 255]).astype(np.uint8)

            # New accumulated image
            img = Image.fromarray(out, mode="RGBA")

        # Final tensor: (1, H, W, 4) float32 in [0, 1]
        img_np = np.array(img).astype(np.float32) / 255.0
        img_tensor = torch.from_numpy(img_np)[None,]

        # Palette strip: one 20x20 swatch per stop
        colors = [tuple(int(p["color"][i:i + 2], 16) for i in (1, 3, 5)) for p in gradient_points]
        palette_img = Image.new("RGB", (len(colors) * 20, 20))
        draw_pal = ImageDraw.Draw(palette_img)
        for i, color in enumerate(colors):
            draw_pal.rectangle([i * 20, 0, (i + 1) * 20, 20], fill=color)
        palette_tensor = torch.from_numpy(np.array(palette_img).astype(np.float32) / 255.0)[None,]
        palette_hex = ", ".join([p["color"] for p in gradient_points])

        return (img_tensor, palette_tensor, palette_hex)
856 |
857 | # ----------------------------------------------------------------------------------#
858 | # ----------------------------------------------------------------------------------#
859 |
class ImageBlendNode:
    """ComfyUI node: blends two IMAGE tensors with Photoshop-style modes.

    image_b is resized and centre-cropped ("cover") to image_a's resolution,
    blended per-channel according to `mode`, then composited over image_a
    using image_b's alpha scaled by `opacity`. Output is RGB.
    """

    @classmethod
    def INPUT_TYPES(cls):
        return {
            "required": {
                "image_a": ("IMAGE",),
                "image_b": ("IMAGE",),
                "mode": ([
                    "normal", "multiply", "screen", "overlay",
                    "add", "subtract", "difference", "lighten", "darken"
                ], {"default": "normal"}),
                "opacity": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}),
            }
        }

    RETURN_TYPES = ("IMAGE",)
    FUNCTION = "blend"
    CATEGORY = "🍄NDDG/Blend"

    @classmethod
    def IS_CHANGED(cls, **kwargs):
        # NaN never compares equal to itself, so ComfyUI re-executes this
        # node on every run instead of serving a cached result.
        return float("nan")
    # ------------------------------------------------ #

    def add_alpha(self, arr):
        """Append an opaque alpha channel if `arr` is HxWx3; returns HxWx4."""
        if arr.shape[2] == 3:
            alpha = np.ones((arr.shape[0], arr.shape[1], 1), dtype=arr.dtype)
            arr = np.concatenate([arr, alpha], axis=2)
        return arr

    def resize_and_crop_to_cover(self, arr, target_h, target_w):
        """Resize then centre-crop `arr` so it fully covers target_h x target_w.

        Returns a float32 array scaled to [0, 1] regardless of input dtype.
        """
        h, w = arr.shape[:2]

        # "Cover" scale factor: the larger ratio guarantees both dimensions
        # reach or exceed the target size.
        scale = max(target_w / w, target_h / h)

        new_w = int(w * scale)
        new_h = int(h * scale)

        # Resize through PIL (LANCZOS for quality)
        img_pil = Image.fromarray((arr * 255).astype(np.uint8)) if arr.dtype != np.uint8 else Image.fromarray(arr)
        img_resized = img_pil.resize((new_w, new_h), Image.LANCZOS)
        arr_resized = np.array(img_resized).astype(np.float32) / 255.0

        # Centre crop to the exact target size
        start_x = (new_w - target_w) // 2
        start_y = (new_h - target_h) // 2
        arr_cropped = arr_resized[start_y:start_y + target_h, start_x:start_x + target_w, :]

        return arr_cropped

    def blend_mode(self, a, b, mode):
        """Per-channel blend of base `a` with overlay `b`; inputs in [0, 1].

        Unknown modes fall back to "normal" (returns `b`).
        """
        if mode == "normal":
            return b
        elif mode == "multiply":
            return a * b
        elif mode == "screen":
            return 1 - (1 - a) * (1 - b)
        elif mode == "overlay":
            return np.where(a < 0.5, 2 * a * b, 1 - 2 * (1 - a) * (1 - b))
        elif mode == "add":
            return np.clip(a + b, 0, 1)
        elif mode == "subtract":
            return np.clip(a - b, 0, 1)
        elif mode == "difference":
            return np.abs(a - b)
        elif mode == "lighten":
            return np.maximum(a, b)
        elif mode == "darken":
            return np.minimum(a, b)
        else:
            return b

    def blend(self, image_a, image_b, mode, opacity):
        """Blend the first image of each batch and return a (1, H, W, 3) tensor."""
        arr_a = image_a[0].cpu().numpy()
        arr_b = image_b[0].cpu().numpy()

        # Normalise to [0, 1] in case 0-255 images slip through
        if arr_a.max() > 1.0: arr_a = arr_a / 255.0
        if arr_b.max() > 1.0: arr_b = arr_b / 255.0

        # Add alpha channels if missing
        arr_a = self.add_alpha(arr_a)
        arr_b = self.add_alpha(arr_b)

        # Resize image_b so it covers image_a
        arr_b = self.resize_and_crop_to_cover(arr_b, arr_a.shape[0], arr_a.shape[1])

        # Per-mode blend of the RGB channels
        blended_rgb = self.blend_mode(arr_a[..., :3], arr_b[..., :3], mode)

        # Composite: overlay alpha scaled by opacity controls the mix.
        # Base alpha is intentionally discarded — the output is RGB only.
        alpha_b = arr_b[..., 3:4] * opacity
        out_rgb = (1 - alpha_b) * arr_a[..., :3] + alpha_b * blended_rgb

        # Clamp to [0, 1]
        out_rgb = np.clip(out_rgb, 0, 1)

        # Back to a batch tensor
        out_tensor = torch.from_numpy(out_rgb).unsqueeze(0).float()

        return (out_tensor,)
966 |
967 | # ----------------------------------------------------------------------------------#
968 | # ----------------------------------------------------------------------------------#
969 |
class GreatRandomOrganicGradientNode:
    """ComfyUI node: scatters randomly placed blobs of palette colours over a
    background, blurs the result, and returns the image plus its palette
    (as a swatch image and a hex string). Reproducible via `seed`.
    """

    @classmethod
    def INPUT_TYPES(cls):
        # color1..color8 are only used when random_palette is off; malformed
        # entries are skipped and the palette is topped up with random colours.
        return {
            "required": {
                "width": ("INT", {"default": 512, "min": 64, "max": 2048, "step": 64}),
                "height": ("INT", {"default": 512, "min": 64, "max": 2048, "step": 64}),
                "colors": ("INT", {"default": 3, "min": 2, "max": 8}),
                "blob_count": ("INT", {"default": 5, "min": 1, "max": 50}),
                "blob_shape": (["circle", "square", "polygon", "radial"], {"default": "circle"}),
                "blur_strength": ("FLOAT", {"default": 0.25, "min": 0.05, "max": 1.0, "step": 0.05}),
                "background_color": ("STRING", {"default": "#FFFFFF", "widget": {"type": "color", "format":"hex"}}),
                "random_background": ("BOOLEAN", {"default": False}),
                "random_palette": ("BOOLEAN", {"default": True}),
                "color1": ("STRING", {"default": "#c7c7c7", "multiline": False, "dynamicPrompts": False}),
                "color2": ("STRING", {"default": "#c7c7c7", "multiline": False, "dynamicPrompts": False}),
                "color3": ("STRING", {"default": "#c7c7c7", "multiline": False, "dynamicPrompts": False}),
                "color4": ("STRING", {"default": "#c7c7c7", "multiline": False, "dynamicPrompts": False}),
                "color5": ("STRING", {"default": "#c7c7c7", "multiline": False, "dynamicPrompts": False}),
                "color6": ("STRING", {"default": "#c7c7c7", "multiline": False, "dynamicPrompts": False}),
                "color7": ("STRING", {"default": "#c7c7c7", "multiline": False, "dynamicPrompts": False}),
                "color8": ("STRING", {"default": "#c7c7c7", "multiline": False, "dynamicPrompts": False}),
                "seed": ("INT", {"default": -1, "min": -1, "max": 999999}),
                "transparent_background": ("BOOLEAN", {"default": False}),
            }
        }

    RETURN_TYPES = ("IMAGE", "IMAGE", "STRING")
    RETURN_NAMES = ("image", "palette_image", "palette_hex")
    FUNCTION = "make_gradient"
    CATEGORY = "🍄NDDG/Generators"

    def make_gradient(self, width, height, colors, blob_count, blob_shape, blur_strength,
                      background_color, random_background, random_palette,
                      color1, color2, color3, color4, color5, color6, color7, color8,
                      seed, transparent_background):
        """Generate the random gradient image.

        Returns (image_tensor, palette_tensor, palette_hex_str); both tensors
        are (1, H, W, 4) float32 in [0, 1].
        """

        # Seed: -1 picks a random one; every draw below uses this RNG
        if seed == -1:
            seed = np.random.randint(0, 999999)
        rng = np.random.default_rng(seed)

        # Background: transparent, random colour, or the given hex colour
        if transparent_background:
            img = Image.new("RGBA", (width, height), (0, 0, 0, 0))
            draw = ImageDraw.Draw(img, "RGBA")
        else:
            if random_background:
                bg = tuple(rng.integers(0, 256, size=3))
            else:
                bg = self._hex_to_rgb(background_color)
            img = Image.new("RGBA", (width, height), (*bg, 255))
            draw = ImageDraw.Draw(img, "RGBA")

        # Palette: fully random, or user colours topped up with random ones
        if random_palette:
            palette = [tuple(rng.integers(0, 256, size=3)) for _ in range(colors)]
        else:
            chosen = []
            for c in [color1, color2, color3, color4, color5, color6, color7, color8]:
                if c and c.startswith("#") and len(c) == 7:
                    try:
                        rgb = self._hex_to_rgb(c)
                        chosen.append(rgb)
                    except:
                        # malformed hex digits — skip this entry
                        pass
            palette = chosen.copy()
            while len(palette) < colors:
                palette.append(tuple(rng.integers(0, 256, size=3)))
            palette = palette[:colors]

        # Draw the blobs, cycling through the palette
        for i in range(blob_count):
            color = palette[i % len(palette)]
            x, y = rng.integers(0, width), rng.integers(0, height)
            # NOTE(review): radius is derived from width only, even for
            # non-square canvases — confirm this is intended
            radius = rng.integers(width // 6, width // 2)

            if blob_shape == "circle":
                bbox = [x - radius, y - radius, x + radius, y + radius]
                draw.ellipse(bbox, fill=(*color, 255))

            elif blob_shape == "square":
                bbox = [x - radius, y - radius, x + radius, y + radius]
                draw.rectangle(bbox, fill=(*color, 255))

            elif blob_shape == "polygon":
                points = [(x + rng.integers(-radius, radius),
                           y + rng.integers(-radius, radius)) for _ in range(rng.integers(3, 8))]
                draw.polygon(points, fill=(*color, 255))

            elif blob_shape == "radial":
                # Soft-edged disc: alpha falls off linearly from centre to rim
                size = radius * 2
                yy, xx = np.mgrid[-radius:radius, -radius:radius]
                dist = np.sqrt(xx**2 + yy**2) / radius
                mask = (1 - np.clip(dist, 0, 1)) * 255
                mask = mask.astype(np.uint8)

                grad = Image.fromarray(mask, mode="L")
                blob = Image.new("RGBA", (size, size), (*color, 255))
                blob.putalpha(grad)
                img.paste(blob, (x - radius, y - radius), blob)

        # Blur; the radius scales with image size and blur_strength
        blur_strength = max(0.05, min(blur_strength, 1.0))
        blur_radius = max(1, int((width + height) / 2 * blur_strength / 4))
        img = img.filter(ImageFilter.GaussianBlur(radius=blur_radius))

        # Main image tensor (always RGBA)
        arr = np.array(img).astype(np.float32) / 255.0
        if arr.shape[-1] == 3:  # safety net
            alpha = np.ones((*arr.shape[:2], 1), dtype=np.float32)
            arr = np.concatenate([arr, alpha], axis=-1)
        tensor = torch.from_numpy(arr).unsqueeze(0)

        # Palette as hex strings
        palette_hex = [f"#{c[0]:02X}{c[1]:02X}{c[2]:02X}" for c in palette]
        palette_hex_str = ", ".join(palette_hex)

        # Palette swatch image (RGBA as well)
        palette_img = Image.new("RGBA", (max(256, 64 * colors), 64), (255, 255, 255, 255))
        draw = ImageDraw.Draw(palette_img)
        band_w = palette_img.width // colors
        for i, c in enumerate(palette):
            x0 = i * band_w
            # last band absorbs any rounding remainder
            x1 = (i + 1) * band_w if i < colors - 1 else palette_img.width
            draw.rectangle([x0, 0, x1, 64], fill=(*c, 255))

        palette_arr = np.array(palette_img).astype(np.float32) / 255.0
        if palette_arr.shape[-1] == 3:
            alpha = np.ones((*palette_arr.shape[:2], 1), dtype=np.float32)
            palette_arr = np.concatenate([palette_arr, alpha], axis=-1)
        palette_tensor = torch.from_numpy(palette_arr).unsqueeze(0)

        return (tensor, palette_tensor, palette_hex_str)

    def _hex_to_rgb(self, hex_color):
        """Convert a '#RRGGBB' (or 'RRGGBB') hex colour to an (r, g, b) tuple."""
        hex_color = hex_color.lstrip("#")
        return tuple(int(hex_color[i:i+2], 16) for i in (0, 2, 4))
1109 |
1110 | # ----------------------------------------------------------------------------------#
1111 | # ----------------------------------------------------------------------------------#
1112 |
class GreatMultiplySigmas:
    """ComfyUI node: scales a SIGMAS schedule by per-step factors.

    `factor_global` applies throughout the selected zone; inside the zone the
    factor additionally ramps from `start_factor` to `end_factor` following a
    linear or sigmoid curve. Sigmas outside [zone_start, zone_end] are left
    untouched. Optionally renders a before/after matplotlib graph as a
    preview image in the node UI.
    """

    def __init__(self):
        # Temp-directory plumbing for the optional preview image.
        self.output_dir = folder_paths.get_temp_directory()
        self.type = "temp"
        # Random 5-letter suffix so concurrent runs don't collide on filenames.
        # (Fixed alphabet: previous literal duplicated 'p' and omitted 'w'.)
        self.prefix_append = "_temp_" + ''.join(random.choice("abcdefghijklmnopqrstuvwxyz") for x in range(5))
        self.compress_level = 4

    @classmethod
    def INPUT_TYPES(cls):
        return {
            "required": {
                "sigmas": ("SIGMAS", {"forceInput": True}),
                "factor_global": ("FLOAT", {"default": 1, "min": 0, "max": 100, "step": 0.001}),
                "start_factor": ("FLOAT", {"default": 1, "min": 0, "max": 100, "step": 0.001}),
                "end_factor": ("FLOAT", {"default": 1, "min": 0, "max": 100, "step": 0.001}),
                "curve_type": (["linear", "s_curve"], {"default": "linear"}),
                "zone_start": ("FLOAT", {"default": 0, "min": 0, "max": 1, "step": 0.001}),
                "zone_end": ("FLOAT", {"default": 1, "min": 0, "max": 1, "step": 0.001}),
                "curve_strength": ("FLOAT", {"default": 2.0, "min": 0.1, "max": 10, "step": 0.1}),
            },
            "optional": {
                "show_preview": ("BOOLEAN", {"default": False}),
            }
        }

    FUNCTION = "simple_output"
    RETURN_TYPES = ("SIGMAS",)
    OUTPUT_NODE = True
    # NOTE(review): category lacks the 🍄NDDG prefix used by the other nodes;
    # left unchanged to avoid moving the node in existing users' menus.
    CATEGORY = "NDDG/sigmas"

    def interpolate_curve(self, t, curve_type, strength):
        """Interpolated value for a normalised position t in [0, 1].

        curve_type "linear" passes t through; "s_curve" applies a sigmoid
        centred at t=0.5 whose steepness grows with `strength` (note the
        endpoints are not exactly 0/1 — the curve only saturates toward them).
        Unknown curve types fall back to linear.
        """
        if curve_type == "linear":
            return t

        elif curve_type == "s_curve":
            # Centred sigmoid: map t in [0,1] to x in [-strength, strength]
            x = (t - 0.5) * strength * 2
            return 1 / (1 + math.exp(-x))

        return t

    @staticmethod
    def save_image_to_temp(image_pil, output_dir, prefix_append, compress_level=4):
        """Save a PIL image into the temp dir under a unique filename.

        Returns the {"filename", "subfolder", "type"} dict that ComfyUI's UI
        expects for preview images.
        """
        import os
        from PIL.PngImagePlugin import PngInfo

        # Find a filename that doesn't exist yet
        counter = 0
        filename = f"{prefix_append}_{counter:05d}.png"
        file_path = os.path.join(output_dir, filename)

        while os.path.exists(file_path):
            counter += 1
            filename = f"{prefix_append}_{counter:05d}.png"
            file_path = os.path.join(output_dir, filename)

        # Save with empty PNG metadata
        metadata = PngInfo()
        image_pil.save(file_path, pnginfo=metadata, compress_level=compress_level)

        return {
            "filename": filename,
            "subfolder": "",
            "type": "temp"
        }

    @staticmethod
    def create_comparison_graph(sigmas_before, sigmas_after, zone_start_idx, zone_end_idx):
        """Plot before/after sigma curves and return the figure as a PIL image."""
        plt.figure(figsize=(10, 6))

        # Convert to numpy for matplotlib
        steps = np.arange(len(sigmas_before))
        before = sigmas_before.cpu().numpy()
        after = sigmas_after.cpu().numpy()

        # Draw both curves
        plt.plot(steps, before, 'o-', color='blue', label='Before', linewidth=2, markersize=6, alpha=0.7)
        plt.plot(steps, after, 'o-', color='red', label='After', linewidth=2, markersize=6, alpha=0.7)

        # Highlight the modified zone
        if zone_start_idx < zone_end_idx:
            plt.axvspan(zone_start_idx, zone_end_idx, alpha=0.35, color='yellow', label='Modified Zone')

        plt.title("Sigmas Comparison: Before vs After", fontsize=14, fontweight='bold')
        plt.xlabel("Step Number", fontsize=11)
        plt.ylabel("Sigma Value", fontsize=11)
        plt.legend(loc='best', fontsize=10)
        plt.grid(True, alpha=0.3, linestyle='--')
        plt.tight_layout()

        # Render the figure into a PIL image
        with BytesIO() as buf:
            plt.savefig(buf, format='png', dpi=100, bbox_inches='tight')
            buf.seek(0)
            # .copy() detaches the image from the buffer before it closes
            image = Image.open(buf).copy()

        plt.close()
        return image

    def simple_output(self, sigmas, factor_global, start_factor, end_factor, curve_type, zone_start, zone_end, curve_strength, show_preview=False):
        """Apply the factors and return {"ui": ..., "result": (sigmas,)}.

        The input tensor is cloned, never mutated (the node is stateless).
        """
        sigmas_original = sigmas.clone()
        sigmas = sigmas.clone()

        total_sigmas = len(sigmas)

        # Zone bounds as indices (positions are fractions of the schedule)
        zone_start_idx = int(zone_start * (total_sigmas - 1))
        zone_end_idx = int(zone_end * (total_sigmas - 1))

        # Guarantee zone_end >= zone_start
        if zone_end_idx < zone_start_idx:
            zone_start_idx, zone_end_idx = zone_end_idx, zone_start_idx

        zone_length = zone_end_idx - zone_start_idx

        # Apply the modification ONLY inside the selected zone
        for i in range(zone_start_idx, zone_end_idx + 1):
            # Normalised position within the zone (0 to 1)
            if zone_length > 0:
                t = (i - zone_start_idx) / zone_length
            else:
                t = 0

            # Shape the position with the interpolation curve
            curve_value = self.interpolate_curve(t, curve_type, curve_strength)

            # Interpolate between start_factor (t=0) and end_factor (t=1)
            local_factor = start_factor + (end_factor - start_factor) * curve_value

            # Combine with the global factor
            final_factor = factor_global * local_factor

            # Relative (multiplicative) adjustment
            sigmas[i] *= final_factor

        # Sigmas outside [zone_start_idx, zone_end_idx] stay unchanged

        # Optional before/after preview shown inside the node
        ui_output = {"images": []}

        if show_preview:
            try:
                pil_image = self.create_comparison_graph(sigmas_original, sigmas, zone_start_idx, zone_end_idx)

                saved_image = self.save_image_to_temp(
                    pil_image,
                    self.output_dir,
                    self.prefix_append,
                    self.compress_level
                )

                ui_output["images"].append(saved_image)

            except Exception as e:
                # Preview is best-effort: log and still return the sigmas
                print(f"[GreatMultiplySigmas] Error while generating the preview graph: {e}")
                import traceback
                traceback.print_exc()

        return {"ui": ui_output, "result": (sigmas,)}
1300 |
1301 | # ----------------------------------------------------------------------------------#
1302 | # ----------------------------------------------------------------------------------#
1303 |
1304 | # ----------------------------------------------------------------------------------#
1305 | # ----------------------------------------------------------------------------------#
1306 |
1307 |
1308 |
# Registration tables consumed by ComfyUI (re-exported from __init__.py).
# Keys must stay stable: they identify the node type in saved workflows.
NODE_CLASS_MAPPINGS = {
    "GreatConditioningModifier": GreatConditioningModifier,
    "InteractiveOrganicGradientNode": InteractiveOrganicGradientNode,
    "ImageBlendNode": ImageBlendNode,
    "GreatRandomOrganicGradientNode": GreatRandomOrganicGradientNode,
    "GreatMultiplySigmas": GreatMultiplySigmas
}
# Human-readable titles shown in the ComfyUI node menu.
NODE_DISPLAY_NAME_MAPPINGS = {
    "GreatConditioningModifier": "🍄Great Conditioning Modifier",
    "InteractiveOrganicGradientNode": "🍄Great Interactive Organic Gradient",
    "ImageBlendNode": "🍄Great Image Blend",
    "GreatRandomOrganicGradientNode" : "🍄Great Random Organic Gradient",
    "GreatMultiplySigmas": "🍄Great Multiply Sigmas"
}
--------------------------------------------------------------------------------