├── .env
├── .gitignore
├── .streamlit
│   └── config.toml
├── README.md
├── main.py
├── model
│   ├── __init__.py
│   ├── image23d.py
│   ├── image2image.py
│   ├── image2video.py
│   ├── text23d.py
│   ├── text2image.py
│   └── text2video.py
├── requirements.txt
└── utils
    ├── __init__.py
    └── helpers.py
/.env: -------------------------------------------------------------------------------- 1 | # HuggingFace Access Token 2 | HUGGINGFACE_TOKEN=YOUR_ACCESS_TOKEN 3 | 4 | # Optional: CUDA settings 5 | CUDA_VISIBLE_DEVICES=0 6 | 7 | # Optional: Model cache directory 8 | HF_HOME=./models_cache 9 | TRANSFORMERS_CACHE=./models_cache 10 | 11 | # Optional: Streamlit settings 12 | STREAMLIT_SERVER_PORT=8501 13 | STREAMLIT_SERVER_ADDRESS=localhost -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Python 2 | __pycache__/ 3 | *.py[cod] 4 | *$py.class 5 | *.so 6 | .Python 7 | build/ 8 | develop-eggs/ 9 | dist/ 10 | downloads/ 11 | eggs/ 12 | .eggs/ 13 | lib/ 14 | lib64/ 15 | parts/ 16 | sdist/ 17 | var/ 18 | wheels/ 19 | share/python-wheels/ 20 | *.egg-info/ 21 | .installed.cfg 22 | *.egg 23 | Pipfile.lock 24 | 25 | # Virtual Environment 26 | venv/ 27 | env/ 28 | ENV/ 29 | env.bak/ 30 | venv.bak/ 31 | 32 | # Environment Variables 33 | .env 34 | .env.local 35 | .env.development.local 36 | .env.test.local 37 | .env.production.local 38 | 39 | # Streamlit 40 | .streamlit/secrets.toml 41 | 42 | # Model Cache 43 | models_cache/ 44 | .cache/ 45 | hf_cache/ 46 | transformers_cache/ 47 | 48 | # Generated Files 49 | *.mp4 50 | *.avi 51 | *.mov 52 | *.glb 53 | *.obj 54 | *.ply 55 | *.stl 56 | *.png 57 | *.jpg 58 | *.jpeg 59 | *.gif 60 | *.bmp 61 | *.tiff 62 | generated_* 63 | text_to_* 64 | image_to_* 65 | 66 | # Temporary Files 67 | tmp/ 68 | temp/ 69 | *.tmp 70 | *.temp 71 | 72 | # Logs 73 | *.log 74 | logs/ 75 | 76 | # IDE 77 | .vscode/ 78 | .idea/ 79 | *.swp 80 | *.swo 81 | *~ 82 | 83 | # OS 84 | .DS_Store 85 | .DS_Store?
86 | ._* 87 | .Spotlight-V100 88 | .Trashes 89 | ehthumbs.db 90 | Thumbs.db 91 | 92 | # Jupyter Notebook 93 | .ipynb_checkpoints 94 | 95 | # PyTorch 96 | *.pth 97 | *.pt 98 | *.ckpt 99 | 100 | # Large Files 101 | *.zip 102 | *.tar.gz 103 | *.rar 104 | *.7z 105 | 106 | # API Keys (extra security) 107 | *token* 108 | *key* 109 | *secret* 110 | api_keys.txt 111 | credentials.json 112 | 113 | # Model Downloads 114 | models/ 115 | checkpoints/ 116 | weights/ -------------------------------------------------------------------------------- /.streamlit/config.toml: -------------------------------------------------------------------------------- 1 | [global] 2 | dataFrameSerialization = "legacy" 3 | 4 | [server] 5 | port = 8501 6 | address = "localhost" 7 | maxUploadSize = 200 8 | enableCORS = false 9 | enableXsrfProtection = false 10 | 11 | [browser] 12 | gatherUsageStats = false 13 | serverAddress = "localhost" 14 | serverPort = 8501 15 | 16 | [theme] 17 | primaryColor = "#667eea" 18 | backgroundColor = "#ffffff" 19 | secondaryBackgroundColor = "#f0f2f6" 20 | textColor = "#262730" 21 | font = "sans serif" 22 | 23 | [client] 24 | toolbarMode = "minimal" 25 | showErrorDetails = true 26 | 27 | [runner] 28 | magicEnabled = true 29 | installTracer = false 30 | fixMatplotlib = true -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # 🎨 Kapsamlı AI Studio 2 | 3 | Bu proje, en son AI modellerini kullanarak metin ve görsellerden video, 3D model ve gelişmiş görseller oluşturabilen kapsamlı bir yapay zeka uygulamasıdır. 4 | 5 | ## 🚀 Özellikler 6 | 7 | ### 📝 Metin → Görsel 8 | - **Model**: FLUX.1-dev (black-forest-labs/FLUX.1-dev) 9 | - **Özellik**: Metinden yüksek kaliteli görseller oluşturma 10 | - **Çözünürlük**: 1024x1024'e kadar 11 | 12 | ### 🖼️ Görsel → Gelişmiş Görsel 13 | - **Model**: Stable Diffusion XL Refiner (stabilityai/stable-diffusion-xl-refiner-1.0) 14 | - **Özellik**: Mevcut görselleri geliştirme ve iyileştirme 15 | - **Format**: PNG, JPG, JPEG destekli 16 | 17 | ### 📝 Metin → Video 18 | - **Model**: LTX-Video (Lightricks/LTX-Video) 19 | - **Özellik**: Metinden gerçek zamanlı video oluşturma 20 | - **Çözünürlük**: 1216×704, 30 FPS 21 | 22 | ### 🖼️ Görsel → Video 23 | - **Model**: Stable Video Diffusion (stabilityai/stable-video-diffusion-img2vid-xt) 24 | - **Özellik**: Statik görsellerden video oluşturma 25 | - **Çözünürlük**: 576x1024, 25 frame 26 | 27 | ### 🖼️ Görsel → 3D Model 28 | - **Model**: Hunyuan3D-2 (tencent/Hunyuan3D-2) 29 | - **Özellik**: Görsellerden yüksek çözünürlüklü 3D model oluşturma 30 | - **Format**: GLB, OBJ çıktı formatları 31 | 32 | ### 📝 Metin → 3D Model 33 | - **Model**: Stable Zero123 (stabilityai/stable-zero123) 34 | - **Özellik**: Metinden 3D model oluşturma 35 | - **Teknik**: Score Distillation Sampling (SDS) 36 | 37 | ## 🛠️ Kurulum 38 | 39 | ### Gereksinimler 40 | - Python 3.8+ 41 | - CUDA destekli GPU (önerilen) 42 | - 16GB+ RAM 43 | - 50GB+ disk alanı 44 | 45 | ### 1. Repository'yi klonlayın 46 | ```bash 47 | git clone 48 | cd Multilangual 49 | ``` 50 | 51 | ### 2. Sanal ortam oluşturun 52 | ```bash 53 | python -m venv venv 54 | venv\Scripts\activate # Windows 55 | # source venv/bin/activate # Linux/Mac 56 | ``` 57 | 58 | ### 3. Bağımlılıkları yükleyin 59 | ```bash 60 | pip install -r requirements.txt 61 | ``` 62 | 63 | ### 4. 
HuggingFace Token'ını ayarlayın 64 | 65 | #### Yöntem 1: Environment Variable 66 | ```bash 67 | set HUGGINGFACE_TOKEN=YOUR_ACCES_TOKEN 68 | ``` 69 | 70 | #### Yöntem 2: .env dosyası (zaten yapılandırılmış) 71 | `.env` dosyasında token zaten ayarlanmış durumda. 72 | 73 | #### Yöntem 3: Streamlit Secrets (zaten yapılandırılmış) 74 | `.streamlit/secrets.toml` dosyasında token zaten ayarlanmış durumda. 75 | 76 | ## 🚀 Kullanım 77 | 78 | ### Uygulamayı başlatın 79 | ```bash 80 | streamlit run main.py 81 | ``` 82 | 83 | Uygulama varsayılan olarak `http://localhost:8501` adresinde çalışacaktır. 84 | 85 | ### Web Arayüzü 86 | 1. Tarayıcınızda `http://localhost:8501` adresine gidin 87 | 2. Sol menüden kullanmak istediğiniz AI aracını seçin 88 | 3. Gerekli parametreleri girin 89 | 4. "Oluştur" butonuna tıklayın 90 | 5. Sonuçları görüntüleyin ve indirin 91 | 92 | ## 📁 Proje Yapısı 93 | 94 | ``` 95 | Multilangual/ 96 | ├── main.py # Ana Streamlit uygulaması 97 | ├── requirements.txt # Python bağımlılıkları 98 | ├── .env # Environment variables 99 | ├── README.md # Bu dosya 100 | ├── .streamlit/ 101 | │ ├── config.toml # Streamlit konfigürasyonu 102 | │ └── secrets.toml # Streamlit secrets 103 | ├── models/ 104 | │ ├── __init__.py 105 | │ ├── text2image.py # FLUX.1-dev model handler 106 | │ ├── image2image.py # SDXL Refiner model handler 107 | │ ├── text2video.py # LTX-Video model handler 108 | │ ├── image2video.py # Stable Video Diffusion handler 109 | │ ├── image23d.py # Hunyuan3D-2 model handler 110 | │ └── text23d.py # Stable Zero123 model handler 111 | └── utils/ 112 | ├── __init__.py 113 | └── helpers.py # Yardımcı fonksiyonlar 114 | ``` 115 | 116 | ## ⚙️ Konfigürasyon 117 | 118 | ### GPU Ayarları 119 | Eğer birden fazla GPU'nuz varsa, kullanılacak GPU'yu belirtebilirsiniz: 120 | ```bash 121 | set CUDA_VISIBLE_DEVICES=0 122 | ``` 123 | 124 | ### Model Cache 125 | Modeller varsayılan olarak `./models_cache` dizininde saklanır. Bu dizini değiştirmek için: 126 | ```bash 127 | set HF_HOME=C:\path\to\your\cache 128 | set TRANSFORMERS_CACHE=C:\path\to\your\cache 129 | ``` 130 | 131 | ### Streamlit Ayarları 132 | Port ve adres ayarları `.streamlit/config.toml` dosyasında yapılandırılabilir. 133 | 134 | ## 🔧 Sorun Giderme 135 | 136 | ### CUDA Bellek Hatası 137 | Eğer GPU bellek hatası alıyorsanız: 138 | 1. Daha küçük batch size kullanın 139 | 2. Model CPU offloading'i etkinleştirin (zaten aktif) 140 | 3. Daha düşük çözünürlük kullanın 141 | 142 | ### Model Yükleme Hatası 143 | 1. İnternet bağlantınızı kontrol edin 144 | 2. HuggingFace token'ının doğru olduğundan emin olun 145 | 3. Disk alanınızın yeterli olduğundan emin olun 146 | 147 | ### Yavaş Performans 148 | 1. GPU kullandığınızdan emin olun 149 | 2. CUDA sürümünüzü kontrol edin 150 | 3. Daha az inference step kullanın 151 | 152 | ## 📊 Sistem Gereksinimleri 153 | 154 | ### Minimum 155 | - CPU: Intel i5 / AMD Ryzen 5 156 | - RAM: 16GB 157 | - GPU: GTX 1060 6GB / RTX 2060 158 | - Disk: 50GB boş alan 159 | 160 | ### Önerilen 161 | - CPU: Intel i7 / AMD Ryzen 7 162 | - RAM: 32GB+ 163 | - GPU: RTX 3080 / RTX 4070+ 164 | - Disk: 100GB+ SSD 165 | 166 | ## 🤝 Katkıda Bulunma 167 | 168 | 1. Fork edin 169 | 2. Feature branch oluşturun (`git checkout -b feature/amazing-feature`) 170 | 3. Commit edin (`git commit -m 'Add amazing feature'`) 171 | 4. Push edin (`git push origin feature/amazing-feature`) 172 | 5. Pull Request oluşturun 173 | 174 | ## 📄 Lisans 175 | 176 | Bu proje MIT lisansı altında lisanslanmıştır. Detaylar için `LICENSE` dosyasına bakın. 
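For quick experiments outside the web interface, the handler classes that `main.py` wires into Streamlit can also be driven directly from Python. The following is a minimal, untested sketch using `Text2ImageGenerator` from the `model/` package with the same defaults the UI exposes; the token is read the same way the app reads it, and the output filename is arbitrary.

```python
import os

from model.text2image import Text2ImageGenerator

# Same token source as the Streamlit app (.env / environment variable)
hf_token = os.getenv("HUGGINGFACE_TOKEN")

generator = Text2ImageGenerator(hf_token)
image = generator.generate(
    prompt="Güneşin batışında okyanus kenarında yürüyen bir kedi",
    width=1024,
    height=1024,
    guidance_scale=3.5,
    num_inference_steps=50,
)
image.save("generated_image.png")  # arbitrary output path
generator.cleanup()  # frees GPU memory, mirroring the handler's own cleanup()
```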
177 | 178 | ## 🙏 Teşekkürler 179 | 180 | - [Black Forest Labs](https://huggingface.co/black-forest-labs) - FLUX.1-dev 181 | - [Stability AI](https://huggingface.co/stabilityai) - SDXL Refiner, Stable Video Diffusion, Stable Zero123 182 | - [Lightricks](https://huggingface.co/Lightricks) - LTX-Video 183 | - [Tencent](https://huggingface.co/tencent) - Hunyuan3D-2 184 | - [Streamlit](https://streamlit.io/) - Web framework 185 | - [HuggingFace](https://huggingface.co/) - Model hosting ve diffusers library 186 | 187 | ## 📞 İletişim 188 | 189 | Sorularınız için issue açabilir veya pull request gönderebilirsiniz. 190 | 191 | --- 192 | 193 | **Not**: Bu uygulama eğitim ve araştırma amaçlıdır. Ticari kullanım için model lisanslarını kontrol edin. -------------------------------------------------------------------------------- /main.py: -------------------------------------------------------------------------------- 1 | import streamlit as st 2 | import torch 3 | from PIL import Image 4 | import io 5 | import base64 6 | import os 7 | from pathlib import Path 8 | import tempfile 9 | import numpy as np 10 | from typing import Optional, Union 11 | 12 | # Import model handlers 13 | from model.text2image import Text2ImageGenerator 14 | from model.image2image import Image2ImageRefiner 15 | from model.text2video import Text2VideoGenerator 16 | from model.image2video import Image2VideoGenerator 17 | from model.image23d import Image23DGenerator 18 | from model.text23d import Text23DGenerator 19 | from utils.helpers import save_uploaded_file, display_3d_model 20 | 21 | # Page configuration 22 | st.set_page_config( 23 | page_title="Kapsamlı AI Studio", 24 | page_icon="🎨", 25 | layout="wide", 26 | initial_sidebar_state="expanded" 27 | ) 28 | 29 | # Custom CSS 30 | st.markdown(""" 31 | 56 | """, unsafe_allow_html=True) 57 | 58 | def initialize_models(): 59 | """Initialize all AI models""" 60 | if 'models_initialized' not in st.session_state: 61 | with st.spinner('AI modelleri yükleniyor... Bu işlem birkaç dakika sürebilir.'): 62 | try: 63 | # Get HuggingFace token from environment or user input 64 | hf_token = os.getenv('HUGGINGFACE_TOKEN') or st.secrets.get('HUGGINGFACE_TOKEN') 65 | 66 | if not hf_token: 67 | st.error("HuggingFace token bulunamadı. Lütfen HUGGINGFACE_TOKEN environment variable'ını ayarlayın.") 68 | st.stop() 69 | 70 | st.session_state.text2image = Text2ImageGenerator(hf_token) 71 | st.session_state.image2image = Image2ImageRefiner(hf_token) 72 | st.session_state.text2video = Text2VideoGenerator(hf_token) 73 | st.session_state.image2video = Image2VideoGenerator(hf_token) 74 | st.session_state.image23d = Image23DGenerator(hf_token) 75 | st.session_state.text23d = Text23DGenerator(hf_token) 76 | 77 | st.session_state.models_initialized = True 78 | st.success("Tüm AI modelleri başarıyla yüklendi!") 79 | except Exception as e: 80 | st.error(f"Model yükleme hatası: {str(e)}") 81 | st.stop() 82 | 83 | def main(): 84 | # Header 85 | st.markdown('

🎨 Kapsamlı AI Studio

', unsafe_allow_html=True) 86 | st.markdown("### Metin ve görsellerden video, 3D model ve gelişmiş görseller oluşturun") 87 | 88 | # Initialize models 89 | initialize_models() 90 | 91 | # Sidebar for navigation 92 | st.sidebar.title("🚀 AI Araçları") 93 | selected_tool = st.sidebar.selectbox( 94 | "Kullanmak istediğiniz AI aracını seçin:", 95 | [ 96 | "📝 Metin → Görsel", 97 | "🖼️ Görsel → Gelişmiş Görsel", 98 | "📝 Metin → Video", 99 | "🖼️ Görsel → Video", 100 | "🖼️ Görsel → 3D Model", 101 | "📝 Metin → 3D Model" 102 | ] 103 | ) 104 | 105 | # Main content area 106 | if selected_tool == "📝 Metin → Görsel": 107 | text_to_image_interface() 108 | elif selected_tool == "🖼️ Görsel → Gelişmiş Görsel": 109 | image_to_image_interface() 110 | elif selected_tool == "📝 Metin → Video": 111 | text_to_video_interface() 112 | elif selected_tool == "🖼️ Görsel → Video": 113 | image_to_video_interface() 114 | elif selected_tool == "🖼️ Görsel → 3D Model": 115 | image_to_3d_interface() 116 | elif selected_tool == "📝 Metin → 3D Model": 117 | text_to_3d_interface() 118 | 119 | def text_to_image_interface(): 120 | st.header("📝 Metin → Görsel Oluşturucu") 121 | st.markdown("**FLUX.1-dev** modelini kullanarak metinden yüksek kaliteli görseller oluşturun.") 122 | 123 | col1, col2 = st.columns([1, 1]) 124 | 125 | with col1: 126 | prompt = st.text_area( 127 | "Görsel açıklaması:", 128 | placeholder="Örnek: Güneşin batışında okyanus kenarında yürüyen bir kedi", 129 | height=100 130 | ) 131 | 132 | with st.expander("⚙️ Gelişmiş Ayarlar"): 133 | width = st.slider("Genişlik", 512, 1024, 1024, 64) 134 | height = st.slider("Yükseklik", 512, 1024, 1024, 64) 135 | guidance_scale = st.slider("Guidance Scale", 1.0, 20.0, 3.5, 0.5) 136 | num_steps = st.slider("Inference Steps", 20, 100, 50, 5) 137 | 138 | if st.button("🎨 Görsel Oluştur", type="primary"): 139 | if prompt: 140 | with st.spinner("Görsel oluşturuluyor..."): 141 | try: 142 | image = st.session_state.text2image.generate( 143 | prompt=prompt, 144 | width=width, 145 | height=height, 146 | guidance_scale=guidance_scale, 147 | num_inference_steps=num_steps 148 | ) 149 | st.session_state.generated_image = image 150 | except Exception as e: 151 | st.error(f"Görsel oluşturma hatası: {str(e)}") 152 | else: 153 | st.warning("Lütfen bir görsel açıklaması girin.") 154 | 155 | with col2: 156 | if 'generated_image' in st.session_state: 157 | st.image(st.session_state.generated_image, caption="Oluşturulan Görsel", use_column_width=True) 158 | 159 | # Download button 160 | buf = io.BytesIO() 161 | st.session_state.generated_image.save(buf, format='PNG') 162 | st.download_button( 163 | label="📥 Görseli İndir", 164 | data=buf.getvalue(), 165 | file_name="generated_image.png", 166 | mime="image/png" 167 | ) 168 | 169 | def image_to_image_interface(): 170 | st.header("🖼️ Görsel → Gelişmiş Görsel") 171 | st.markdown("**SDXL Refiner** modelini kullanarak mevcut görselleri geliştirin.") 172 | 173 | col1, col2 = st.columns([1, 1]) 174 | 175 | with col1: 176 | uploaded_file = st.file_uploader( 177 | "Geliştirmek istediğiniz görseli yükleyin:", 178 | type=['png', 'jpg', 'jpeg'] 179 | ) 180 | 181 | if uploaded_file: 182 | input_image = Image.open(uploaded_file) 183 | st.image(input_image, caption="Yüklenen Görsel", use_column_width=True) 184 | 185 | prompt = st.text_area( 186 | "Geliştirme açıklaması:", 187 | placeholder="Örnek: Daha detaylı ve canlı renklerle", 188 | height=80 189 | ) 190 | 191 | with st.expander("⚙️ Gelişmiş Ayarlar"): 192 | strength = st.slider("Değişim Gücü", 0.1, 1.0, 0.3, 
0.1) 193 | guidance_scale = st.slider("Guidance Scale", 1.0, 20.0, 7.5, 0.5) 194 | 195 | if st.button("✨ Görseli Geliştir", type="primary"): 196 | if prompt: 197 | with st.spinner("Görsel geliştiriliyor..."): 198 | try: 199 | refined_image = st.session_state.image2image.refine( 200 | image=input_image, 201 | prompt=prompt, 202 | strength=strength, 203 | guidance_scale=guidance_scale 204 | ) 205 | st.session_state.refined_image = refined_image 206 | except Exception as e: 207 | st.error(f"Görsel geliştirme hatası: {str(e)}") 208 | else: 209 | st.warning("Lütfen bir geliştirme açıklaması girin.") 210 | 211 | with col2: 212 | if 'refined_image' in st.session_state: 213 | st.image(st.session_state.refined_image, caption="Geliştirilen Görsel", use_column_width=True) 214 | 215 | buf = io.BytesIO() 216 | st.session_state.refined_image.save(buf, format='PNG') 217 | st.download_button( 218 | label="📥 Geliştirilen Görseli İndir", 219 | data=buf.getvalue(), 220 | file_name="refined_image.png", 221 | mime="image/png" 222 | ) 223 | 224 | def text_to_video_interface(): 225 | st.header("📝 Metin → Video Oluşturucu") 226 | st.markdown("**LTX-Video** modelini kullanarak metinden video oluşturun.") 227 | 228 | prompt = st.text_area( 229 | "Video açıklaması:", 230 | placeholder="Örnek: Ormanda koşan bir geyik, yavaş çekim", 231 | height=100 232 | ) 233 | 234 | col1, col2 = st.columns([1, 1]) 235 | 236 | with col1: 237 | with st.expander("⚙️ Video Ayarları"): 238 | duration = st.slider("Video Süresi (saniye)", 2, 10, 4) 239 | fps = st.selectbox("FPS", [24, 30], index=1) 240 | resolution = st.selectbox("Çözünürlük", ["704x1216", "1216x704"], index=1) 241 | 242 | if st.button("🎬 Video Oluştur", type="primary"): 243 | if prompt: 244 | with st.spinner("Video oluşturuluyor... 
Bu işlem birkaç dakika sürebilir."): 245 | try: 246 | video_path = st.session_state.text2video.generate( 247 | prompt=prompt, 248 | duration=duration, 249 | fps=fps, 250 | resolution=resolution 251 | ) 252 | st.session_state.generated_video = video_path 253 | st.success("Video başarıyla oluşturuldu!") 254 | except Exception as e: 255 | st.error(f"Video oluşturma hatası: {str(e)}") 256 | else: 257 | st.warning("Lütfen bir video açıklaması girin.") 258 | 259 | if 'generated_video' in st.session_state: 260 | st.video(st.session_state.generated_video) 261 | 262 | with open(st.session_state.generated_video, 'rb') as f: 263 | st.download_button( 264 | label="📥 Videoyu İndir", 265 | data=f.read(), 266 | file_name="generated_video.mp4", 267 | mime="video/mp4" 268 | ) 269 | 270 | def image_to_video_interface(): 271 | st.header("🖼️ Görsel → Video") 272 | st.markdown("**Stable Video Diffusion** modelini kullanarak görsellerden video oluşturun.") 273 | 274 | uploaded_file = st.file_uploader( 275 | "Video oluşturmak istediğiniz görseli yükleyin:", 276 | type=['png', 'jpg', 'jpeg'] 277 | ) 278 | 279 | if uploaded_file: 280 | input_image = Image.open(uploaded_file) 281 | 282 | col1, col2 = st.columns([1, 1]) 283 | 284 | with col1: 285 | st.image(input_image, caption="Kaynak Görsel", use_column_width=True) 286 | 287 | with st.expander("⚙️ Video Ayarları"): 288 | motion_bucket_id = st.slider("Hareket Miktarı", 1, 255, 127) 289 | fps = st.selectbox("FPS", [6, 12, 24], index=1) 290 | frames = st.slider("Frame Sayısı", 14, 25, 25) 291 | 292 | if st.button("🎬 Video Oluştur", type="primary"): 293 | with st.spinner("Görseldan video oluşturuluyor..."): 294 | try: 295 | video_path = st.session_state.image2video.generate( 296 | image=input_image, 297 | motion_bucket_id=motion_bucket_id, 298 | fps=fps, 299 | num_frames=frames 300 | ) 301 | st.session_state.image_video = video_path 302 | st.success("Video başarıyla oluşturuldu!") 303 | except Exception as e: 304 | st.error(f"Video oluşturma hatası: {str(e)}") 305 | 306 | with col2: 307 | if 'image_video' in st.session_state: 308 | st.video(st.session_state.image_video) 309 | 310 | with open(st.session_state.image_video, 'rb') as f: 311 | st.download_button( 312 | label="📥 Videoyu İndir", 313 | data=f.read(), 314 | file_name="image_to_video.mp4", 315 | mime="video/mp4" 316 | ) 317 | 318 | def image_to_3d_interface(): 319 | st.header("🖼️ Görsel → 3D Model") 320 | st.markdown("**Hunyuan3D-2** modelini kullanarak görsellerden 3D model oluşturun.") 321 | 322 | uploaded_file = st.file_uploader( 323 | "3D model oluşturmak istediğiniz görseli yükleyin:", 324 | type=['png', 'jpg', 'jpeg'] 325 | ) 326 | 327 | if uploaded_file: 328 | input_image = Image.open(uploaded_file) 329 | 330 | col1, col2 = st.columns([1, 1]) 331 | 332 | with col1: 333 | st.image(input_image, caption="Kaynak Görsel", use_column_width=True) 334 | 335 | with st.expander("⚙️ 3D Ayarları"): 336 | texture_resolution = st.selectbox("Texture Çözünürlüğü", [512, 1024, 2048], index=1) 337 | mesh_resolution = st.selectbox("Mesh Çözünürlüğü", ["low", "medium", "high"], index=1) 338 | 339 | if st.button("🎯 3D Model Oluştur", type="primary"): 340 | with st.spinner("3D model oluşturuluyor... 
Bu işlem uzun sürebilir."): 341 | try: 342 | model_path = st.session_state.image23d.generate( 343 | image=input_image, 344 | texture_resolution=texture_resolution, 345 | mesh_resolution=mesh_resolution 346 | ) 347 | st.session_state.generated_3d_model = model_path 348 | st.success("3D model başarıyla oluşturuldu!") 349 | except Exception as e: 350 | st.error(f"3D model oluşturma hatası: {str(e)}") 351 | 352 | with col2: 353 | if 'generated_3d_model' in st.session_state: 354 | display_3d_model(st.session_state.generated_3d_model) 355 | 356 | with open(st.session_state.generated_3d_model, 'rb') as f: 357 | st.download_button( 358 | label="📥 3D Modeli İndir (.glb)", 359 | data=f.read(), 360 | file_name="generated_3d_model.glb", 361 | mime="model/gltf-binary" 362 | ) 363 | 364 | def text_to_3d_interface(): 365 | st.header("📝 Metin → 3D Model") 366 | st.markdown("**Stable Zero123** modelini kullanarak metinden 3D model oluşturun.") 367 | 368 | prompt = st.text_area( 369 | "3D model açıklaması:", 370 | placeholder="Örnek: Kırmızı spor araba, detaylı", 371 | height=100 372 | ) 373 | 374 | col1, col2 = st.columns([1, 1]) 375 | 376 | with col1: 377 | with st.expander("⚙️ 3D Ayarları"): 378 | views = st.slider("Görüş Sayısı", 4, 16, 8) 379 | resolution = st.selectbox("Çözünürlük", [256, 512, 1024], index=1) 380 | steps = st.slider("Optimization Steps", 500, 2000, 1000, 100) 381 | 382 | if st.button("🎯 3D Model Oluştur", type="primary"): 383 | if prompt: 384 | with st.spinner("Metinden 3D model oluşturuluyor... Bu işlem çok uzun sürebilir."): 385 | try: 386 | model_path = st.session_state.text23d.generate( 387 | prompt=prompt, 388 | num_views=views, 389 | resolution=resolution, 390 | optimization_steps=steps 391 | ) 392 | st.session_state.text_3d_model = model_path 393 | st.success("3D model başarıyla oluşturuldu!") 394 | except Exception as e: 395 | st.error(f"3D model oluşturma hatası: {str(e)}") 396 | else: 397 | st.warning("Lütfen bir 3D model açıklaması girin.") 398 | 399 | with col2: 400 | if 'text_3d_model' in st.session_state: 401 | display_3d_model(st.session_state.text_3d_model) 402 | 403 | with open(st.session_state.text_3d_model, 'rb') as f: 404 | st.download_button( 405 | label="📥 3D Modeli İndir (.glb)", 406 | data=f.read(), 407 | file_name="text_to_3d_model.glb", 408 | mime="model/gltf-binary" 409 | ) 410 | 411 | if __name__ == "__main__": 412 | main() -------------------------------------------------------------------------------- /model/__init__.py: -------------------------------------------------------------------------------- 1 | # Models package for AI Studio -------------------------------------------------------------------------------- /model/image23d.py: -------------------------------------------------------------------------------- 1 | import torch 2 | from PIL import Image 3 | import tempfile 4 | import os 5 | import gc 6 | from typing import Optional 7 | import trimesh 8 | import numpy as np 9 | import requests 10 | from transformers import pipeline 11 | 12 | class Image23DGenerator: 13 | """Hunyuan3D-2 modelini kullanarak görsellerden 3D model oluşturan sınıf""" 14 | 15 | def __init__(self, hf_token: str): 16 | self.hf_token = hf_token 17 | self.model_id = "tencent/Hunyuan3D-2" 18 | self.shape_pipeline = None 19 | self.texture_pipeline = None 20 | self.device = "cuda" if torch.cuda.is_available() else "cpu" 21 | self._load_model() 22 | 23 | def _load_model(self): 24 | """Modeli yükle""" 25 | try: 26 | # Hunyuan3D-2 için özel import (gerçek implementasyon için gerekli) 27 
| # Bu bir placeholder implementasyon'dır 28 | 29 | # Shape generation pipeline 30 | try: 31 | from hy3dgen.shapegen import Hunyuan3DDiTFlowMatchingPipeline 32 | self.shape_pipeline = Hunyuan3DDiTFlowMatchingPipeline.from_pretrained( 33 | self.model_id, 34 | torch_dtype=torch.float16 if self.device == "cuda" else torch.float32, 35 | use_auth_token=self.hf_token 36 | ) 37 | except ImportError: 38 | # Fallback: Basit 3D generation pipeline 39 | print("Hunyuan3D-2 kütüphanesi bulunamadı, basit 3D generation kullanılacak") 40 | self.shape_pipeline = self._create_fallback_pipeline() 41 | 42 | # Texture generation pipeline 43 | try: 44 | from hy3dgen.texgen import Hunyuan3DPaintPipeline 45 | self.texture_pipeline = Hunyuan3DPaintPipeline.from_pretrained( 46 | self.model_id, 47 | torch_dtype=torch.float16 if self.device == "cuda" else torch.float32, 48 | use_auth_token=self.hf_token 49 | ) 50 | except ImportError: 51 | self.texture_pipeline = None 52 | 53 | if self.device == "cuda" and self.shape_pipeline: 54 | self.shape_pipeline = self.shape_pipeline.to("cuda") 55 | if self.texture_pipeline: 56 | self.texture_pipeline = self.texture_pipeline.to("cuda") 57 | 58 | print(f"Hunyuan3D-2 model loaded on {self.device}") 59 | 60 | except Exception as e: 61 | print(f"Hunyuan3D-2 yükleme hatası, fallback kullanılacak: {str(e)}") 62 | self.shape_pipeline = self._create_fallback_pipeline() 63 | self.texture_pipeline = None 64 | 65 | def _create_fallback_pipeline(self): 66 | """Basit 3D generation için fallback pipeline""" 67 | class FallbackPipeline: 68 | def __call__(self, image, **kwargs): 69 | # Basit bir küp mesh oluştur 70 | vertices = np.array([ 71 | [-1, -1, -1], [1, -1, -1], [1, 1, -1], [-1, 1, -1], # bottom 72 | [-1, -1, 1], [1, -1, 1], [1, 1, 1], [-1, 1, 1] # top 73 | ]) 74 | 75 | faces = np.array([ 76 | [0, 1, 2], [0, 2, 3], # bottom 77 | [4, 7, 6], [4, 6, 5], # top 78 | [0, 4, 5], [0, 5, 1], # front 79 | [2, 6, 7], [2, 7, 3], # back 80 | [0, 3, 7], [0, 7, 4], # left 81 | [1, 5, 6], [1, 6, 2] # right 82 | ]) 83 | 84 | mesh = trimesh.Trimesh(vertices=vertices, faces=faces) 85 | return [mesh] 86 | 87 | return FallbackPipeline() 88 | 89 | def generate( 90 | self, 91 | image: Image.Image, 92 | texture_resolution: int = 1024, 93 | mesh_resolution: str = "medium", 94 | guidance_scale: float = 7.5, 95 | num_inference_steps: int = 50, 96 | seed: Optional[int] = None 97 | ) -> str: 98 | """Görsellerden 3D model oluştur""" 99 | 100 | if not self.shape_pipeline: 101 | raise Exception("Model yüklenmedi") 102 | 103 | try: 104 | # Görseli uygun boyuta getir 105 | target_size = (512, 512) 106 | if image.size != target_size: 107 | image = image.resize(target_size, Image.Resampling.LANCZOS) 108 | 109 | # RGB formatına çevir 110 | if image.mode != 'RGB': 111 | image = image.convert('RGB') 112 | 113 | # Generator ayarla 114 | generator = None 115 | if seed is not None: 116 | generator = torch.Generator(device=self.device).manual_seed(seed) 117 | 118 | # Shape generation 119 | with torch.inference_mode(): 120 | if hasattr(self.shape_pipeline, '__call__'): 121 | # Hunyuan3D-2 API 122 | mesh_result = self.shape_pipeline( 123 | image=image, 124 | guidance_scale=guidance_scale, 125 | num_inference_steps=num_inference_steps, 126 | generator=generator 127 | ) 128 | mesh = mesh_result[0] if isinstance(mesh_result, list) else mesh_result 129 | else: 130 | # Fallback 131 | mesh = self.shape_pipeline(image) 132 | mesh = mesh[0] if isinstance(mesh, list) else mesh 133 | 134 | # Texture generation (eğer mevcut ise) 135 | if 
self.texture_pipeline and hasattr(mesh, 'visual'): 136 | try: 137 | with torch.inference_mode(): 138 | textured_mesh = self.texture_pipeline( 139 | mesh=mesh, 140 | image=image, 141 | texture_resolution=texture_resolution 142 | ) 143 | mesh = textured_mesh 144 | except Exception as e: 145 | print(f"Texture generation hatası: {str(e)}") 146 | # Texture olmadan devam et 147 | 148 | # Mesh'i dosyaya kaydet 149 | temp_dir = tempfile.gettempdir() 150 | model_path = os.path.join(temp_dir, f"generated_3d_model_{hash(str(image.tobytes()))}.glb") 151 | 152 | # GLB formatında kaydet 153 | if isinstance(mesh, trimesh.Trimesh): 154 | mesh.export(model_path) 155 | else: 156 | # Fallback: basit mesh kaydetme 157 | simple_mesh = trimesh.Trimesh( 158 | vertices=[[0, 0, 0], [1, 0, 0], [0, 1, 0], [0, 0, 1]], 159 | faces=[[0, 1, 2], [0, 1, 3], [0, 2, 3], [1, 2, 3]] 160 | ) 161 | simple_mesh.export(model_path) 162 | 163 | # Memory cleanup 164 | if self.device == "cuda": 165 | torch.cuda.empty_cache() 166 | gc.collect() 167 | 168 | return model_path 169 | 170 | except Exception as e: 171 | # Memory cleanup on error 172 | if self.device == "cuda": 173 | torch.cuda.empty_cache() 174 | gc.collect() 175 | raise Exception(f"3D model oluşturma hatası: {str(e)}") 176 | 177 | def cleanup(self): 178 | """Belleği temizle""" 179 | if self.shape_pipeline: 180 | del self.shape_pipeline 181 | self.shape_pipeline = None 182 | 183 | if self.texture_pipeline: 184 | del self.texture_pipeline 185 | self.texture_pipeline = None 186 | 187 | if self.device == "cuda": 188 | torch.cuda.empty_cache() 189 | gc.collect() 190 | 191 | def __del__(self): 192 | """Destructor""" 193 | self.cleanup() -------------------------------------------------------------------------------- /model/image2image.py: -------------------------------------------------------------------------------- 1 | import torch 2 | from diffusers import StableDiffusionXLImg2ImgPipeline 3 | from PIL import Image 4 | import gc 5 | from typing import Optional 6 | 7 | class Image2ImageRefiner: 8 | """SDXL Refiner modelini kullanarak görsel geliştiren sınıf""" 9 | 10 | def __init__(self, hf_token: str): 11 | self.hf_token = hf_token 12 | self.model_id = "stabilityai/stable-diffusion-xl-refiner-1.0" 13 | self.pipeline = None 14 | self.device = "cuda" if torch.cuda.is_available() else "cpu" 15 | self._load_model() 16 | 17 | def _load_model(self): 18 | """Modeli yükle""" 19 | try: 20 | self.pipeline = StableDiffusionXLImg2ImgPipeline.from_pretrained( 21 | self.model_id, 22 | torch_dtype=torch.float16 if self.device == "cuda" else torch.float32, 23 | use_auth_token=self.hf_token, 24 | variant="fp16" if self.device == "cuda" else None, 25 | use_safetensors=True 26 | ) 27 | 28 | if self.device == "cuda": 29 | self.pipeline = self.pipeline.to("cuda") 30 | # VRAM tasarrufu için CPU offloading 31 | self.pipeline.enable_model_cpu_offload() 32 | else: 33 | self.pipeline = self.pipeline.to("cpu") 34 | 35 | # Memory optimization 36 | self.pipeline.enable_attention_slicing() 37 | if hasattr(self.pipeline, 'enable_xformers_memory_efficient_attention'): 38 | try: 39 | self.pipeline.enable_xformers_memory_efficient_attention() 40 | except: 41 | pass 42 | 43 | # Compile for better performance (PyTorch 2.0+) 44 | if hasattr(torch, 'compile') and self.device == "cuda": 45 | try: 46 | self.pipeline.unet = torch.compile( 47 | self.pipeline.unet, 48 | mode="reduce-overhead", 49 | fullgraph=True 50 | ) 51 | except: 52 | pass 53 | 54 | print(f"SDXL Refiner model loaded on {self.device}") 55 | 56 | 
except Exception as e: 57 | raise Exception(f"Model yükleme hatası: {str(e)}") 58 | 59 | def refine( 60 | self, 61 | image: Image.Image, 62 | prompt: str, 63 | negative_prompt: str = "blurry, low quality, distorted", 64 | strength: float = 0.3, 65 | guidance_scale: float = 7.5, 66 | num_inference_steps: int = 50, 67 | seed: Optional[int] = None 68 | ) -> Image.Image: 69 | """Görseli geliştir""" 70 | 71 | if not self.pipeline: 72 | raise Exception("Model yüklenmedi") 73 | 74 | try: 75 | # Görseli uygun boyuta getir 76 | # SDXL için optimal boyutlar 77 | target_size = (1024, 1024) 78 | if image.size != target_size: 79 | image = image.resize(target_size, Image.Resampling.LANCZOS) 80 | 81 | # RGB formatına çevir 82 | if image.mode != 'RGB': 83 | image = image.convert('RGB') 84 | 85 | # Generator ayarla 86 | generator = None 87 | if seed is not None: 88 | generator = torch.Generator(device=self.device).manual_seed(seed) 89 | 90 | # Görseli geliştir 91 | with torch.inference_mode(): 92 | result = self.pipeline( 93 | prompt=prompt, 94 | negative_prompt=negative_prompt, 95 | image=image, 96 | strength=strength, 97 | guidance_scale=guidance_scale, 98 | num_inference_steps=num_inference_steps, 99 | generator=generator 100 | ) 101 | 102 | # Memory cleanup 103 | if self.device == "cuda": 104 | torch.cuda.empty_cache() 105 | gc.collect() 106 | 107 | return result.images[0] 108 | 109 | except Exception as e: 110 | # Memory cleanup on error 111 | if self.device == "cuda": 112 | torch.cuda.empty_cache() 113 | gc.collect() 114 | raise Exception(f"Görsel geliştirme hatası: {str(e)}") 115 | 116 | def cleanup(self): 117 | """Belleği temizle""" 118 | if self.pipeline: 119 | del self.pipeline 120 | self.pipeline = None 121 | 122 | if self.device == "cuda": 123 | torch.cuda.empty_cache() 124 | gc.collect() 125 | 126 | def __del__(self): 127 | """Destructor""" 128 | self.cleanup() -------------------------------------------------------------------------------- /model/image2video.py: -------------------------------------------------------------------------------- 1 | import torch 2 | from diffusers import StableVideoDiffusionPipeline 3 | from PIL import Image 4 | import tempfile 5 | import os 6 | import gc 7 | from typing import Optional 8 | import imageio 9 | import numpy as np 10 | 11 | class Image2VideoGenerator: 12 | """Stable Video Diffusion modelini kullanarak görsellerden video oluşturan sınıf""" 13 | 14 | def __init__(self, hf_token: str): 15 | self.hf_token = hf_token 16 | self.model_id = "stabilityai/stable-video-diffusion-img2vid-xt" 17 | self.pipeline = None 18 | self.device = "cuda" if torch.cuda.is_available() else "cpu" 19 | self._load_model() 20 | 21 | def _load_model(self): 22 | """Modeli yükle""" 23 | try: 24 | self.pipeline = StableVideoDiffusionPipeline.from_pretrained( 25 | self.model_id, 26 | torch_dtype=torch.float16 if self.device == "cuda" else torch.float32, 27 | use_auth_token=self.hf_token, 28 | variant="fp16" if self.device == "cuda" else None 29 | ) 30 | 31 | if self.device == "cuda": 32 | self.pipeline = self.pipeline.to("cuda") 33 | # VRAM tasarrufu için CPU offloading 34 | self.pipeline.enable_model_cpu_offload() 35 | else: 36 | self.pipeline = self.pipeline.to("cpu") 37 | 38 | # Memory optimization 39 | self.pipeline.enable_attention_slicing() 40 | if hasattr(self.pipeline, 'enable_xformers_memory_efficient_attention'): 41 | try: 42 | self.pipeline.enable_xformers_memory_efficient_attention() 43 | except: 44 | pass 45 | 46 | print(f"Stable Video Diffusion model loaded on 
{self.device}") 47 | 48 | except Exception as e: 49 | raise Exception(f"Model yükleme hatası: {str(e)}") 50 | 51 | def generate( 52 | self, 53 | image: Image.Image, 54 | motion_bucket_id: int = 127, 55 | fps: int = 12, 56 | num_frames: int = 25, 57 | noise_aug_strength: float = 0.1, 58 | decode_chunk_size: int = 8, 59 | seed: Optional[int] = None 60 | ) -> str: 61 | """Görsellerden video oluştur""" 62 | 63 | if not self.pipeline: 64 | raise Exception("Model yüklenmedi") 65 | 66 | try: 67 | # Görseli uygun boyuta getir (SVD için 576x1024) 68 | target_size = (576, 1024) 69 | if image.size != target_size: 70 | # Aspect ratio'yu koruyarak resize 71 | image.thumbnail(target_size, Image.Resampling.LANCZOS) 72 | 73 | # Center crop veya padding ile tam boyuta getir 74 | new_image = Image.new('RGB', target_size, (0, 0, 0)) 75 | paste_x = (target_size[0] - image.size[0]) // 2 76 | paste_y = (target_size[1] - image.size[1]) // 2 77 | new_image.paste(image, (paste_x, paste_y)) 78 | image = new_image 79 | 80 | # RGB formatına çevir 81 | if image.mode != 'RGB': 82 | image = image.convert('RGB') 83 | 84 | # Generator ayarla 85 | generator = None 86 | if seed is not None: 87 | generator = torch.Generator(device=self.device).manual_seed(seed) 88 | 89 | # Video oluştur 90 | with torch.inference_mode(): 91 | result = self.pipeline( 92 | image=image, 93 | motion_bucket_id=motion_bucket_id, 94 | noise_aug_strength=noise_aug_strength, 95 | decode_chunk_size=decode_chunk_size, 96 | generator=generator, 97 | num_frames=num_frames 98 | ) 99 | 100 | # Video frames'lerini al 101 | frames = result.frames[0] # First batch 102 | 103 | # Temporary file oluştur 104 | temp_dir = tempfile.gettempdir() 105 | video_path = os.path.join(temp_dir, f"image_to_video_{hash(str(image.tobytes()))}.mp4") 106 | 107 | # Frames'leri video olarak kaydet 108 | self._save_video(frames, video_path, fps) 109 | 110 | # Memory cleanup 111 | if self.device == "cuda": 112 | torch.cuda.empty_cache() 113 | gc.collect() 114 | 115 | return video_path 116 | 117 | except Exception as e: 118 | # Memory cleanup on error 119 | if self.device == "cuda": 120 | torch.cuda.empty_cache() 121 | gc.collect() 122 | raise Exception(f"Video oluşturma hatası: {str(e)}") 123 | 124 | def _save_video(self, frames, output_path: str, fps: int): 125 | """Frames'leri video dosyası olarak kaydet""" 126 | try: 127 | # Frames'leri numpy array'e çevir 128 | video_frames = [] 129 | for frame in frames: 130 | if isinstance(frame, Image.Image): 131 | frame_array = np.array(frame) 132 | elif isinstance(frame, torch.Tensor): 133 | frame_array = frame.cpu().numpy() 134 | if frame_array.dtype != np.uint8: 135 | frame_array = (frame_array * 255).astype(np.uint8) 136 | else: 137 | frame_array = frame 138 | 139 | # Ensure correct shape (H, W, C) 140 | if len(frame_array.shape) == 4: # (1, H, W, C) 141 | frame_array = frame_array[0] 142 | elif len(frame_array.shape) == 3 and frame_array.shape[0] == 3: # (C, H, W) 143 | frame_array = frame_array.transpose(1, 2, 0) 144 | 145 | video_frames.append(frame_array) 146 | 147 | # Video'yu kaydet 148 | with imageio.get_writer(output_path, fps=fps, codec='libx264', quality=8) as writer: 149 | for frame in video_frames: 150 | writer.append_data(frame) 151 | 152 | except Exception as e: 153 | raise Exception(f"Video kaydetme hatası: {str(e)}") 154 | 155 | def cleanup(self): 156 | """Belleği temizle""" 157 | if self.pipeline: 158 | del self.pipeline 159 | self.pipeline = None 160 | 161 | if self.device == "cuda": 162 | torch.cuda.empty_cache() 163 | 
gc.collect() 164 | 165 | def __del__(self): 166 | """Destructor""" 167 | self.cleanup() -------------------------------------------------------------------------------- /model/text23d.py: -------------------------------------------------------------------------------- 1 | import torch 2 | from diffusers import StableDiffusionPipeline 3 | from PIL import Image 4 | import tempfile 5 | import os 6 | import gc 7 | from typing import Optional 8 | import trimesh 9 | import numpy as np 10 | from transformers import pipeline 11 | 12 | class Text23DGenerator: 13 | """Stable Zero123 modelini kullanarak metinden 3D model oluşturan sınıf""" 14 | 15 | def __init__(self, hf_token: str): 16 | self.hf_token = hf_token 17 | self.model_id = "stabilityai/stable-zero123" 18 | self.text2img_pipeline = None 19 | self.zero123_pipeline = None 20 | self.device = "cuda" if torch.cuda.is_available() else "cpu" 21 | self._load_model() 22 | 23 | def _load_model(self): 24 | """Modeli yükle""" 25 | try: 26 | # İlk önce text-to-image pipeline (SDXL kullanarak) 27 | self.text2img_pipeline = StableDiffusionPipeline.from_pretrained( 28 | "stabilityai/stable-diffusion-xl-base-1.0", 29 | torch_dtype=torch.float16 if self.device == "cuda" else torch.float32, 30 | use_auth_token=self.hf_token, 31 | variant="fp16" if self.device == "cuda" else None 32 | ) 33 | 34 | # Zero123 pipeline için özel import (gerçek implementasyon için gerekli) 35 | try: 36 | from diffusers import StableZero123Pipeline 37 | self.zero123_pipeline = StableZero123Pipeline.from_pretrained( 38 | self.model_id, 39 | torch_dtype=torch.float16 if self.device == "cuda" else torch.float32, 40 | use_auth_token=self.hf_token 41 | ) 42 | except ImportError: 43 | print("Stable Zero123 pipeline bulunamadı, basit 3D generation kullanılacak") 44 | self.zero123_pipeline = self._create_fallback_pipeline() 45 | 46 | if self.device == "cuda": 47 | self.text2img_pipeline = self.text2img_pipeline.to("cuda") 48 | self.text2img_pipeline.enable_model_cpu_offload() 49 | 50 | if hasattr(self.zero123_pipeline, 'to'): 51 | self.zero123_pipeline = self.zero123_pipeline.to("cuda") 52 | self.zero123_pipeline.enable_model_cpu_offload() 53 | else: 54 | self.text2img_pipeline = self.text2img_pipeline.to("cpu") 55 | if hasattr(self.zero123_pipeline, 'to'): 56 | self.zero123_pipeline = self.zero123_pipeline.to("cpu") 57 | 58 | # Memory optimization 59 | self.text2img_pipeline.enable_attention_slicing() 60 | if hasattr(self.zero123_pipeline, 'enable_attention_slicing'): 61 | self.zero123_pipeline.enable_attention_slicing() 62 | 63 | print(f"Stable Zero123 model loaded on {self.device}") 64 | 65 | except Exception as e: 66 | print(f"Zero123 yükleme hatası, fallback kullanılacak: {str(e)}") 67 | # Fallback: sadece text2img 68 | try: 69 | self.text2img_pipeline = StableDiffusionPipeline.from_pretrained( 70 | "runwayml/stable-diffusion-v1-5", 71 | torch_dtype=torch.float16 if self.device == "cuda" else torch.float32, 72 | use_auth_token=self.hf_token 73 | ) 74 | if self.device == "cuda": 75 | self.text2img_pipeline = self.text2img_pipeline.to("cuda") 76 | else: 77 | self.text2img_pipeline = self.text2img_pipeline.to("cpu") 78 | except: 79 | pass 80 | 81 | self.zero123_pipeline = self._create_fallback_pipeline() 82 | 83 | def _create_fallback_pipeline(self): 84 | """Basit 3D generation için fallback pipeline""" 85 | class FallbackPipeline: 86 | def __call__(self, image, **kwargs): 87 | # Basit bir piramit mesh oluştur 88 | vertices = np.array([ 89 | [0, 0, 1], # top 90 | [-1, -1, 0], # base 
corners 91 | [1, -1, 0], 92 | [1, 1, 0], 93 | [-1, 1, 0] 94 | ]) 95 | 96 | faces = np.array([ 97 | [0, 1, 2], # side faces 98 | [0, 2, 3], 99 | [0, 3, 4], 100 | [0, 4, 1], 101 | [1, 4, 3], # base faces 102 | [1, 3, 2] 103 | ]) 104 | 105 | mesh = trimesh.Trimesh(vertices=vertices, faces=faces) 106 | return mesh 107 | 108 | return FallbackPipeline() 109 | 110 | def generate( 111 | self, 112 | prompt: str, 113 | num_views: int = 8, 114 | resolution: int = 512, 115 | optimization_steps: int = 1000, 116 | guidance_scale: float = 7.5, 117 | num_inference_steps: int = 50, 118 | seed: Optional[int] = None 119 | ) -> str: 120 | """Metinden 3D model oluştur""" 121 | 122 | if not self.text2img_pipeline: 123 | raise Exception("Model yüklenmedi") 124 | 125 | try: 126 | # Generator ayarla 127 | generator = None 128 | if seed is not None: 129 | generator = torch.Generator(device=self.device).manual_seed(seed) 130 | 131 | # 1. Adım: Metinden görsel oluştur 132 | with torch.inference_mode(): 133 | # Text-to-image generation 134 | image_result = self.text2img_pipeline( 135 | prompt=f"{prompt}, 3D render, clean background, centered object", 136 | width=resolution, 137 | height=resolution, 138 | guidance_scale=guidance_scale, 139 | num_inference_steps=num_inference_steps, 140 | generator=generator 141 | ) 142 | 143 | reference_image = image_result.images[0] 144 | 145 | # 2. Adım: Görseldan 3D model oluştur 146 | if hasattr(self.zero123_pipeline, '__call__') and hasattr(self.zero123_pipeline, 'to'): 147 | # Gerçek Zero123 pipeline 148 | with torch.inference_mode(): 149 | # Multi-view images oluştur 150 | views = [] 151 | for i in range(num_views): 152 | elevation = 0 # degrees 153 | azimuth = (360 / num_views) * i # degrees 154 | 155 | view_result = self.zero123_pipeline( 156 | image=reference_image, 157 | elevation=elevation, 158 | azimuth=azimuth, 159 | guidance_scale=guidance_scale, 160 | num_inference_steps=num_inference_steps 161 | ) 162 | views.append(view_result.images[0]) 163 | 164 | # Views'lerden 3D mesh oluştur (bu kısım gerçek implementasyonda 165 | # SDS (Score Distillation Sampling) veya NeRF kullanır) 166 | mesh = self._reconstruct_3d_from_views(views, reference_image) 167 | else: 168 | # Fallback: basit mesh 169 | mesh = self.zero123_pipeline(reference_image) 170 | 171 | # Mesh'i dosyaya kaydet 172 | temp_dir = tempfile.gettempdir() 173 | model_path = os.path.join(temp_dir, f"text_to_3d_model_{hash(prompt)}.glb") 174 | 175 | # GLB formatında kaydet 176 | if isinstance(mesh, trimesh.Trimesh): 177 | mesh.export(model_path) 178 | else: 179 | # Fallback: basit mesh kaydetme 180 | simple_mesh = trimesh.Trimesh( 181 | vertices=[[0, 0, 1], [-1, -1, 0], [1, -1, 0], [0, 1, 0]], 182 | faces=[[0, 1, 2], [0, 2, 3], [0, 3, 1], [1, 3, 2]] 183 | ) 184 | simple_mesh.export(model_path) 185 | 186 | # Memory cleanup 187 | if self.device == "cuda": 188 | torch.cuda.empty_cache() 189 | gc.collect() 190 | 191 | return model_path 192 | 193 | except Exception as e: 194 | # Memory cleanup on error 195 | if self.device == "cuda": 196 | torch.cuda.empty_cache() 197 | gc.collect() 198 | raise Exception(f"3D model oluşturma hatası: {str(e)}") 199 | 200 | def _reconstruct_3d_from_views(self, views, reference_image): 201 | """Multi-view görsellerden 3D mesh oluştur (basitleştirilmiş)""" 202 | # Bu gerçek bir implementasyon değil, sadece placeholder 203 | # Gerçek implementasyon SDS, NeRF veya photogrammetry kullanır 204 | 205 | # Basit bir küp mesh döndür 206 | vertices = np.array([ 207 | [-1, -1, -1], [1, -1, -1], [1, 1, 
-1], [-1, 1, -1], # bottom 208 | [-1, -1, 1], [1, -1, 1], [1, 1, 1], [-1, 1, 1] # top 209 | ]) 210 | 211 | faces = np.array([ 212 | [0, 1, 2], [0, 2, 3], # bottom 213 | [4, 7, 6], [4, 6, 5], # top 214 | [0, 4, 5], [0, 5, 1], # front 215 | [2, 6, 7], [2, 7, 3], # back 216 | [0, 3, 7], [0, 7, 4], # left 217 | [1, 5, 6], [1, 6, 2] # right 218 | ]) 219 | 220 | mesh = trimesh.Trimesh(vertices=vertices, faces=faces) 221 | return mesh 222 | 223 | def cleanup(self): 224 | """Belleği temizle""" 225 | if self.text2img_pipeline: 226 | del self.text2img_pipeline 227 | self.text2img_pipeline = None 228 | 229 | if self.zero123_pipeline: 230 | del self.zero123_pipeline 231 | self.zero123_pipeline = None 232 | 233 | if self.device == "cuda": 234 | torch.cuda.empty_cache() 235 | gc.collect() 236 | 237 | def __del__(self): 238 | """Destructor""" 239 | self.cleanup() -------------------------------------------------------------------------------- /model/text2image.py: -------------------------------------------------------------------------------- 1 | import torch 2 | from diffusers import FluxPipeline 3 | from PIL import Image 4 | import gc 5 | from typing import Optional 6 | 7 | class Text2ImageGenerator: 8 | """FLUX.1-dev modelini kullanarak metinden görsel oluşturan sınıf""" 9 | 10 | def __init__(self, hf_token: str): 11 | self.hf_token = hf_token 12 | self.model_id = "black-forest-labs/FLUX.1-dev" 13 | self.pipeline = None 14 | self.device = "cuda" if torch.cuda.is_available() else "cpu" 15 | self._load_model() 16 | 17 | def _load_model(self): 18 | """Modeli yükle""" 19 | try: 20 | self.pipeline = FluxPipeline.from_pretrained( 21 | self.model_id, 22 | torch_dtype=torch.bfloat16 if self.device == "cuda" else torch.float32, 23 | use_auth_token=self.hf_token, 24 | variant="fp16" if self.device == "cuda" else None 25 | ) 26 | 27 | if self.device == "cuda": 28 | self.pipeline = self.pipeline.to("cuda") 29 | # VRAM tasarrufu için CPU offloading 30 | self.pipeline.enable_model_cpu_offload() 31 | else: 32 | self.pipeline = self.pipeline.to("cpu") 33 | 34 | # Memory optimization 35 | self.pipeline.enable_attention_slicing() 36 | if hasattr(self.pipeline, 'enable_xformers_memory_efficient_attention'): 37 | try: 38 | self.pipeline.enable_xformers_memory_efficient_attention() 39 | except: 40 | pass 41 | 42 | print(f"FLUX.1-dev model loaded on {self.device}") 43 | 44 | except Exception as e: 45 | raise Exception(f"Model yükleme hatası: {str(e)}") 46 | 47 | def generate( 48 | self, 49 | prompt: str, 50 | width: int = 1024, 51 | height: int = 1024, 52 | guidance_scale: float = 3.5, 53 | num_inference_steps: int = 50, 54 | max_sequence_length: int = 512, 55 | seed: Optional[int] = None 56 | ) -> Image.Image: 57 | """Metinden görsel oluştur""" 58 | 59 | if not self.pipeline: 60 | raise Exception("Model yüklenmedi") 61 | 62 | try: 63 | # Generator ayarla 64 | generator = None 65 | if seed is not None: 66 | generator = torch.Generator(device=self.device).manual_seed(seed) 67 | 68 | # Görsel oluştur 69 | with torch.inference_mode(): 70 | result = self.pipeline( 71 | prompt=prompt, 72 | width=width, 73 | height=height, 74 | guidance_scale=guidance_scale, 75 | num_inference_steps=num_inference_steps, 76 | max_sequence_length=max_sequence_length, 77 | generator=generator 78 | ) 79 | 80 | # Memory cleanup 81 | if self.device == "cuda": 82 | torch.cuda.empty_cache() 83 | gc.collect() 84 | 85 | return result.images[0] 86 | 87 | except Exception as e: 88 | # Memory cleanup on error 89 | if self.device == "cuda": 90 | 
torch.cuda.empty_cache() 91 | gc.collect() 92 | raise Exception(f"Görsel oluşturma hatası: {str(e)}") 93 | 94 | def cleanup(self): 95 | """Belleği temizle""" 96 | if self.pipeline: 97 | del self.pipeline 98 | self.pipeline = None 99 | 100 | if self.device == "cuda": 101 | torch.cuda.empty_cache() 102 | gc.collect() 103 | 104 | def __del__(self): 105 | """Destructor""" 106 | self.cleanup() -------------------------------------------------------------------------------- /model/text2video.py: -------------------------------------------------------------------------------- 1 | import torch 2 | from diffusers import StableDiffusionPipeline, DiffusionPipeline 3 | from PIL import Image 4 | import tempfile 5 | import os 6 | import gc 7 | from typing import Optional, Tuple 8 | import imageio 9 | import numpy as np 10 | import cv2 11 | 12 | class Text2VideoGenerator: 13 | """Stable Diffusion kullanarak metinden video oluşturan sınıf (frame-by-frame)""" 14 | 15 | def __init__(self, hf_token: str): 16 | self.hf_token = hf_token 17 | self.model_id = "runwayml/stable-diffusion-v1-5" 18 | self.pipeline = None 19 | self.device = "cuda" if torch.cuda.is_available() else "cpu" 20 | self._load_model() 21 | 22 | def _load_model(self): 23 | """Modeli yükle""" 24 | try: 25 | # Stable Diffusion pipeline'ını yükle 26 | self.pipeline = StableDiffusionPipeline.from_pretrained( 27 | self.model_id, 28 | torch_dtype=torch.float16 if self.device == "cuda" else torch.float32, 29 | use_auth_token=self.hf_token, 30 | safety_checker=None, 31 | requires_safety_checker=False 32 | ) 33 | 34 | if self.device == "cuda": 35 | self.pipeline = self.pipeline.to("cuda") 36 | # VRAM tasarrufu için CPU offloading 37 | self.pipeline.enable_model_cpu_offload() 38 | else: 39 | self.pipeline = self.pipeline.to("cpu") 40 | 41 | # Memory optimization 42 | self.pipeline.enable_attention_slicing() 43 | if hasattr(self.pipeline, 'enable_xformers_memory_efficient_attention'): 44 | try: 45 | self.pipeline.enable_xformers_memory_efficient_attention() 46 | except: 47 | pass 48 | 49 | print(f"Stable Diffusion model loaded on {self.device}") 50 | 51 | except Exception as e: 52 | raise Exception(f"Model yükleme hatası: {str(e)}") 53 | 54 | def _parse_resolution(self, resolution: str) -> Tuple[int, int]: 55 | """Çözünürlük string'ini parse et""" 56 | try: 57 | width, height = map(int, resolution.split('x')) 58 | return width, height 59 | except: 60 | return 1216, 704 # Default resolution 61 | 62 | def generate( 63 | self, 64 | prompt: str, 65 | duration: int = 4, 66 | fps: int = 8, # Reduced for performance 67 | resolution: str = "512x512", # Reduced for performance 68 | guidance_scale: float = 7.5, 69 | num_inference_steps: int = 20, # Reduced for performance 70 | seed: Optional[int] = None 71 | ) -> str: 72 | """Metinden video oluştur (frame-by-frame)""" 73 | 74 | if not self.pipeline: 75 | raise Exception("Model yüklenmedi") 76 | 77 | try: 78 | # Çözünürlüğü parse et 79 | width, height = self._parse_resolution(resolution) 80 | 81 | # Frame sayısını hesapla (daha az frame için performans) 82 | num_frames = min(duration * fps, 32) # Max 32 frames 83 | 84 | # Generator ayarla 85 | generator = None 86 | if seed is not None: 87 | generator = torch.Generator(device=self.device).manual_seed(seed) 88 | 89 | frames = [] 90 | 91 | # Her frame için farklı prompt varyasyonları oluştur 92 | for i in range(num_frames): 93 | frame_prompt = f"{prompt}, frame {i+1}, cinematic, high quality" 94 | 95 | # Seed'i her frame için biraz değiştir 96 | frame_generator = 
None 97 | if seed is not None: 98 | frame_seed = seed + i 99 | frame_generator = torch.Generator(device=self.device).manual_seed(frame_seed) 100 | 101 | # Frame oluştur 102 | with torch.inference_mode(): 103 | result = self.pipeline( 104 | prompt=frame_prompt, 105 | width=width, 106 | height=height, 107 | guidance_scale=guidance_scale, 108 | num_inference_steps=num_inference_steps, 109 | generator=frame_generator 110 | ) 111 | 112 | frames.append(result.images[0]) 113 | 114 | # Memory cleanup her 5 frame'de bir 115 | if (i + 1) % 5 == 0 and self.device == "cuda": 116 | torch.cuda.empty_cache() 117 | 118 | # Temporary file oluştur 119 | temp_dir = tempfile.gettempdir() 120 | video_path = os.path.join(temp_dir, f"generated_video_{abs(hash(prompt))}.mp4") 121 | 122 | # Frames'leri video olarak kaydet 123 | self._save_video(frames, video_path, fps) 124 | 125 | # Memory cleanup 126 | if self.device == "cuda": 127 | torch.cuda.empty_cache() 128 | gc.collect() 129 | 130 | return video_path 131 | 132 | except Exception as e: 133 | # Memory cleanup on error 134 | if self.device == "cuda": 135 | torch.cuda.empty_cache() 136 | gc.collect() 137 | raise Exception(f"Video oluşturma hatası: {str(e)}") 138 | 139 | def _save_video(self, frames, output_path: str, fps: int): 140 | """Frames'leri video dosyası olarak kaydet""" 141 | try: 142 | # Frames'leri numpy array'e çevir 143 | if isinstance(frames[0], Image.Image): 144 | # PIL Images 145 | video_frames = [np.array(frame) for frame in frames] 146 | else: 147 | # Tensor frames 148 | video_frames = [] 149 | for frame in frames: 150 | if isinstance(frame, torch.Tensor): 151 | frame = frame.cpu().numpy() 152 | if frame.dtype != np.uint8: 153 | frame = (frame * 255).astype(np.uint8) 154 | video_frames.append(frame) 155 | 156 | # Video'yu kaydet 157 | with imageio.get_writer(output_path, fps=fps, codec='libx264') as writer: 158 | for frame in video_frames: 159 | writer.append_data(frame) 160 | 161 | except Exception as e: 162 | raise Exception(f"Video kaydetme hatası: {str(e)}") 163 | 164 | def cleanup(self): 165 | """Belleği temizle""" 166 | if self.pipeline: 167 | del self.pipeline 168 | self.pipeline = None 169 | 170 | if self.device == "cuda": 171 | torch.cuda.empty_cache() 172 | gc.collect() 173 | 174 | def __del__(self): 175 | """Destructor""" 176 | self.cleanup() -------------------------------------------------------------------------------- /requirements.txt: -------------------------------------------------------------------------------- 1 | # Core dependencies 2 | torch>=2.0.0 3 | torchvision>=0.15.0 4 | transformers>=4.35.0 5 | diffusers>=0.24.0 6 | accelerate>=0.24.0 7 | safetensors>=0.4.0 8 | 9 | # Image processing 10 | Pillow>=10.0.0 11 | opencv-python>=4.8.0 12 | numpy>=1.24.0 13 | scipy>=1.11.0 14 | 15 | # 3D processing 16 | trimesh>=4.0.0 17 | pymeshlab>=2022.2 18 | open3d>=0.17.0 19 | 20 | # Video processing 21 | imageio>=2.31.0 22 | imageio-ffmpeg>=0.4.8 23 | 24 | # Web interface 25 | streamlit>=1.28.0 26 | gradio>=4.0.0 27 | 28 | # Utilities 29 | requests>=2.31.0 30 | tqdm>=4.66.0 31 | matplotlib>=3.7.0 32 | seaborn>=0.12.0 33 | 34 | # Optional GPU acceleration 35 | # xformers>=0.0.22 # Uncomment for better performance 36 | # invisible-watermark>=0.2.0 # For SDXL -------------------------------------------------------------------------------- /utils/__init__.py: -------------------------------------------------------------------------------- 1 | # Utils package for AI Studio 
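# Hypothetical addition (not in the original file): requirements.txt leaves the optional
# xformers dependency commented out, and every model handler enables it inside a
# try/except, so a missing install silently falls back to slower attention. A small
# check like this makes that fallback visible before any pipelines are loaded.
import torch


def check_acceleration() -> dict:
    """Report whether CUDA and the optional xformers package are available."""
    try:
        import xformers  # noqa: F401  # optional dependency listed in requirements.txt
        has_xformers = True
    except ImportError:
        has_xformers = False
    return {
        "cuda_available": torch.cuda.is_available(),
        "xformers_available": has_xformers,
    }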
-------------------------------------------------------------------------------- /utils/helpers.py: -------------------------------------------------------------------------------- 1 | import streamlit as st 2 | import tempfile 3 | import os 4 | from PIL import Image 5 | import base64 6 | from pathlib import Path 7 | import trimesh 8 | import plotly.graph_objects as go 9 | import numpy as np 10 | 11 | def save_uploaded_file(uploaded_file) -> str: 12 | """Yüklenen dosyayı geçici dizine kaydet""" 13 | try: 14 | temp_dir = tempfile.gettempdir() 15 | file_path = os.path.join(temp_dir, uploaded_file.name) 16 | 17 | with open(file_path, "wb") as f: 18 | f.write(uploaded_file.getbuffer()) 19 | 20 | return file_path 21 | except Exception as e: 22 | st.error(f"Dosya kaydetme hatası: {str(e)}") 23 | return None 24 | 25 | def display_3d_model(model_path: str): 26 | """3D modeli Streamlit'te görüntüle""" 27 | try: 28 | if not os.path.exists(model_path): 29 | st.error("3D model dosyası bulunamadı") 30 | return 31 | 32 | # Dosya uzantısını kontrol et 33 | file_ext = Path(model_path).suffix.lower() 34 | 35 | if file_ext in ['.glb', '.gltf']: 36 | # GLB/GLTF dosyalarını doğrudan göster 37 | with open(model_path, 'rb') as f: 38 | model_data = f.read() 39 | 40 | # Base64 encode 41 | model_b64 = base64.b64encode(model_data).decode() 42 | 43 | # HTML viewer 44 | html_code = f""" 45 |
46 | 53 | 54 |
55 | 56 | """ 57 | 58 | st.components.v1.html(html_code, height=520) 59 | 60 | elif file_ext in ['.obj', '.ply', '.stl']: 61 | # Diğer formatları trimesh ile yükle ve plotly ile göster 62 | try: 63 | mesh = trimesh.load(model_path) 64 | display_mesh_with_plotly(mesh) 65 | except Exception as e: 66 | st.error(f"3D model yükleme hatası: {str(e)}") 67 | # Fallback: dosya indirme linki 68 | with open(model_path, 'rb') as f: 69 | st.download_button( 70 | label="📥 3D Modeli İndir", 71 | data=f.read(), 72 | file_name=os.path.basename(model_path), 73 | mime="application/octet-stream" 74 | ) 75 | else: 76 | st.warning(f"Desteklenmeyen dosya formatı: {file_ext}") 77 | # Fallback: dosya indirme linki 78 | with open(model_path, 'rb') as f: 79 | st.download_button( 80 | label="📥 3D Modeli İndir", 81 | data=f.read(), 82 | file_name=os.path.basename(model_path), 83 | mime="application/octet-stream" 84 | ) 85 | 86 | except Exception as e: 87 | st.error(f"3D model görüntüleme hatası: {str(e)}") 88 | # Fallback: dosya indirme linki 89 | try: 90 | with open(model_path, 'rb') as f: 91 | st.download_button( 92 | label="📥 3D Modeli İndir", 93 | data=f.read(), 94 | file_name=os.path.basename(model_path), 95 | mime="application/octet-stream" 96 | ) 97 | except: 98 | pass 99 | 100 | def display_mesh_with_plotly(mesh): 101 | """Trimesh objesini Plotly ile görüntüle""" 102 | try: 103 | # Mesh verilerini al 104 | vertices = mesh.vertices 105 | faces = mesh.faces 106 | 107 | # Plotly 3D mesh oluştur 108 | fig = go.Figure(data=[ 109 | go.Mesh3d( 110 | x=vertices[:, 0], 111 | y=vertices[:, 1], 112 | z=vertices[:, 2], 113 | i=faces[:, 0], 114 | j=faces[:, 1], 115 | k=faces[:, 2], 116 | opacity=0.8, 117 | color='lightblue', 118 | flatshading=True 119 | ) 120 | ]) 121 | 122 | # Layout ayarları 123 | fig.update_layout( 124 | title="3D Model", 125 | scene=dict( 126 | xaxis_title='X', 127 | yaxis_title='Y', 128 | zaxis_title='Z', 129 | camera=dict( 130 | eye=dict(x=1.5, y=1.5, z=1.5) 131 | ) 132 | ), 133 | width=700, 134 | height=500, 135 | margin=dict(l=0, r=0, b=0, t=40) 136 | ) 137 | 138 | st.plotly_chart(fig, use_container_width=True) 139 | 140 | except Exception as e: 141 | st.error(f"Plotly görüntüleme hatası: {str(e)}") 142 | 143 | def get_device_info(): 144 | """Cihaz bilgilerini al""" 145 | import torch 146 | 147 | device_info = { 148 | "device": "cuda" if torch.cuda.is_available() else "cpu", 149 | "cuda_available": torch.cuda.is_available(), 150 | "cuda_device_count": torch.cuda.device_count() if torch.cuda.is_available() else 0 151 | } 152 | 153 | if torch.cuda.is_available(): 154 | device_info["cuda_device_name"] = torch.cuda.get_device_name(0) 155 | device_info["cuda_memory_total"] = torch.cuda.get_device_properties(0).total_memory 156 | device_info["cuda_memory_allocated"] = torch.cuda.memory_allocated(0) 157 | device_info["cuda_memory_cached"] = torch.cuda.memory_reserved(0) 158 | 159 | return device_info 160 | 161 | def format_bytes(bytes_value): 162 | """Byte değerini okunabilir formata çevir""" 163 | for unit in ['B', 'KB', 'MB', 'GB', 'TB']: 164 | if bytes_value < 1024.0: 165 | return f"{bytes_value:.1f} {unit}" 166 | bytes_value /= 1024.0 167 | return f"{bytes_value:.1f} PB" 168 | 169 | def create_progress_bar(current, total, description="İşlem"): 170 | """Progress bar oluştur""" 171 | progress = current / total if total > 0 else 0 172 | st.progress(progress, text=f"{description}: {current}/{total} ({progress*100:.1f}%)") 173 | 174 | def validate_image(image: Image.Image, max_size=(2048, 2048), 
min_size=(64, 64)): 175 | """Görsel validasyonu""" 176 | errors = [] 177 | 178 | # Boyut kontrolü 179 | if image.size[0] > max_size[0] or image.size[1] > max_size[1]: 180 | errors.append(f"Görsel çok büyük. Maksimum boyut: {max_size[0]}x{max_size[1]}") 181 | 182 | if image.size[0] < min_size[0] or image.size[1] < min_size[1]: 183 | errors.append(f"Görsel çok küçük. Minimum boyut: {min_size[0]}x{min_size[1]}") 184 | 185 | # Format kontrolü 186 | if image.mode not in ['RGB', 'RGBA', 'L']: 187 | errors.append(f"Desteklenmeyen görsel formatı: {image.mode}") 188 | 189 | return errors 190 | 191 | def cleanup_temp_files(max_age_hours=24): 192 | """Eski geçici dosyaları temizle""" 193 | import time 194 | 195 | temp_dir = tempfile.gettempdir() 196 | current_time = time.time() 197 | max_age_seconds = max_age_hours * 3600 198 | 199 | cleaned_count = 0 200 | 201 | try: 202 | for filename in os.listdir(temp_dir): 203 | if filename.startswith(('generated_', 'text_to_', 'image_to_')): 204 | file_path = os.path.join(temp_dir, filename) 205 | if os.path.isfile(file_path): 206 | file_age = current_time - os.path.getmtime(file_path) 207 | if file_age > max_age_seconds: 208 | try: 209 | os.remove(file_path) 210 | cleaned_count += 1 211 | except: 212 | pass 213 | except: 214 | pass 215 | 216 | return cleaned_count 217 | 218 | def get_model_status(): 219 | """Model durumlarını kontrol et""" 220 | status = { 221 | "text2image": "models_initialized" in st.session_state and hasattr(st.session_state, 'text2image'), 222 | "image2image": "models_initialized" in st.session_state and hasattr(st.session_state, 'image2image'), 223 | "text2video": "models_initialized" in st.session_state and hasattr(st.session_state, 'text2video'), 224 | "image2video": "models_initialized" in st.session_state and hasattr(st.session_state, 'image2video'), 225 | "image23d": "models_initialized" in st.session_state and hasattr(st.session_state, 'image23d'), 226 | "text23d": "models_initialized" in st.session_state and hasattr(st.session_state, 'text23d') 227 | } 228 | 229 | return status --------------------------------------------------------------------------------
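The diagnostics helpers above (`get_device_info`, `format_bytes`, `get_model_status`, `cleanup_temp_files`) are defined but never called from `main.py`. The sketch below shows one way they could be surfaced in the Streamlit sidebar; the panel layout, labels and the `render_system_panel` function are assumptions, not part of the original app.

```python
# Hypothetical sidebar panel built on utils/helpers.py (not part of main.py).
import streamlit as st

from utils.helpers import (
    cleanup_temp_files,
    format_bytes,
    get_device_info,
    get_model_status,
)


def render_system_panel() -> None:
    info = get_device_info()
    with st.sidebar.expander("🖥️ Sistem Durumu"):
        st.write(f"Cihaz: {info['device']}")
        if info["cuda_available"]:
            st.write(f"GPU: {info['cuda_device_name']}")
            st.write(
                f"VRAM: {format_bytes(info['cuda_memory_allocated'])} / "
                f"{format_bytes(info['cuda_memory_total'])}"
            )
        loaded = [name for name, ready in get_model_status().items() if ready]
        st.write("Yüklü modeller: " + (", ".join(loaded) if loaded else "yok"))
        if st.button("🧹 Geçici dosyaları temizle"):
            st.write(f"{cleanup_temp_files(max_age_hours=24)} dosya silindi")
```

Calling `render_system_panel()` near the top of `main()` would keep this status visible regardless of the selected tool.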