├── stslib
│   ├── .gitignore
│   ├── __init__.py
│   ├── tool.py
│   └── cfg.py
├── version.json
├── run.bat
├── requirements.txt
├── static
│   ├── images
│   │   ├── wx.png
│   │   └── alipay.png
│   └── layui
│       └── font
│           ├── iconfont.eot
│           ├── iconfont.ttf
│           ├── iconfont.woff
│           └── iconfont.woff2
├── .gitignore
├── .dockerignore
├── testcuda.py
├── set.ini
├── Dockerfile
├── test.py
├── README.md
├── docs
│   ├── en
│   │   └── README_EN.md
│   └── pt
│       └── README_pt-BR.md
├── start.py
├── templates
│   └── index.html
└── LICENSE
/stslib/.gitignore: -------------------------------------------------------------------------------- 1 | __pycache__ 2 | __pycache__/* -------------------------------------------------------------------------------- /stslib/__init__.py: -------------------------------------------------------------------------------- 1 | VERSION=100 2 | version_str="v0.1" -------------------------------------------------------------------------------- /version.json: -------------------------------------------------------------------------------- 1 | { 2 | "version":"v0.0.94", 3 | "version_num":94 4 | } -------------------------------------------------------------------------------- /run.bat: -------------------------------------------------------------------------------- 1 | @echo off 2 | call %cd%\\venv\\scripts\\python.exe %cd%\\start.py 3 | 4 | pause -------------------------------------------------------------------------------- /requirements.txt: -------------------------------------------------------------------------------- 1 | torch==2.1.2 2 | flask 3 | requests 4 | gevent 5 | faster-whisper 6 | fsspec -------------------------------------------------------------------------------- /static/images/wx.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/iHunterDev/stt/main/static/images/wx.png -------------------------------------------------------------------------------- /static/images/alipay.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/iHunterDev/stt/main/static/images/alipay.png -------------------------------------------------------------------------------- /static/layui/font/iconfont.eot: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/iHunterDev/stt/main/static/layui/font/iconfont.eot -------------------------------------------------------------------------------- /static/layui/font/iconfont.ttf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/iHunterDev/stt/main/static/layui/font/iconfont.ttf -------------------------------------------------------------------------------- /static/layui/font/iconfont.woff: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/iHunterDev/stt/main/static/layui/font/iconfont.woff -------------------------------------------------------------------------------- /static/layui/font/iconfont.woff2: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/iHunterDev/stt/main/static/layui/font/iconfont.woff2 -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | venv 2 | .idea 3 | build 4 | dist 5 | models/ 6 | *.log 7 | *.wav 8 | *.mp3 9 | *.spec 10 | *.exe 11 | *.zip 12 | ffmpeg
13 | static/tmp 14 | static/file 15 | stslib/__pycache__ 16 | -------------------------------------------------------------------------------- /.dockerignore: -------------------------------------------------------------------------------- 1 | .git 2 | .github 3 | __pycache__ 4 | *.py[cod] 5 | *.log 6 | *.egg-info 7 | build 8 | dist 9 | venv 10 | .venv 11 | env 12 | .env 13 | models 14 | static/tmp 15 | .DS_Store 16 | docs 17 | files 18 | run.bat 19 | test.py 20 | testcuda.py 21 | -------------------------------------------------------------------------------- /testcuda.py: -------------------------------------------------------------------------------- 1 | import torch 2 | from torch.backends import cudnn 3 | 4 | if torch.cuda.is_available(): 5 | 6 | if cudnn.is_available() and cudnn.is_acceptable(torch.tensor(1.).cuda()): 7 | print('cuda和cudnn 可用') 8 | print('如果实际使用仍提示 cuda 相关错误,请尝试升级显卡驱动\n将 set.ini 中 devtype=cpu改为devtype=cuda') 9 | else: 10 | print('cuda可用但cudnn不可用,cuda11.x请安装cudnn8,cuda12.x请安装cudnn9') 11 | 12 | else: 13 | print("当前计算机CUDA不可用") 14 | 15 | input("\n回车关闭") -------------------------------------------------------------------------------- /set.ini: -------------------------------------------------------------------------------- 1 | ; after updating settings, please restart the app 2 | ; ip:port 3 | web_address=127.0.0.1:9977 4 | ;en or zh 5 | lang=zh 6 | ; cpu or cuda 7 | devtype=cpu 8 | 9 | ;Reducing these two numbers will use less graphics memory 10 | beam_size=5 11 | best_of=5 12 | ;vad set to false to use less GPU memory, true to use more 13 | vad=true 14 | ;0 means less GPU memory usage, higher values mean more 15 | temperature=0 16 | ;false means less GPU memory usage, true means more 17 | condition_on_previous_text=false 18 | initial_prompt_zh= 19 | model_list=tiny,tiny.en,base,base.en,small,small.en,medium,medium.en,large-v1,large-v2,large-v3,large-v3-turbo,distil-whisper-small.en,distil-whisper-medium.en,distil-whisper-large-v2,distil-whisper-large-v3,zh-plus/faster-whisper-large-v2-japanese-5k-steps 20 | -------------------------------------------------------------------------------- /Dockerfile: -------------------------------------------------------------------------------- 1 | FROM python:3.10-slim 2 | 3 | ENV PYTHONDONTWRITEBYTECODE=1 \ 4 | PYTHONUNBUFFERED=1 \ 5 | PIP_NO_CACHE_DIR=1 \ 6 | STS_WEB_ADDRESS=0.0.0.0:9977 \ 7 | VIRTUAL_ENV=/opt/venv 8 | 9 | ENV PATH="${VIRTUAL_ENV}/bin:${PATH}" 10 | 11 | WORKDIR /app 12 | 13 | RUN apt-get update \ 14 | && apt-get install -y --no-install-recommends ffmpeg tini \ 15 | && apt-get clean \ 16 | && rm -rf /var/lib/apt/lists/* 17 | 18 | COPY requirements.txt . 19 | RUN python -m venv "${VIRTUAL_ENV}" \ 20 | && "${VIRTUAL_ENV}/bin/pip" install --no-cache-dir --upgrade pip \ 21 | && "${VIRTUAL_ENV}/bin/pip" install --no-cache-dir -r requirements.txt \ 22 | && rm -rf /root/.cache/pip 23 | 24 | COPY . .
25 | 26 | EXPOSE 9977 27 | 28 | ENTRYPOINT ["/usr/bin/tini", "--"] 29 | CMD ["python", "start.py"] 30 | -------------------------------------------------------------------------------- /test.py: -------------------------------------------------------------------------------- 1 | # api 请求示例 2 | import os 3 | import requests 4 | 5 | # 请求地址 6 | url = "http://127.0.0.1:9977/api" 7 | # 请求参数 file:音视频文件,language:语言代码,model:模型,response_format:text|json|srt 8 | # 返回 code==0 成功,其他失败,msg==成功为ok,其他失败原因,data=识别后返回文字 9 | files = {"file": open("C:\\Users\\c1\\Videos\\2.wav", "rb")} 10 | data={"language":"zh","model":"base","response_format":"json"} 11 | response = requests.request("POST", url, timeout=600, data=data,files=files) 12 | print(response.json()) 13 | ''' 14 | response 15 | {'code': 0, 'data': [{'end_time': '00:00:16,000', 'line': 1, 'start_time': '00:00:00,000', 'text': '在后面的做,本期我们介绍电磁罚的公园里'}, {'end_ 16 | time': '00:00:19,000', 'line': 2, 'start_time': '00:00:16,000', 'text': '首先我们拿到的是一款电磁罚'}, {'end_time': '00:00:25,000', 'line': 3, 'sta 17 | rt_time': '00:00:19,000', 'text': '这上面有三个孔,这里有两个孔'}, {'end_time': '00:00:32,000', 'line': 4, 'start_time': '00:00:25,000', 'text': '这 18 | 里有土,带看一下,A、B,下面是RPS带看下'}], 'msg': 'ok'} 19 | ''' 20 | 21 | -------------------------------------------------------------------------------- /stslib/tool.py: -------------------------------------------------------------------------------- 1 | import subprocess 2 | import sys 3 | import webbrowser 4 | from datetime import timedelta 5 | 6 | import requests 7 | import stslib 8 | from stslib import cfg 9 | 10 | def runffmpeg(arg): 11 | cmd = ["ffmpeg","-hide_banner","-y"] 12 | # if cfg.devtype =='cuda': 13 | # cmd.extend(["-hwaccel", "cuda","-hwaccel_output_format","cuda"]) 14 | cmd = cmd + arg 15 | p = subprocess.Popen(cmd, 16 | stdout=subprocess.PIPE, 17 | stderr=subprocess.PIPE, 18 | creationflags=0 if sys.platform != 'win32' else subprocess.CREATE_NO_WINDOW) 19 | while True: 20 | try: 21 | #等待0.1未结束则异常 22 | outs, errs = p.communicate(timeout=0.5) 23 | errs=str(errs) 24 | if errs: 25 | errs = errs.replace('\\\\','\\').replace('\r',' ').replace('\n',' ') 26 | errs=errs[errs.find("Error"):] 27 | # 成功 28 | if p.returncode==0: 29 | return "ok" 30 | # 失败 31 | # if cfg.devtype=='cuda': 32 | # errs+="[error] Please try upgrading the graphics card driver and reconfigure CUDA" 33 | return errs 34 | except subprocess.TimeoutExpired as e: 35 | # 如果前台要求停止 36 | pass 37 | except Exception as e: 38 | #出错异常 39 | errs=f"[error]ffmpeg:error {cmd=},\n{str(e)}" 40 | return errs 41 | def checkupdate(): 42 | try: 43 | res=requests.get("https://raw.githubusercontent.com/jianchang512/sts/main/version.json") 44 | if res.status_code==200: 45 | d=res.json() 46 | if d['version_num']>stslib.VERSION: 47 | cfg.updatetips=f'New version {d["version"]}' 48 | except Exception as e: 49 | pass 50 | 51 | def openweb(web_address): 52 | try: 53 | webbrowser.open("http://"+web_address) 54 | print(f"\n{cfg.transobj['lang8']} http://{web_address}") 55 | except Exception: 56 | pass 57 | 58 | def ms_to_time_string(*, ms=0, seconds=None): 59 | # 计算小时、分钟、秒和毫秒 60 | if seconds is None: 61 | td = timedelta(milliseconds=ms) 62 | else: 63 | td = timedelta(seconds=seconds) 64 | hours, remainder = divmod(td.seconds, 3600) 65 | minutes, seconds = divmod(remainder, 60) 66 | milliseconds = td.microseconds // 1000 67 | 68 | # 格式化为字符串 69 | time_string = f"{hours:02d}:{minutes:02d}:{seconds:02d},{milliseconds:03d}" 70 | 71 | return time_string 
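For reference, a minimal usage sketch of the helpers defined in stslib/tool.py above, mirroring how start.py calls them. It assumes the project dependencies are installed and `ffmpeg` is on PATH; the file names `demo.mp4` and `demo.wav` are hypothetical, purely for illustration:

```python
from stslib import tool

# Convert an input video/audio file to 16 kHz mono wav. This is the same
# argument list start.py builds before transcription; runffmpeg returns "ok"
# on success, otherwise the ffmpeg error text.
result = tool.runffmpeg(["-i", "demo.mp4", "-ar", "16000", "-ac", "1", "demo.wav"])
if result != "ok":
    print("ffmpeg failed:", result)

# Format millisecond/second offsets as SRT-style timestamps (HH:MM:SS,mmm).
print(tool.ms_to_time_string(ms=83500))       # 00:01:23,500
print(tool.ms_to_time_string(seconds=61.25))  # 00:01:01,250
```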
-------------------------------------------------------------------------------- /stslib/cfg.py: -------------------------------------------------------------------------------- 1 | import locale 2 | import os 3 | import sys 4 | import torch 5 | import re 6 | ROOT_DIR = os.getcwd() 7 | 8 | def parse_ini(file=os.path.join(ROOT_DIR,'set.ini')): 9 | 10 | lang="zh" 11 | try: 12 | lang="en" if locale.getdefaultlocale()[0].split('_')[0].lower() != 'zh' else "zh" 13 | except: 14 | lang="zh" 15 | sets={ 16 | "web_address":"127.0.0.1:9977", 17 | "lang":lang, 18 | "devtype":"cpu", 19 | "cuda_com_type":"float32", 20 | "beam_size":5, 21 | "best_of":5, 22 | "vad":True, 23 | "temperature":0, 24 | "condition_on_previous_text":False, 25 | "initial_prompt_zh":"转录为中文简体。" 26 | 27 | } 28 | if not os.path.exists(file): 29 | return sets 30 | with open(file, 'r', encoding='utf-8') as f: 31 | for line in f.readlines(): 32 | if not line.strip() or line.strip().startswith(";") : 33 | continue 34 | line=[ x.strip() for x in line.strip().split('=', maxsplit=1)] 35 | if len(line)!=2: 36 | continue 37 | if line[1]=='false': 38 | sets[line[0]] = False 39 | elif line[1]=='true': 40 | sets[line[0]] = True 41 | elif re.match(r'^\d+$', line[1]): 42 | sets[line[0]]=int(line[1]) 43 | elif line[1].find(',')>0: 44 | sets[line[0]]=line[1].split(',') 45 | elif line[1]: 46 | sets[line[0]]=str(line[1]).lower() 47 | return sets 48 | 49 | sets=parse_ini() 50 | 51 | web_address=os.getenv('STS_WEB_ADDRESS', sets.get('web_address')) 52 | LANG=os.getenv('STS_LANG', sets.get('lang','zh') or 'zh') 53 | if LANG=='zh': 54 | os.environ['HF_ENDPOINT'] = 'https://hf-mirror.com' 55 | devtype=os.getenv('STS_DEVTYPE', sets.get('devtype')) 56 | cuda_com_type=os.getenv('STS_CUDA_COM_TYPE', sets.get('cuda_com_type')) 57 | FILE_BASE_DIR=os.path.abspath('/app/lazycatpan') 58 | 59 | 60 | 61 | MODEL_DIR = os.path.join(ROOT_DIR, 'models') 62 | STATIC_DIR = os.path.join(ROOT_DIR, 'static') 63 | TMP_DIR = os.path.join(STATIC_DIR, 'tmp') 64 | 65 | progressbar={} 66 | progressresult={} 67 | 68 | 69 | if not os.path.exists(TMP_DIR): 70 | os.makedirs(TMP_DIR, 0o777, exist_ok=True) 71 | if not os.path.exists(MODEL_DIR): 72 | os.makedirs(MODEL_DIR, 0o777, exist_ok=True) 73 | if not os.path.exists(STATIC_DIR): 74 | os.makedirs(STATIC_DIR, 0o777, exist_ok=True) 75 | 76 | if sys.platform == 'win32': 77 | os.environ['PATH'] = f'{ROOT_DIR};{ROOT_DIR}\\ffmpeg;' + os.environ['PATH'] 78 | else: 79 | os.environ['PATH'] = f'{ROOT_DIR}:{ROOT_DIR}/ffmpeg:' + os.environ['PATH'] 80 | language_code_list={ 81 | "zh":{ 82 | "中文": ['zh'], 83 | "英语": ['en'], 84 | "法语": ['fr'], 85 | "德语": ['de'], 86 | "日语": ['ja'], 87 | "韩语": ['ko'], 88 | "俄语": ['ru'], 89 | "西班牙语": ['es'], 90 | "泰国语": ['th'], 91 | "意大利语": ['it'], 92 | "葡萄牙语": ['pt'], 93 | "越南语": ['vi'], 94 | "阿拉伯语": ['ar'], 95 | "土耳其语": ['tr'], 96 | "匈牙利": ['hu'], 97 | "自动检测":['auto'] 98 | }, 99 | "en":{ 100 | "Chinese": ['zh'], 101 | "English": ['en'], 102 | "French": ['fr'], 103 | "German": ['de'], 104 | "Japanese": ['ja'], 105 | "Korean": ['ko'], 106 | "Russian": ['ru'], 107 | "Spanish": ['es'], 108 | "Thai": ['th'], 109 | "Italian": ['it'], 110 | "Portuguese": ['pt'], 111 | "Vietnamese": ['vi'], 112 | "Arabic": ['ar'], 113 | "Turkish": ['tr'], 114 | "Hungarian": ['hu'], 115 | "Automatic Detection":['auto'] 116 | } 117 | } 118 | 119 | langlist = { 120 | "zh": { 121 | "lang1": "上传成功", 122 | "lang2": "上传失败", 123 | "lang3": "上传失败:不允许上传该格式", 124 | "lang4": "模型文件不存在,请下载后放到 models 目录下", 125 | "lang5": "文件不存在", 126 | "lang6": "识别成功", 
127 | "lang7": "识别失败", 128 | "lang8": "浏览器已打开,若未能自动打开,请手动打开网址 ", 129 | "lang9":"已转为wav格式" 130 | }, 131 | "en": { 132 | "lang1": "Upload successful", 133 | "lang2": "Upload failed", 134 | "lang3": "Upload failed: Uploading this format is not allowed", 135 | "lang4": "Model file does not exist,download and save to models folder", 136 | "lang5": "File does not exist", 137 | "lang6": "recognition successful", 138 | "lang7": "recognition failed", 139 | "lang8": "The browser is open. If it does not open automatically, please open the URL manually", 140 | "lang9":"Converted to wav" 141 | } 142 | } 143 | updatetips = "" 144 | transobj = langlist[LANG] 145 | lang_code=language_code_list[LANG] 146 | 147 | TASK_QUEUE= [] 148 | 149 | MODEL_DICT={} 150 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 |
2 | 3 | **中文简体** | [English](./docs/en/README_EN.md) 4 | 5 |
6 | 7 | --- 8 | 9 |
10 | 11 | [👑 捐助本项目](https://pyvideotrans.com/about) 12 | 13 |
14 | 15 | --- 16 | 17 | 18 | # 语音识别转文字工具 19 | 20 | 这是一个离线运行的本地语音识别转文字工具,基于 faster-whisper 开源模型,可将视频/音频中的人类声音识别并转为文字,可输出json格式、srt字幕带时间戳格式、纯文字格式。可用于自行部署后替代 openai 的语音识别接口或百度语音识别等,准确率基本等同openai官方api接口。 21 | 22 | 23 | faster-whisper 开源模型有 tiny/base/small/medium/large-v3, 内置 tiny 模型,tiny->large-v3识别效果越来越好,但所需计算机资源也更多,根据需要可自行下载后解压到 models 目录下即可。 24 | 25 | 26 | 27 | # 视频演示 28 | 29 | 30 | https://github.com/jianchang512/stt/assets/3378335/d716acb6-c20c-4174-9620-f574a7ff095d 31 | 32 | 33 | ![image](https://github.com/jianchang512/stt/assets/3378335/0f724ff1-21b3-4960-b6ba-5aa994ea414c) 34 | 35 | 36 | 37 | 38 | # 预编译Win版使用方法/Linux和Mac源码部署 39 | 40 | 1. [点击此处打开Releases页面下载](https://github.com/jianchang512/stt/releases)预编译文件 41 | 42 | 2. 下载后解压到某处,比如 E:/stt 43 | 44 | 3. 双击 start.exe ,等待自动打开浏览器窗口即可 45 | 46 | 4. 点击页面中的上传区域,在弹窗中找到想识别的音频或视频文件,或直接拖拽音频视频文件到上传区域,然后选择发声语言、文本输出格式、所用模型,点击“立即开始识别”,稍等片刻,底部文本框中会以所选格式显示识别结果 47 | 48 | 5. 如果机器拥有英伟达GPU,并正确配置了CUDA环境,将自动使用CUDA加速 49 | 50 | 51 | # 源码部署(Linux/Mac/Windows) 52 | 53 | 0. 要求 python 3.9->3.11 54 | 55 | 1. 创建空目录,比如 E:/stt, 在这个目录下打开 cmd 窗口,方法是地址栏中输入 `cmd`, 然后回车。 56 | 57 | 使用git拉取源码到当前目录 ` git clone git@github.com:jianchang512/stt.git . ` 58 | 59 | 2. 创建虚拟环境 `python -m venv venv` 60 | 61 | 3. 激活环境,win下命令 `%cd%/venv/scripts/activate`,linux和Mac下命令 `source ./venv/bin/activate` 62 | 63 | 4. 安装依赖: `pip install -r requirements.txt`,如果报版本冲突错误,请执行 `pip install -r requirements.txt --no-deps` ,如果希望支持cuda加速,继续执行命令 `pip uninstall -y torch`, `pip install torch --index-url https://download.pytorch.org/whl/cu121` 64 | 65 | 5. win下解压 ffmpeg.7z,将其中的`ffmpeg.exe`和`ffprobe.exe`放在项目目录下, linux和mac 请自行搜索如何安装 ffmpeg 66 | 67 | 6. [下载模型压缩包](https://github.com/jianchang512/stt/releases/tag/0.0),根据需要下载模型,下载后将压缩包里的文件夹放到项目根目录的 models 文件夹内 68 | 69 | 7. 执行 `python start.py `,等待自动打开本地浏览器窗口。 70 | 71 | 72 | # Docker 运行 73 | 74 | 如果希望在容器中运行,可使用仓库内提供的 `Dockerfile` 构建镜像(默认仅启用 CPU 推理): 75 | 76 | ```bash 77 | # 构建镜像 78 | docker build -t stt-local .
79 | 80 | # 运行容器 81 | docker run --rm -it \ 82 | -p 9977:9977 \ 83 | -e STS_WEB_ADDRESS=0.0.0.0:9977 \ 84 | -v "$(pwd)/models:/app/models" \ 85 | -v "$(pwd)/static/tmp:/app/static/tmp" \ 86 | -v "$(pwd)/set.ini:/app/set.ini" \ 87 | stt-local 88 | ``` 89 | 90 | - `STS_WEB_ADDRESS` 环境变量会覆盖 `set.ini` 中的 `web_address`,让服务监听 `0.0.0.0` 以便宿主机访问。 91 | - 将 `models` 映射到宿主机可以复用/缓存下载好的 Whisper 模型,避免重复下载。 92 | - 如需调整其他配置(例如 `devtype` 或语言),可继续传入 `STS_DEVTYPE=cpu|cuda`、`STS_LANG=zh|en` 等环境变量,或直接替换/挂载 `set.ini`。 93 | - 如果要使用 GPU,需要自定义基础镜像并在容器中正确安装 CUDA/torch,对应步骤可参照源码部署部分。 94 | 95 | 96 | # 通过路径导入本地文件 97 | 98 | 如果语音/视频文件已经存在于运行服务的机器上,可以在打开网页时通过查询参数传递路径,页面会自动导入该文件并转为 wav。所有相对路径都会基于固定目录 `/app/lazycatpan` 解析: 99 | 100 | ``` 101 | # 假设文件位于 /app/lazycatpan/demo.mp4 102 | http://127.0.0.1:9977/?file=demo.mp4 103 | ``` 104 | 105 | - 路径必须指向服务端可访问、且当前用户拥有读取权限的文件。若传入相对路径,将自动拼接到 `/app/lazycatpan` 下;若传入绝对路径,则要求其位于 `/app/lazycatpan` 目录范围内。 106 | - 页面加载后会自动调用后台接口 `/upload_local`,和手动上传的流程一致,可直接点击“立即识别”开始转写。 107 | - 也可以自行调用接口:`POST /upload_local`(JSON 或表单均可),字段为 `file_path=demo.mp4` 或 `/app/lazycatpan/demo.mp4`,接口会自动解析并返回临时 wav 文件名。 108 | 109 | 110 | # Api接口 111 | 112 | 接口地址: http://127.0.0.1:9977/api 113 | 114 | 请求方法: POST 115 | 116 | 请求参数: 117 | 118 | language: 语言代码:可选如下 119 | 120 | > 121 | > 中文:zh 122 | > 英语:en 123 | > 法语:fr 124 | > 德语:de 125 | > 日语:ja 126 | > 韩语:ko 127 | > 俄语:ru 128 | > 西班牙语:es 129 | > 泰国语:th 130 | > 意大利语:it 131 | > 葡萄牙语:pt 132 | > 越南语:vi 133 | > 阿拉伯语:ar 134 | > 土耳其语:tr 135 | > 136 | 137 | model: 模型名称,可选如下 138 | > 139 | > base 对应于 models/models--Systran--faster-whisper-base 140 | > small 对应于 models/models--Systran--faster-whisper-small 141 | > medium 对应于 models/models--Systran--faster-whisper-medium 142 | > large-v3 对应于 models/models--Systran--faster-whisper-large-v3 143 | > 144 | 145 | response_format: 返回的字幕格式,可选 text|json|srt 146 | 147 | file: 音视频文件,二进制上传 148 | 149 | Api 请求示例 150 | 151 | ```python 152 | import requests 153 | # 请求地址 154 | url = "http://127.0.0.1:9977/api" 155 | # 请求参数 file:音视频文件,language:语言代码,model:模型,response_format:text|json|srt 156 | # 返回 code==0 成功,其他失败,msg==成功为ok,其他失败原因,data=识别后返回文字 157 | files = {"file": open("C:/Users/c1/Videos/2.wav", "rb")} 158 | data={"language":"zh","model":"base","response_format":"json"} 159 | response = requests.request("POST", url, timeout=600, data=data,files=files) 160 | print(response.json()) 161 | ``` 162 | 163 | # 兼容 openai 语音转文字接口 164 | 165 | 示例代码 166 | ```python 167 | # openai兼容格式 168 | from openai import OpenAI 169 | 170 | client = OpenAI(api_key='123',base_url='http://127.0.0.1:9977/v1') 171 | audio_file= open("/users/c1/videos/60.wav", "rb") 172 | 173 | transcription = client.audio.transcriptions.create( 174 | model="tiny", 175 | file=audio_file, 176 | response_format="text" # 支持 text 、srt 格式,json格式会返回srt字幕解析后的json数据 177 | ) 178 | 179 | print(transcription.text) 180 | 181 | ``` 182 | 183 | 184 | # CUDA 加速支持 185 | 186 | **安装CUDA工具** [详细安装方法](https://juejin.cn/post/7318704408727519270) 187 | 188 | 如果你的电脑拥有 Nvidia 显卡,先升级显卡驱动到最新,然后去安装对应的 189 | [CUDA Toolkit](https://developer.nvidia.com/cuda-downloads) 和 [cudnn for CUDA11.X](https://developer.nvidia.com/rdp/cudnn-archive)。 190 | 191 | 安装完成后,按`Win + R`,输入 `cmd` 然后回车,在弹出的窗口中输入`nvcc --version`,确认有版本信息显示,类似该图 192 | ![image](https://github.com/jianchang512/pyvideotrans/assets/3378335/e68de07f-4bb1-4fc9-bccd-8f841825915a) 193 | 194 | 然后继续输入`nvidia-smi`,确认有输出信息,并且能看到cuda版本号,类似该图 195 | ![image](https://github.com/jianchang512/pyvideotrans/assets/3378335/71f1d7d3-07f9-4579-b310-39284734006b) 196 | 197 | 然后执行 
`python testcuda.py`,如果提示成功,说明安装正确,否则请仔细检查重新安装 198 | 199 | 默认使用 cpu 运算,如果确定使用英伟达显卡,并且配置好了cuda环境,请修改 set.ini 中 `devtype=cpu`为 `devtype=cuda`,并重新启动,可使用cuda加速 200 | 201 | # 注意事项 202 | 203 | 0. 如果没有英伟达显卡或未配置好CUDA环境,不要使用 large/large-v3 模型,可能导致内存耗尽死机 204 | 1. 中文在某些情况下会输出繁体字 205 | 2. 有时会遇到“cublasxx.dll不存在”的错误,此时需要下载 cuBLAS,然后将dll文件复制到系统目录下,[点击下载 cuBLAS](https://github.com/jianchang512/stt/releases/download/0.0/cuBLAS_win.7z),解压后将里面的dll文件复制到 C:/Windows/System32下 206 | 3. 如果控制台出现"[W:onnxruntime:Default, onnxruntime_pybind_state.cc:1983 onnxruntime::python::CreateInferencePybindStateModule] Init provider bridge failed.", 可忽略,不影响使用 207 | 4. 默认使用 cpu 运算,如果确定使用英伟达显卡,并且配置好了cuda环境,请修改 set.ini 中 `devtype=cpu`为 `devtype=cuda`,并重新启动,可使用cuda加速 208 | 209 | 210 | 211 | 5. 尚未执行完毕就闪退 212 | 213 | 如果启用了cuda并且电脑已安装好了cuda环境,但没有手动安装配置过cudnn,那么会出现该问题,去安装和cuda匹配的cudnn。比如你安装了cuda12.3,那么就需要下载cudnn for cuda12.x压缩包,然后解压,将里面的3个文件夹复制到cuda安装目录下。具体教程参考 https://juejin.cn/post/7318704408727519270 214 | 215 | 如果cudnn按照教程安装好了仍闪退,那么极大概率是GPU显存不足,可以改为使用 medium模型,显存不足8G时,尽量避免使用large-v3模型,尤其是视频大于20M时,否则可能显存不足而崩溃 216 | 217 | # 相关联项目 218 | 219 | [视频翻译配音工具:翻译字幕并配音](https://github.com/jianchang512/pyvideotrans) 220 | 221 | [声音克隆工具:用任意音色合成语音](https://github.com/jianchang512/clone-voice) 222 | 223 | [人声背景乐分离:极简的人声和背景音乐分离工具,本地化网页操作](https://github.com/jianchang512/vocal-separate) 224 | 225 | # 致谢 226 | 227 | 本项目主要依赖的其他项目 228 | 229 | 1. https://github.com/SYSTRAN/faster-whisper 230 | 2. https://github.com/pallets/flask 231 | 3. https://ffmpeg.org/ 232 | 4. https://layui.dev 233 | -------------------------------------------------------------------------------- /docs/en/README_EN.md: -------------------------------------------------------------------------------- 1 | 
2 | 3 | [中文简体](../../README.md) | **English** | [Português (Brasil)](../pt/README_pt-BR.md) 4 | 5 |
6 | 7 | --- 8 | 9 |
10 | 11 | [👑 Support the project](https://github.com/jianchang512/pyvideotrans/blob/main/docs/about.md) | [Discord](https://discord.gg/SyT6GEwkJS) 12 | 13 |
14 | 15 | --- 16 | 17 | # Voice Recognition to Text Tool 18 | 19 | This is an offline local voice recognition tool to text, based on the open-source model fast-whisper. It can recognize and convert human voice in videos/audios into text, in json format, srt subtitle with timestamps format, and plain text format. It can be used after self-deployment to replace the voice recognition interface of openai or Baidu Voice Recognition, etc. The accuracy is basically the same as the official api interface of openai. 20 | 21 | > 22 | > After deployment or download, double click on start.exe to automatically call the local browser to open the local webpage. 23 | > 24 | > Drag or click to select the audio and video file to be recognized, then select the speaking language, output text format, model used (base model built-in), click start recognition, after completion, output in the selected format on the current webpage. 25 | > 26 | > The entire process does not require the internet, it operates entirely locally, and can be deployed on the intranet. 27 | > 28 | > The fast-whisper open-source model has base/small/medium/large-v3, with built-in base model, base->large-v3 recognition effect is getting better and better, but the computer resources required are also more, you can download and unzip it into the models directory according to need. 29 | > 30 | > [All model download links](https://github.com/jianchang512/stt/releases/tag/0.0) 31 | > 32 | 33 | 34 | # Video Demonstration 35 | 36 | 37 | https://github.com/jianchang512/stt/assets/3378335/d716acb6-c20c-4174-9620-f574a7ff095d 38 | 39 | 40 | ![image](https://github.com/jianchang512/stt/assets/3378335/0f724ff1-21b3-4960-b6ba-5aa994ea414c) 41 | 42 | 43 | 44 | 45 | # Precompiled Win Version Usage Method / Linux and Mac Source Code Deployment 46 | 47 | 1. [Click here to go to the Releases page to download](https://github.com/jianchang512/stt/releases) precompiled file 48 | 49 | 2. After downloading, unzip it somewhere, such as E:/stt 50 | 51 | 3. Double-click start.exe, and wait for the browser window to open automatically 52 | 53 | 4. Click on the upload area on the page, find the audio or video file you want to recognize in the pop-up window, or directly drag the audio and video file to the upload area, then select the spoken language, text output format, and model used, click "Start Recognition Immediately", wait for a while, the text boxes at the bottom will display the recognition results in the selected format 54 | 55 | 5. If the computer has an Nvidia GPU and the CUDA environment is correctly configured, CUDA acceleration will be used automatically 56 | 57 | 58 | # Source Code Deployment (Linux / Mac / Window) 59 | 60 | 0. Required python 3.9->3.11 61 | 62 | 1. Create an empty directory, such as E:/stt, open cmd window in this directory, the method is to enter `cmd` in the address bar, and then press enter. 63 | 64 | Using git to pull the source code to the current directory ` git clone git@github.com:jianchang512/stt.git . ` 65 | 66 | 2. Create a virtual environment `python -m venv venv` 67 | 68 | 3. Activate the environment, the command under win is `%cd%/venv/scripts/activate`, the linux and Mac go to google and search. if want to use cuda,and exec `pip uninstall -y torch` ,`pip install torch --index-url https://download.pytorch.org/whl/cu121` 69 | 70 | 4. Install dependencies: `pip install -r requirements.txt`, if you report a version conflict error, please run `pip install -r requirements.txt --no-deps` 71 | 72 | 5. 
Decompress ffmpeg.7z under Windows, and put the `ffmpeg.exe` and `ffprobe.exe` in it in the project directory, linux and mac to download the corresponding version ffmpeg from the [ffmpeg official website](https://ffmpeg.org/download.html), unzip the `ffmpeg` and `ffprobe` binary programs and put them at the root of the project 73 | 74 | 6. [Download the model compression package](https://github.com/jianchang512/stt/releases/tag/0.0), download the model as necessary, after downloading, put the folder in the compression package into the models folder of the root of the project 75 | 76 | 7. Execute `python start.py `, wait for the local browser window to open automatically. 77 | 78 | 79 | 80 | 81 | # API Interface 82 | 83 | Interface address: http://127.0.0.1:9977/api 84 | 85 | Request method: POST 86 | 87 | Request parameters: 88 | 89 | language: Language code: optional below 90 | 91 | > 92 | > Chinese: zh 93 | > English: en 94 | > French: fr 95 | > German: de 96 | > Japanese: ja 97 | > Korean: ko 98 | > Russian: ru 99 | > Spanish: es 100 | > Thai: th 101 | > Italian: it 102 | > Portuguese: pt 103 | > Vietnamese: vi 104 | > Arabic: ar 105 | > Turkish: tr 106 | > 107 | 108 | model: Model name, optional below 109 | > 110 | > base corresponds to models/models--Systran--faster-whisper-base 111 | > small corresponds to models/models--Systran--faster-whisper-small 112 | > medium corresponds to models/models--Systran--faster-whisper-medium 113 | > large-v3 corresponds to models/models--Systran--faster-whisper-large-v3 114 | > 115 | 116 | response_format: the returned subtitle format. Can be text|json|srt 117 | 118 | file: audio and video files, binary upload 119 | 120 | Api request example 121 | 122 | ```python 123 | import requests 124 | # Request address 125 | url = "http://127.0.0.1:9977/api" 126 | # Request parameters include file: audio and video files, language: language code, model: model, response_format: text|json|srt 127 | # Returns code==0 success, others fail, msg==success is ok, others fail reasons, data=returned text after recognition 128 | files = {"file": open("C:\\Users\\c1\\Videos\\2.wav", "rb")} 129 | data={"language":"zh","model":"base","response_format":"json"} 130 | response = requests.request("POST", url, timeout=600, data=data,files=files) 131 | print(response.json()) 132 | ``` 133 | 134 | 135 | 136 | # CUDA Acceleration Support 137 | 138 | **Install CUDA Tools** [Detailed installation method](https://juejin.cn/post/7318704408727519270) 139 | 140 | If your computer has Nvidia graphics card, first upgrade the graphics card driver to the latest, and then to install the corresponding 141 | [CUDA Toolkit](https://developer.nvidia.com/cuda-downloads) and [cudnn for CUDA11.X](https://developer.nvidia.com/rdp/cudnn-archive). 142 | 143 | After the installation is completed, press `Win + R`, type `cmd` and then press enter. 
In the pop-up window, type `nvcc --version` and confirm that version information is displayed, similar to the image shown 144 | ![Image](https://github.com/jianchang512/pyvideotrans/assets/3378335/e68de07f-4bb1-4fc9-bccd-8f841825915a) 145 | 146 | Then type `nvidia-smi` and confirm there is output and that the cuda version number is shown, similar to the image shown 147 | ![Image](https://github.com/jianchang512/pyvideotrans/assets/3378335/71f1d7d3-07f9-4579-b310-39284734006b) 148 | 149 | Then execute `python testcuda.py`. If it reports success, the installation is correct; otherwise, carefully check and reinstall. 150 | By default, the CPU is used for computation. If you have an NVIDIA graphics card and have configured the CUDA environment, change `devtype=cpu` in set.ini to `devtype=cuda` and restart to enable CUDA acceleration 151 | 152 | # Notices 153 | 154 | 0. If you do not have an Nvidia graphics card or the CUDA environment is not properly configured, do not use the large/large-v3 models, as they may exhaust memory and crash the machine 155 | 1. Chinese will in some cases be output as traditional characters 156 | 2. Sometimes you will encounter a "cublasxx.dll does not exist" error. In that case download cuBLAS and copy the dll files to the system directory: [click to download cuBLAS](https://github.com/jianchang512/stt/releases/download/0.0/cuBLAS_win.7z), unzip it, and copy the dll files inside to C:/Windows/System32 157 | 3. By default, the CPU is used for computation. If you have an NVIDIA graphics card and have configured the CUDA environment, change `devtype=cpu` in set.ini to `devtype=cuda` and restart to enable CUDA acceleration 158 | 159 | 160 | 161 | # Related Projects 162 | 163 | [Video translation and dubbing tool: translate subtitles and dub them](https://github.com/jianchang512/pyvideotrans) 164 | 165 | [Voice cloning tool: synthesize speech with any voice](https://github.com/jianchang512/clone-voice) 166 | 167 | [Vocal and background music separation: a very simple vocal/background-music separation tool with a local web interface](https://github.com/jianchang512/vocal-separate) 168 | 169 | # Acknowledgement 170 | 171 | Other projects this project mainly depends on: 172 | 173 | 1. https://github.com/SYSTRAN/faster-whisper 174 | 2. https://github.com/pallets/flask 175 | 3. https://ffmpeg.org/ 176 | 4. https://layui.dev 177 | -------------------------------------------------------------------------------- /docs/pt/README_pt-BR.md: -------------------------------------------------------------------------------- 1 | 
2 | 3 | [中文简体](../../README.md) | [English](../en/README_EN.md) | **Português (Brasil)** 4 | 5 |
6 | 7 | --- 8 | 9 |
10 | 11 | [👑 Apoie o projeto](https://github.com/jianchang512/pyvideotrans/blob/main/docs/pt-BR/About_pt-BR.md) | [Discord](https://discord.gg/SyT6GEwkJS) 12 | 13 |
14 | 15 | --- 16 | 17 | 18 | # Ferramenta de Transcrição de Fala para Texto (stt) 19 | 20 | Transcreva localmente seus áudios e vídeos com esta ferramenta offline. Baseada no modelo open source fast-whisper, ela converte a fala humana em texto, exportando em formatos json, srt com timestamps e texto puro. Após implantada, substitui com precisão similar serviços de reconhecimento de fala online como OpenAI ou Baidu. 21 | 22 | **Recursos:** 23 | 24 | * **Totalmente offline:** Implante em redes internas. 25 | * **Modelos flexíveis:** O fast-whisper oferece versões base/small/medium/large-v3. A qualidade aumenta do base para large-v3, mas exige mais recursos. Baixe e descompacte outros modelos na pasta `models`. 26 | * **Aceleração CUDA:** Se tiver uma GPU Nvidia e o ambiente CUDA configurado, use a aceleração CUDA automaticamente. 27 | 28 | ## 📢 **Patrocinador** 29 | 30 | [![](https://github.com/user-attachments/assets/48f4ac8f-e321-4bd3-ab2e-d6053d932f49)](https://302.ai/) 31 | 32 | **302.AI: A Plataforma de IA Sob Demanda** 33 | 34 | A 302.AI é a plataforma que reúne as melhores IAs do mundo em um só lugar, com pagamento sob demanda e sem mensalidades. Experimente diversas ferramentas de IA sem barreiras de entrada! 35 | 36 | **Benefícios:** 37 | 38 | * **Funcionalidades completas:** Chat de IA, geração de imagens e vídeos, processamento de imagens e muito mais. 39 | * **Fácil de usar:** Robôs, ferramentas e APIs para atender a todos os níveis de usuário. 40 | * **Pagamento sob demanda:** Sem planos mensais, sem barreiras para produtos, pague apenas pelo que usar. Seu saldo nunca expira! 41 | * **Separação de administradores e usuários:** Especialistas em IA configuram tudo para você, simplificando o uso. 42 | 43 | **🎁 Bônus Exclusivo:** 44 | 45 | **[Clique para se registrar](https://302.ai)** e ganhe 1 PTC (1 PTC = 1 dólar americano, cerca de 7 yuans) imediatamente. Além disso, ganhe 5 PTC por dia experimentando a plataforma através do link. 46 | 47 | **Junte-se à 302.AI e explore o mundo da inteligência artificial sem limites!** 48 | 49 | 50 | ## Demonstração 51 | 52 | https://github.com/jianchang512/stt/assets/3378335/d716acb6-c20c-4174-9620-f574a7ff095d 53 | 54 | ![Imagem de demonstração](https://github.com/jianchang512/stt/assets/3378335/0f724ff1-21b3-4960-b6ba-5aa994ea414c) 55 | 56 | 57 | # Como Usar a Versão Pré-compilada (Windows) e Implantar o Código Fonte (Linux, Mac e Windows) 58 | 59 | ## Versão Pré-compilada (Windows) 60 | 61 | 1. **Baixe os arquivos:** Acesse a [página de lançamentos](https://github.com/jianchang512/stt/releases) e baixe os arquivos pré-compilados. 62 | 2. **Descompacte:** Extraia os arquivos em um local de sua preferência (ex: `E:/stt`). 63 | 3. **Execute:** Dê um duplo clique em `start.exe` e aguarde a abertura automática da janela do navegador. 64 | 4. **Utilize a interface:** 65 | * Clique na área de upload da página. 66 | * Selecione o arquivo de áudio ou vídeo desejado (ou arraste-o para a área). 67 | * Escolha o idioma da fala, o formato de saída do texto e o modelo. 68 | * Clique em "Iniciar Reconhecimento". 69 | * O resultado será exibido na caixa de texto inferior no formato escolhido. 70 | 5. **Aceleração CUDA (opcional):** Se o seu computador possui uma GPU Nvidia e o ambiente CUDA está configurado corretamente, a aceleração CUDA será utilizada automaticamente. 71 | 72 | ## Implantação do Código Fonte (Linux, Mac e Windows) 73 | 74 | **Requisitos:** 75 | 76 | * Python 3.9, 3.10 ou 3.11 77 | 78 | **Passos:** 79 | 80 | 1. 
**Crie um diretório:** Crie um diretório vazio (ex: `E:/stt`). 81 | 2. **Clone o repositório:** Abra o terminal (ou prompt de comando) neste diretório e execute: 82 | ```bash 83 | git clone https://github.com/jianchang512/stt.git 84 | ``` 85 | 3. **Crie um ambiente virtual:** 86 | ```bash 87 | python -m venv venv 88 | ``` 89 | 4. **Ative o ambiente virtual:** 90 | * **Windows:** `%cd%/venv/scripts/activate` 91 | * **Linux/Mac:** `source ./venv/bin/activate` 92 | 5. **Instale as dependências:** 93 | ```bash 94 | pip install -r requirements.txt 95 | ``` 96 | * Em caso de erro de conflito de versão, execute: 97 | ```bash 98 | pip install -r requirements.txt --no-deps 99 | ``` 100 | * Para suporte à aceleração CUDA: 101 | ```bash 102 | pip uninstall -y torch 103 | pip install torch --index-url [https://download.pytorch.org/whl/cu121](https://download.pytorch.org/whl/cu121) 104 | ``` 105 | 6. **Instale o FFmpeg:** 106 | * **Windows:** Descompacte `ffmpeg.7z` e coloque `ffmpeg.exe` e `ffprobe.exe` no diretório do projeto. 107 | * **Linux/Mac:** Consulte as instruções de instalação do FFmpeg para sua distribuição. 108 | 7. **Baixe os modelos:** 109 | * **Método 01:** 110 | Baixe o [pacote de modelos compactado](https://github.com/jianchang512/stt/releases/tag/0.0) e coloque as pastas descompactadas na pasta `models` no diretório raiz do projeto. 111 | * **Método 02:** 112 | Use esta [tabela de modelos fast-whisper](https://github.com/jianchang512/pyvideotrans/blob/main/docs/pt-BR/Download-do-Modelo.md#modelos-faster-whisper) para baixar os modelos diretamente. 113 | 8. **Execute:** 114 | ```bash 115 | python start.py 116 | ``` 117 | Aguarde a abertura automática da janela do navegador. 118 | 119 | # Interface da API 120 | 121 | * **Endereço:** `http://127.0.0.1:9977/api` 122 | * **Método:** POST 123 | * **Parâmetros:** 124 | * `language` (código do idioma): 125 | * Chinês: `zh` 126 | * Inglês: `en` 127 | * Francês: `fr` 128 | * Alemão: `de` 129 | * Japonês: `ja` 130 | * Coreano: `ko` 131 | * Russo: `ru` 132 | * Espanhol: `es` 133 | * Tailandês: `th` 134 | * Italiano: `it` 135 | * Português: `pt` 136 | * Vietnamita: `vi` 137 | * Árabe: `ar` 138 | * Turco: `tr` 139 | * `model` (nome do modelo): 140 | * `base`: corresponde a `models/models--Systran--faster-whisper-base` 141 | * `small`: corresponde a `models/models--Systran--faster-whisper-small` 142 | * `medium`: corresponde a `models/models--Systran--faster-whisper-medium` 143 | * `large-v3`: corresponde a `models/models--Systran--faster-whisper-large-v3` 144 | * `response_format` (formato de legenda): `text`, `json` ou `srt` 145 | * `file` (arquivo de áudio ou vídeo) 146 | 147 | **Exemplo de Requisição (Python):** 148 | 149 | ```python 150 | import requests 151 | 152 | # Endereço da API 153 | url = "http://127.0.0.1:9977/api" 154 | 155 | # Parâmetros da requisição 156 | files = {"file": open("C:/Users/c1/Videos/2.wav", "rb")} 157 | data = {"language": "zh", "model": "base", "response_format": "json"} 158 | 159 | # Faz a requisição POST 160 | response = requests.post(url, timeout=600, data=data, files=files) 161 | 162 | # Imprime a resposta em formato JSON 163 | print(response.json()) 164 | 165 | # Interpretação da resposta: 166 | # - code == 0: sucesso 167 | # - code != 0: falha 168 | # - msg == "sucesso": reconhecimento bem-sucedido 169 | # - msg != "sucesso": motivo da falha 170 | # - data: texto retornado após o reconhecimento (se houver) 171 | ``` 172 | 173 | ## Suporte à Aceleração CUDA 174 | 175 | **Instalação de Ferramentas CUDA:** Para 
detalhes sobre o processo de instalação, consulte este [guia detalhado](https://juejin.cn/post/7318704408727519270). 176 | 177 | Se o seu computador possui uma placa gráfica Nvidia, siga estes passos: 178 | 179 | 1. **Atualize o driver da placa gráfica** para a versão mais recente. 180 | 2. **Instale o CUDA Toolkit** e o **cudnn for CUDA11.x** correspondentes: 181 | * [CUDA Toolkit](https://developer.nvidia.com/cuda-downloads) 182 | * [cudnn for CUDA11.x](https://developer.nvidia.com/rdp/cudnn-archive) 183 | 3. **Verifique a instalação:** 184 | * Pressione `Win + R`, digite `cmd` e pressione Enter. 185 | * Na janela de comando, digite `nvcc --version` e confirme se as informações da versão são exibidas (similar à imagem abaixo). 186 | ![image](https://github.com/jianchang512/pyvideotrans/assets/3378335/e68de07f-4bb1-4fc9-bccd-8f841825915a) 187 | * Digite `nvidia-smi` e verifique se as informações de saída incluem o número da versão CUDA (similar à imagem abaixo). 188 | * Execute `python testcuda.py`. Se exibir uma mensagem de sucesso, a instalação está correta. Caso contrário, revise e reinstale cuidadosamente. 189 | ![image](https://github.com/jianchang512/pyvideotrans/assets/3378335/71f1d7d3-07f9-4579-b310-39284734006b) 190 | 191 | **Habilitando a Aceleração CUDA:** 192 | 193 | Por padrão, a CPU é usada para cálculos. Se você confirmou que está usando uma placa gráfica Nvidia e o ambiente CUDA está configurado corretamente, altere `devtype=cpu` para `devtype=cuda` no arquivo `set.ini` e reinicie o programa para utilizar a aceleração CUDA. 194 | 195 | ## Observações Importantes 196 | 197 | 1. **Modelos e Requisitos:** Se você não possui uma placa gráfica Nvidia ou o ambiente CUDA não está configurado corretamente, evite usar os modelos large/large-v3, pois eles podem consumir muita memória e travar o sistema. 198 | 2. **Exibição de Caracteres:** Em alguns casos, o texto em chinês pode ser exibido em caracteres tradicionais. 199 | 3. **Erro "cublasxx.dll não existe":** Baixe o cuBLAS neste link: [cuBLAS Download](https://github.com/jianchang512/stt/releases/download/0.0/cuBLAS_win.7z). Descompacte o arquivo e copie os arquivos DLL para `C:/Windows/System32`. 200 | 4. **Mensagem de Aviso no Console:** Se o console exibir a mensagem "[W:onnxruntime:Default, onnxruntime_pybind_state.cc:1983 onnxruntime::python::CreateInferencePybindStateModule] Init provider bridge failed.", ignore-a, pois não afeta o uso do programa. 201 | 5. **Falha na Execução com CUDA Habilitado:** 202 | * **Possível Causa:** Se o CUDA estiver habilitado, mas o cudnn não foi instalado e configurado manualmente, pode ocorrer falha na execução. 203 | * **Solução:** Instale a versão do cudnn que corresponde à sua versão do CUDA. Consulte o guia detalhado para instruções: [Guia de Instalação](https://juejin.cn/post/7318704408727519270). 204 | * **Memória de Vídeo Insuficiente:** Se o problema persistir após a instalação do cudnn, a memória de vídeo da GPU pode ser insuficiente. Nesse caso, tente usar o modelo medium e evite o modelo large-v3, especialmente se a memória de vídeo for inferior a 8GB e o vídeo tiver mais de 20MB. 205 | 206 | Lembre-se de que este guia fornece informações básicas e você pode precisar consultar recursos adicionais para solucionar problemas específicos. 
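Para uma verificação rápida em Python de que o PyTorch enxerga CUDA e cuDNN antes de alterar `devtype` no `set.ini`, um esboço mínimo (equivalente ao script `testcuda.py` incluído no repositório) seria:

```python
import torch
from torch.backends import cudnn

# Verifica se CUDA e cuDNN estão disponíveis para o PyTorch,
# como faz o script testcuda.py do repositório.
if torch.cuda.is_available():
    if cudnn.is_available() and cudnn.is_acceptable(torch.tensor(1.0).cuda()):
        print("CUDA e cuDNN disponíveis; pode definir devtype=cuda no set.ini")
    else:
        print("CUDA disponível, mas cuDNN não; instale o cuDNN compatível com a sua versão de CUDA")
else:
    print("CUDA não está disponível neste computador; mantenha devtype=cpu")
```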
207 | 208 | # Projetos Relacionados 209 | 210 | * [Tradução e Dublagem de Vídeo](https://github.com/jianchang512/pyvideotrans) 211 | * [Clonagem de Voz](https://github.com/jianchang512/clone-voice) 212 | * [Separação de Voz e Música](https://github.com/jianchang512/vocal-separate) 213 | 214 | # Agradecimentos 215 | 216 | Este projeto utiliza: 217 | 218 | 1. https://github.com/SYSTRAN/faster-whisper 219 | 2. https://github.com/pallets/flask 220 | 3. https://ffmpeg.org/ 221 | 4. https://layui.dev 222 | -------------------------------------------------------------------------------- /start.py: -------------------------------------------------------------------------------- 1 | import logging,shutil 2 | import re 3 | import threading 4 | import sys 5 | import torch 6 | from flask import Flask, request, render_template, jsonify, send_from_directory,Response 7 | import os 8 | from gevent.pywsgi import WSGIServer, WSGIHandler, LoggingLogAdapter 9 | from logging.handlers import RotatingFileHandler 10 | import warnings 11 | warnings.filterwarnings('ignore') 12 | import stslib 13 | from stslib import cfg, tool 14 | from stslib.cfg import ROOT_DIR 15 | from faster_whisper import WhisperModel 16 | import time 17 | from werkzeug.utils import secure_filename 18 | import uuid 19 | 20 | class CustomRequestHandler(WSGIHandler): 21 | def log_request(self): 22 | pass 23 | 24 | 25 | # 配置日志 26 | # 禁用 Werkzeug 默认的日志处理器 27 | log = logging.getLogger('werkzeug') 28 | log.handlers[:] = [] 29 | log.setLevel(logging.WARNING) 30 | app = Flask(__name__, static_folder=os.path.join(ROOT_DIR, 'static'), static_url_path='/static', template_folder=os.path.join(ROOT_DIR, 'templates')) 31 | root_log = logging.getLogger() # Flask的根日志记录器 32 | root_log.handlers = [] 33 | root_log.setLevel(logging.WARNING) 34 | 35 | # 配置日志 36 | app.logger.setLevel(logging.WARNING) # 设置日志级别为 INFO 37 | # 创建 RotatingFileHandler 对象,设置写入的文件路径和大小限制 38 | file_handler = RotatingFileHandler(os.path.join(ROOT_DIR, 'sts.log'), maxBytes=1024 * 1024, backupCount=5) 39 | # 创建日志的格式 40 | formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s') 41 | # 设置文件处理器的级别和格式 42 | file_handler.setLevel(logging.WARNING) 43 | file_handler.setFormatter(formatter) 44 | # 将文件处理器添加到日志记录器中 45 | app.logger.addHandler(file_handler) 46 | 47 | 48 | @app.route('/static/') 49 | def static_files(filename): 50 | return send_from_directory(app.config['STATIC_FOLDER'], filename) 51 | 52 | 53 | def resolve_local_path(path_value: str): 54 | """ 55 | Resolve user-provided path (possibly relative) against configured base dir. 56 | Raises ValueError if invalid. 
57 | """ 58 | if not path_value: 59 | raise ValueError('file_path is required') 60 | raw_value = path_value.strip() 61 | base_dir = getattr(cfg, 'FILE_BASE_DIR', None) 62 | expanded = os.path.expanduser(raw_value) 63 | if os.path.isabs(expanded): 64 | resolved = os.path.abspath(expanded) 65 | else: 66 | if not base_dir: 67 | raise ValueError('Relative path provided but STS_FILE_BASE_DIR is not configured') 68 | resolved = os.path.abspath(os.path.join(base_dir, expanded)) 69 | if base_dir: 70 | base_dir_abs = os.path.abspath(base_dir) 71 | try: 72 | common = os.path.commonpath([resolved, base_dir_abs]) 73 | except ValueError: 74 | # Different drives on Windows 75 | raise ValueError('file_path is outside of allowed directory') 76 | if common != base_dir_abs: 77 | raise ValueError('file_path is outside of allowed directory') 78 | return resolved 79 | 80 | 81 | @app.route('/') 82 | def index(): 83 | sets=cfg.parse_ini() 84 | file_param = request.args.get('file') or request.args.get('path') or '' 85 | resolved_param = '' 86 | if file_param: 87 | try: 88 | resolved_param = resolve_local_path(file_param) 89 | except Exception as exc: 90 | app.logger.error(f'Failed to resolve initial file path: {exc}') 91 | resolved_param = '' 92 | return render_template("index.html", 93 | devtype=sets.get('devtype'), 94 | lang_code=cfg.lang_code, 95 | language=cfg.LANG, 96 | version=stslib.version_str, 97 | root_dir=ROOT_DIR.replace('\\', '/'), 98 | model_list=cfg.sets.get('model_list'), 99 | initial_local_file=resolved_param 100 | ) 101 | 102 | 103 | # 上传音频 104 | @app.route('/upload', methods=['POST']) 105 | def upload(): 106 | try: 107 | # 获取上传的文件 108 | audio_file = request.files['audio'] 109 | # 如果是mp4 110 | noextname, ext = os.path.splitext(audio_file.filename) 111 | ext = ext.lower() 112 | # 如果是视频,先分离 113 | wav_file = os.path.join(cfg.TMP_DIR, f'{noextname}.wav') 114 | if os.path.exists(wav_file) and os.path.getsize(wav_file) > 0: 115 | return jsonify({'code': 0, 'msg': cfg.transobj['lang1'], "data": os.path.basename(wav_file)}) 116 | 117 | msg = "" 118 | video_file = os.path.join(cfg.TMP_DIR, f'{noextname}{ext}') 119 | audio_file.save(video_file) 120 | params = [ 121 | "-i", 122 | video_file, 123 | "-ar", 124 | "16000", 125 | "-ac", 126 | "1", 127 | wav_file 128 | ] 129 | try: 130 | rs = tool.runffmpeg(params) 131 | except Exception as e: 132 | return jsonify({"code": 1, "msg": str(e)}) 133 | if rs != 'ok': 134 | return jsonify({"code": 1, "msg": rs}) 135 | msg = "," + cfg.transobj['lang9'] 136 | 137 | # 返回成功的响应 138 | return jsonify({'code': 0, 'msg': cfg.transobj['lang1'] + msg, "data": os.path.basename(wav_file)}) 139 | except Exception as e: 140 | app.logger.error(f'[upload]error: {e}') 141 | return jsonify({'code': 2, 'msg': cfg.transobj['lang2']}) 142 | 143 | 144 | @app.route('/upload_local', methods=['POST']) 145 | def upload_local(): 146 | """ 147 | Import a file that already exists on the server by providing its absolute path. 
148 | """ 149 | payload = request.get_json(silent=True) or request.form 150 | file_path = (payload.get('file_path') if payload else '') or '' 151 | if not file_path: 152 | return jsonify({'code': 1, 'msg': 'file_path is required'}) 153 | try: 154 | abs_path = resolve_local_path(file_path) 155 | except Exception as exc: 156 | return jsonify({'code': 1, 'msg': str(exc)}) 157 | if not os.path.exists(abs_path): 158 | return jsonify({'code': 1, 'msg': cfg.langlist[cfg.LANG]['lang5']}) 159 | if not os.path.isfile(abs_path): 160 | return jsonify({'code': 1, 'msg': 'Target is not a file'}) 161 | 162 | wav_filename = f"{uuid.uuid4().hex}_{secure_filename(os.path.basename(abs_path))}.wav" 163 | wav_file = os.path.join(cfg.TMP_DIR, wav_filename) 164 | params = [ 165 | "-i", 166 | abs_path, 167 | "-ar", 168 | "16000", 169 | "-ac", 170 | "1", 171 | wav_file 172 | ] 173 | try: 174 | rs = tool.runffmpeg(params) 175 | except Exception as e: 176 | return jsonify({'code': 1, 'msg': str(e)}) 177 | if rs != 'ok': 178 | return jsonify({'code': 1, 'msg': rs}) 179 | msg = cfg.transobj['lang1'] + "," + cfg.transobj['lang9'] 180 | return jsonify({'code': 0, 'msg': msg, 'data': os.path.basename(wav_file), 'file_path': abs_path}) 181 | 182 | # 后端线程处理 183 | def shibie(): 184 | while 1: 185 | if len(cfg.TASK_QUEUE)<1: 186 | # 不存在任务,卸载所有模型 187 | for model_key in cfg.MODEL_DICT: 188 | try: 189 | cfg.MODEL_DICT[model_key]=None 190 | import torch 191 | if torch.cuda.is_available(): 192 | torch.cuda.empty_cache() 193 | except: 194 | pass 195 | time.sleep(2) 196 | continue 197 | 198 | 199 | sets=cfg.parse_ini() 200 | task=cfg.TASK_QUEUE.pop(0) 201 | print(f'{task=}') 202 | wav_name = task['wav_name'] 203 | model = task['model'] 204 | language = task['language'] 205 | data_type = task['data_type'] 206 | wav_file = task['wav_file'] 207 | key = task['key'] 208 | prompt=task.get('prompt',sets.get('initial_prompt_zh')) 209 | 210 | cfg.progressbar[key]=0 211 | print(f'{model=}') 212 | modelobj=cfg.MODEL_DICT.get(model) 213 | if not modelobj: 214 | try: 215 | print(f'开始加载模型,若不存在将自动下载') 216 | modelobj= WhisperModel( 217 | model if not model.startswith('distil') else model.replace('-whisper', ''), 218 | device=sets.get('devtype'), 219 | download_root=cfg.ROOT_DIR + "/models" 220 | ) 221 | cfg.MODEL_DICT[model]=modelobj 222 | except Exception as e: 223 | err=f'从 huggingface.co 下载模型 {model} 失败,请检查网络连接' if model.find('/')>0 else '' 224 | cfg.progressresult[key]='error:'+err+str(e) 225 | return 226 | try: 227 | segments,info = modelobj.transcribe( 228 | wav_file, 229 | beam_size=sets.get('beam_size'), 230 | best_of=sets.get('best_of'), 231 | condition_on_previous_text=sets.get('condition_on_previous_text'), 232 | vad_filter=sets.get('vad'), 233 | language=language if language and language !='auto' else None, 234 | initial_prompt=prompt 235 | ) 236 | total_duration = round(info.duration, 2) # Same precision as the Whisper timestamps. 
237 | 238 | raw_subtitles = [] 239 | for segment in segments: 240 | cfg.progressbar[key]=round(segment.end/total_duration, 2) 241 | start = int(segment.start * 1000) 242 | end = int(segment.end * 1000) 243 | startTime = tool.ms_to_time_string(ms=start) 244 | endTime = tool.ms_to_time_string(ms=end) 245 | text = segment.text.strip().replace(''', "'") 246 | text = re.sub(r'&#\d+;', '', text) 247 | 248 | # 无有效字符 249 | if not text or re.match(r'^[,。、?‘’“”;:({}【】):;"\'\s \d`!@#$%^&*()_+=.,?/\\-]*$', text) or len( 250 | text) <= 1: 251 | continue 252 | if data_type == 'json': 253 | # 原语言字幕 254 | raw_subtitles.append( 255 | {"line": len(raw_subtitles) + 1, "start_time": startTime, "end_time": endTime, "text": text}) 256 | elif data_type == 'text': 257 | raw_subtitles.append(text) 258 | else: 259 | raw_subtitles.append(f'{len(raw_subtitles) + 1}\n{startTime} --> {endTime}\n{text}\n') 260 | cfg.progressbar[key]=1 261 | if data_type != 'json': 262 | raw_subtitles = "\n".join(raw_subtitles) 263 | cfg.progressresult[key]=raw_subtitles 264 | except Exception as e: 265 | cfg.progressresult[key]='error:'+str(e) 266 | print(str(e)) 267 | 268 | 269 | 270 | # params 271 | # wav_name:tmp下的wav文件 272 | # model 模型名称 273 | @app.route('/process', methods=['GET', 'POST']) 274 | def process(): 275 | # 原始字符串 276 | wav_name = request.form.get("wav_name","").strip() 277 | if not wav_name: 278 | return jsonify({"code": 1, "msg": f"No file had uploaded"}) 279 | model = request.form.get("model") 280 | # 语言 281 | language = request.form.get("language") 282 | # 返回格式 json txt srt 283 | data_type = request.form.get("data_type") 284 | wav_file = os.path.join(cfg.TMP_DIR, wav_name) 285 | if not os.path.exists(wav_file): 286 | return jsonify({"code": 1, "msg": f"{wav_file} {cfg.langlist['lang5']}"}) 287 | 288 | key=f'{wav_name}{model}{language}{data_type}' 289 | #重设结果为none 290 | cfg.progressresult[key]=None 291 | # 重设进度为0 292 | cfg.progressbar[key]=0 293 | #存入任务队列 294 | cfg.TASK_QUEUE.append({"wav_name":wav_name, "model":model, "language":language, "data_type":data_type, "wav_file":wav_file, "key":key}) 295 | return jsonify({"code":0, "msg":"ing"}) 296 | 297 | # 前端获取进度及完成后的结果 298 | @app.route('/progressbar', methods=['GET', 'POST']) 299 | def progressbar(): 300 | # 原始字符串 301 | wav_name = request.form.get("wav_name").strip() 302 | model_name = request.form.get("model") 303 | # 语言 304 | language = request.form.get("language") 305 | # 返回格式 json txt srt 306 | data_type = request.form.get("data_type") 307 | key = f'{wav_name}{model_name}{language}{data_type}' 308 | if key in cfg.progressresult and isinstance(cfg.progressresult[key],str) and cfg.progressresult[key].startswith('error:'): 309 | return jsonify({"code":1,"msg":cfg.progressresult[key][6:]}) 310 | 311 | progressbar = cfg.progressbar.get(key) 312 | if progressbar is None: 313 | return jsonify({"code":1,"msg":"No this file"}),500 314 | if progressbar>=1: 315 | return jsonify({"code":0, "data":progressbar, "msg":"ok", "result":cfg.progressresult[key]}) 316 | return jsonify({"code":0, "data":progressbar, "msg":"ok"}) 317 | 318 | 319 | """ 320 | # openai兼容格式 321 | from openai import OpenAI 322 | 323 | client = OpenAI(api_key='123',base_url='http://127.0.0.1:9977/v1') 324 | audio_file= open("C:/users/c1/videos/60.wav", "rb") 325 | 326 | transcription = client.audio.transcriptions.create( 327 | model="tiny", 328 | file=audio_file, 329 | response_format="text" # srt json 330 | ) 331 | 332 | print(transcription.text) 333 | 334 | """ 335 | @app.route('/v1/audio/transcriptions', 
methods=['POST']) 336 | def transcribe_audio(): 337 | if 'file' not in request.files: 338 | return jsonify({"error": "请求中未找到文件部分"}), 400 339 | file = request.files['file'] 340 | if file.filename == '': 341 | return jsonify({"error": "未选择文件"}), 400 342 | if not shutil.which('ffmpeg'): 343 | return jsonify({"error": "FFmpeg 未安装或未在系统 PATH 中"}), 500 344 | if not shutil.which('ffprobe'): 345 | return jsonify({"error": "ffprobe 未安装或未在系统 PATH 中"}), 500 346 | # 用 model 参数传递特殊要求,例如 ----*---- 分隔字符串和json 347 | model = request.form.get('model', '') 348 | # prompt 用于获取语言 349 | prompt = request.form.get('prompt', '') 350 | language = request.form.get('language', '') 351 | response_format = request.form.get('response_format', 'text') 352 | 353 | original_filename = secure_filename(file.filename) 354 | wav_name = str(uuid.uuid4())+f"_{original_filename}" 355 | temp_original_path = os.path.join(cfg.TMP_DIR, wav_name) 356 | wav_file = os.path.join(cfg.TMP_DIR, wav_name+"-target.wav") 357 | file.save(temp_original_path) 358 | 359 | params = [ 360 | "-i", 361 | temp_original_path, 362 | "-ar", 363 | "16000", 364 | "-ac", 365 | "1", 366 | wav_file 367 | ] 368 | 369 | try: 370 | print(params) 371 | rs = tool.runffmpeg(params) 372 | if rs != 'ok': 373 | return jsonify({"error": rs}),500 374 | except Exception as e: 375 | print(e) 376 | return jsonify({"error": str(e)}),500 377 | 378 | try: 379 | res=_api_process(model_name=model,wav_file=wav_file,language=language,response_format=response_format,prompt=prompt) 380 | if response_format=='srt': 381 | return Response(res,mimetype='text/plain') 382 | 383 | if response_format =='text': 384 | res={"text":res} 385 | return jsonify(res) 386 | except Exception as e: 387 | return jsonify({"error":str(e)}),500 388 | 389 | # 原api接口,保留兼容 390 | @app.route('/api',methods=['GET','POST']) 391 | def api(): 392 | try: 393 | # 获取上传的文件 394 | audio_file = request.files['file'] 395 | model_name = request.form.get("model") 396 | language = request.form.get("language") 397 | response_format = request.form.get("response_format",'srt') 398 | 399 | basename = os.path.basename(audio_file.filename) 400 | video_file = os.path.join(cfg.TMP_DIR, basename) 401 | audio_file.save(video_file) 402 | 403 | wav_file = os.path.join(cfg.TMP_DIR, f'{basename}-{time.time()}.wav') 404 | params = [ 405 | "-i", 406 | video_file, 407 | "-ar", 408 | "16000", 409 | "-ac", 410 | "1", 411 | wav_file 412 | ] 413 | 414 | try: 415 | print(params) 416 | rs = tool.runffmpeg(params) 417 | if rs != 'ok': 418 | return jsonify({"code": 1, "msg": rs}) 419 | except Exception as e: 420 | print(e) 421 | return jsonify({"code": 1, "msg": str(e)}) 422 | 423 | res=_api_process(model_name=model_name,wav_file=wav_file,language=language,response_format=response_format) 424 | return jsonify({"code": 0, "msg": 'ok', "data": res}) 425 | except Exception as e: 426 | print(e) 427 | app.logger.error(f'[api]error: {e}') 428 | return jsonify({'code': 2, 'msg': str(e)}) 429 | 430 | # api接口调用 431 | def _api_process(model_name,wav_file,language=None,response_format="text",prompt=None): 432 | try: 433 | sets=cfg.parse_ini() 434 | if model_name.startswith('distil-'): 435 | model_name = model_name.replace('-whisper', '') 436 | model = WhisperModel( 437 | model_name, 438 | device=sets.get('devtype'), 439 | download_root=cfg.ROOT_DIR + "/models" 440 | ) 441 | except Exception as e: 442 | raise 443 | 444 | segments,info = model.transcribe( 445 | wav_file, 446 | beam_size=sets.get('beam_size'), 447 | best_of=sets.get('best_of'), 448 | temperature=0 
if sets.get('temperature') == 0 else [0.0, 0.2, 0.4, 0.6, 0.8, 1.0], 449 | condition_on_previous_text=sets.get('condition_on_previous_text'), 450 | vad_filter=sets.get('vad'), 451 | language=language if language and language != 'auto' else None, 452 | initial_prompt=sets.get('initial_prompt_zh') if not prompt else prompt 453 | ) 454 | raw_subtitles = [] 455 | for segment in segments: 456 | start = int(segment.start * 1000) 457 | end = int(segment.end * 1000) 458 | startTime = tool.ms_to_time_string(ms=start) 459 | endTime = tool.ms_to_time_string(ms=end) 460 | text = segment.text.strip().replace('&#39;', "'") 461 | text = re.sub(r'&#\d+;', '', text) 462 | 463 | # Skip segments with no meaningful characters 464 | if not text or re.match(r'^[,。、?‘’“”;:({}【】):;"\'\s \d`!@#$%^&*()_+=.,?/\\-]*$', text) or len(text) <= 1: 465 | continue 466 | if response_format == 'json': 467 | # Subtitles in the original language 468 | raw_subtitles.append( 469 | {"line": len(raw_subtitles) + 1, "start_time": startTime, "end_time": endTime, "text": text}) 470 | elif response_format == 'text': 471 | raw_subtitles.append(text) 472 | else: 473 | raw_subtitles.append(f'{len(raw_subtitles) + 1}\n{startTime} --> {endTime}\n{text}\n') 474 | if response_format != 'json': 475 | raw_subtitles = "\n".join(raw_subtitles) 476 | return raw_subtitles 477 | 478 | @app.route('/checkupdate', methods=['GET', 'POST']) 479 | def checkupdate(): 480 | return jsonify({'code': 0, "msg": cfg.updatetips}) 481 | 482 | 483 | if __name__ == '__main__': 484 | http_server = None 485 | try: 486 | threading.Thread(target=tool.checkupdate).start() 487 | threading.Thread(target=shibie).start() 488 | try: 489 | if cfg.devtype == 'cpu': 490 | print('\nIf this machine has an NVIDIA GPU and the CUDA environment is installed correctly, you can change\ndevtype=cpu to devtype=cuda in set.ini, then restart to speed up recognition\n') 491 | host = cfg.web_address.split(':') 492 | http_server = WSGIServer((host[0], int(host[1])), app, handler_class=CustomRequestHandler) 493 | threading.Thread(target=tool.openweb, args=(cfg.web_address,)).start() 494 | http_server.serve_forever() 495 | finally: 496 | if http_server: 497 | http_server.stop() 498 | except Exception as e: 499 | if http_server: 500 | http_server.stop() 501 | print("error:" + str(e)) 502 | app.logger.error(f"[app]start error:{str(e)}") 503 |
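Both endpoints above take a multipart file upload plus form fields (model, language, response_format, and, for the newer endpoint, prompt); FFmpeg resamples the upload to 16 kHz mono before faster-whisper transcribes it. The following is a minimal client sketch using the requests library already listed in requirements.txt. It is not part of the repository: the route path of the first endpoint is truncated in this excerpt, so the /v1/audio/transcriptions path, the host/port, and the "base" model name below are assumptions to be replaced with the web_address and models from your own set.ini.

# Hedged client sketch (illustrative only, not part of the repository).
# Assumptions: the server listens on the web_address from set.ini
# (127.0.0.1:9977 is only a placeholder), a "base" model is available,
# and the truncated route above is /v1/audio/transcriptions.
import requests

BASE = "http://127.0.0.1:9977"  # placeholder; use your set.ini web_address

# Newer endpoint: returns SRT or plain text directly, JSON otherwise
with open("audio.wav", "rb") as f:
    r = requests.post(
        f"{BASE}/v1/audio/transcriptions",  # assumed path, see note above
        files={"file": f},
        data={"model": "base", "language": "zh", "response_format": "srt"},
    )
print(r.text)  # SRT body when response_format == 'srt'

# Legacy /api endpoint: always wraps the result as {"code", "msg", "data"}
with open("audio.wav", "rb") as f:
    r = requests.post(
        f"{BASE}/api",
        files={"file": f},
        data={"model": "base", "language": "zh", "response_format": "json"},
    )
print(r.json())  # {"code": 0, "msg": "ok", "data": [...]} on success

For response_format='text', the first endpoint instead returns a JSON object of the form {"text": "..."}, matching the text branch in transcribe_audio above.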
-------------------------------------------------------------------------------- /templates/index.html: -------------------------------------------------------------------------------- [The single-page web UI template could not be recovered from this dump: its HTML/JS markup was stripped during extraction, leaving only stray source-line numbers and a few text fragments such as the Github and Discord links, so the file's contents are omitted here rather than reconstructed.]
579 | 580 | 581 | 1013 | 1014 | 1015 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | GNU GENERAL PUBLIC LICENSE 2 | Version 3, 29 June 2007 3 | 4 | Copyright (C) 2007 Free Software Foundation, Inc. 5 | Everyone is permitted to copy and distribute verbatim copies 6 | of this license document, but changing it is not allowed. 7 | 8 | Preamble 9 | 10 | The GNU General Public License is a free, copyleft license for 11 | software and other kinds of works. 12 | 13 | The licenses for most software and other practical works are designed 14 | to take away your freedom to share and change the works. By contrast, 15 | the GNU General Public License is intended to guarantee your freedom to 16 | share and change all versions of a program--to make sure it remains free 17 | software for all its users. We, the Free Software Foundation, use the 18 | GNU General Public License for most of our software; it applies also to 19 | any other work released this way by its authors. You can apply it to 20 | your programs, too. 21 | 22 | When we speak of free software, we are referring to freedom, not 23 | price. Our General Public Licenses are designed to make sure that you 24 | have the freedom to distribute copies of free software (and charge for 25 | them if you wish), that you receive source code or can get it if you 26 | want it, that you can change the software or use pieces of it in new 27 | free programs, and that you know you can do these things. 28 | 29 | To protect your rights, we need to prevent others from denying you 30 | these rights or asking you to surrender the rights. Therefore, you have 31 | certain responsibilities if you distribute copies of the software, or if 32 | you modify it: responsibilities to respect the freedom of others. 33 | 34 | For example, if you distribute copies of such a program, whether 35 | gratis or for a fee, you must pass on to the recipients the same 36 | freedoms that you received. You must make sure that they, too, receive 37 | or can get the source code. And you must show them these terms so they 38 | know their rights. 39 | 40 | Developers that use the GNU GPL protect your rights with two steps: 41 | (1) assert copyright on the software, and (2) offer you this License 42 | giving you legal permission to copy, distribute and/or modify it. 43 | 44 | For the developers' and authors' protection, the GPL clearly explains 45 | that there is no warranty for this free software. For both users' and 46 | authors' sake, the GPL requires that modified versions be marked as 47 | changed, so that their problems will not be attributed erroneously to 48 | authors of previous versions. 49 | 50 | Some devices are designed to deny users access to install or run 51 | modified versions of the software inside them, although the manufacturer 52 | can do so. This is fundamentally incompatible with the aim of 53 | protecting users' freedom to change the software. The systematic 54 | pattern of such abuse occurs in the area of products for individuals to 55 | use, which is precisely where it is most unacceptable. Therefore, we 56 | have designed this version of the GPL to prohibit the practice for those 57 | products. If such problems arise substantially in other domains, we 58 | stand ready to extend this provision to those domains in future versions 59 | of the GPL, as needed to protect the freedom of users. 
60 | 61 | Finally, every program is threatened constantly by software patents. 62 | States should not allow patents to restrict development and use of 63 | software on general-purpose computers, but in those that do, we wish to 64 | avoid the special danger that patents applied to a free program could 65 | make it effectively proprietary. To prevent this, the GPL assures that 66 | patents cannot be used to render the program non-free. 67 | 68 | The precise terms and conditions for copying, distribution and 69 | modification follow. 70 | 71 | TERMS AND CONDITIONS 72 | 73 | 0. Definitions. 74 | 75 | "This License" refers to version 3 of the GNU General Public License. 76 | 77 | "Copyright" also means copyright-like laws that apply to other kinds of 78 | works, such as semiconductor masks. 79 | 80 | "The Program" refers to any copyrightable work licensed under this 81 | License. Each licensee is addressed as "you". "Licensees" and 82 | "recipients" may be individuals or organizations. 83 | 84 | To "modify" a work means to copy from or adapt all or part of the work 85 | in a fashion requiring copyright permission, other than the making of an 86 | exact copy. The resulting work is called a "modified version" of the 87 | earlier work or a work "based on" the earlier work. 88 | 89 | A "covered work" means either the unmodified Program or a work based 90 | on the Program. 91 | 92 | To "propagate" a work means to do anything with it that, without 93 | permission, would make you directly or secondarily liable for 94 | infringement under applicable copyright law, except executing it on a 95 | computer or modifying a private copy. Propagation includes copying, 96 | distribution (with or without modification), making available to the 97 | public, and in some countries other activities as well. 98 | 99 | To "convey" a work means any kind of propagation that enables other 100 | parties to make or receive copies. Mere interaction with a user through 101 | a computer network, with no transfer of a copy, is not conveying. 102 | 103 | An interactive user interface displays "Appropriate Legal Notices" 104 | to the extent that it includes a convenient and prominently visible 105 | feature that (1) displays an appropriate copyright notice, and (2) 106 | tells the user that there is no warranty for the work (except to the 107 | extent that warranties are provided), that licensees may convey the 108 | work under this License, and how to view a copy of this License. If 109 | the interface presents a list of user commands or options, such as a 110 | menu, a prominent item in the list meets this criterion. 111 | 112 | 1. Source Code. 113 | 114 | The "source code" for a work means the preferred form of the work 115 | for making modifications to it. "Object code" means any non-source 116 | form of a work. 117 | 118 | A "Standard Interface" means an interface that either is an official 119 | standard defined by a recognized standards body, or, in the case of 120 | interfaces specified for a particular programming language, one that 121 | is widely used among developers working in that language. 
122 | 123 | The "System Libraries" of an executable work include anything, other 124 | than the work as a whole, that (a) is included in the normal form of 125 | packaging a Major Component, but which is not part of that Major 126 | Component, and (b) serves only to enable use of the work with that 127 | Major Component, or to implement a Standard Interface for which an 128 | implementation is available to the public in source code form. A 129 | "Major Component", in this context, means a major essential component 130 | (kernel, window system, and so on) of the specific operating system 131 | (if any) on which the executable work runs, or a compiler used to 132 | produce the work, or an object code interpreter used to run it. 133 | 134 | The "Corresponding Source" for a work in object code form means all 135 | the source code needed to generate, install, and (for an executable 136 | work) run the object code and to modify the work, including scripts to 137 | control those activities. However, it does not include the work's 138 | System Libraries, or general-purpose tools or generally available free 139 | programs which are used unmodified in performing those activities but 140 | which are not part of the work. For example, Corresponding Source 141 | includes interface definition files associated with source files for 142 | the work, and the source code for shared libraries and dynamically 143 | linked subprograms that the work is specifically designed to require, 144 | such as by intimate data communication or control flow between those 145 | subprograms and other parts of the work. 146 | 147 | The Corresponding Source need not include anything that users 148 | can regenerate automatically from other parts of the Corresponding 149 | Source. 150 | 151 | The Corresponding Source for a work in source code form is that 152 | same work. 153 | 154 | 2. Basic Permissions. 155 | 156 | All rights granted under this License are granted for the term of 157 | copyright on the Program, and are irrevocable provided the stated 158 | conditions are met. This License explicitly affirms your unlimited 159 | permission to run the unmodified Program. The output from running a 160 | covered work is covered by this License only if the output, given its 161 | content, constitutes a covered work. This License acknowledges your 162 | rights of fair use or other equivalent, as provided by copyright law. 163 | 164 | You may make, run and propagate covered works that you do not 165 | convey, without conditions so long as your license otherwise remains 166 | in force. You may convey covered works to others for the sole purpose 167 | of having them make modifications exclusively for you, or provide you 168 | with facilities for running those works, provided that you comply with 169 | the terms of this License in conveying all material for which you do 170 | not control copyright. Those thus making or running the covered works 171 | for you must do so exclusively on your behalf, under your direction 172 | and control, on terms that prohibit them from making any copies of 173 | your copyrighted material outside their relationship with you. 174 | 175 | Conveying under any other circumstances is permitted solely under 176 | the conditions stated below. Sublicensing is not allowed; section 10 177 | makes it unnecessary. 178 | 179 | 3. Protecting Users' Legal Rights From Anti-Circumvention Law. 
180 | 181 | No covered work shall be deemed part of an effective technological 182 | measure under any applicable law fulfilling obligations under article 183 | 11 of the WIPO copyright treaty adopted on 20 December 1996, or 184 | similar laws prohibiting or restricting circumvention of such 185 | measures. 186 | 187 | When you convey a covered work, you waive any legal power to forbid 188 | circumvention of technological measures to the extent such circumvention 189 | is effected by exercising rights under this License with respect to 190 | the covered work, and you disclaim any intention to limit operation or 191 | modification of the work as a means of enforcing, against the work's 192 | users, your or third parties' legal rights to forbid circumvention of 193 | technological measures. 194 | 195 | 4. Conveying Verbatim Copies. 196 | 197 | You may convey verbatim copies of the Program's source code as you 198 | receive it, in any medium, provided that you conspicuously and 199 | appropriately publish on each copy an appropriate copyright notice; 200 | keep intact all notices stating that this License and any 201 | non-permissive terms added in accord with section 7 apply to the code; 202 | keep intact all notices of the absence of any warranty; and give all 203 | recipients a copy of this License along with the Program. 204 | 205 | You may charge any price or no price for each copy that you convey, 206 | and you may offer support or warranty protection for a fee. 207 | 208 | 5. Conveying Modified Source Versions. 209 | 210 | You may convey a work based on the Program, or the modifications to 211 | produce it from the Program, in the form of source code under the 212 | terms of section 4, provided that you also meet all of these conditions: 213 | 214 | a) The work must carry prominent notices stating that you modified 215 | it, and giving a relevant date. 216 | 217 | b) The work must carry prominent notices stating that it is 218 | released under this License and any conditions added under section 219 | 7. This requirement modifies the requirement in section 4 to 220 | "keep intact all notices". 221 | 222 | c) You must license the entire work, as a whole, under this 223 | License to anyone who comes into possession of a copy. This 224 | License will therefore apply, along with any applicable section 7 225 | additional terms, to the whole of the work, and all its parts, 226 | regardless of how they are packaged. This License gives no 227 | permission to license the work in any other way, but it does not 228 | invalidate such permission if you have separately received it. 229 | 230 | d) If the work has interactive user interfaces, each must display 231 | Appropriate Legal Notices; however, if the Program has interactive 232 | interfaces that do not display Appropriate Legal Notices, your 233 | work need not make them do so. 234 | 235 | A compilation of a covered work with other separate and independent 236 | works, which are not by their nature extensions of the covered work, 237 | and which are not combined with it such as to form a larger program, 238 | in or on a volume of a storage or distribution medium, is called an 239 | "aggregate" if the compilation and its resulting copyright are not 240 | used to limit the access or legal rights of the compilation's users 241 | beyond what the individual works permit. Inclusion of a covered work 242 | in an aggregate does not cause this License to apply to the other 243 | parts of the aggregate. 244 | 245 | 6. Conveying Non-Source Forms. 
246 | 247 | You may convey a covered work in object code form under the terms 248 | of sections 4 and 5, provided that you also convey the 249 | machine-readable Corresponding Source under the terms of this License, 250 | in one of these ways: 251 | 252 | a) Convey the object code in, or embodied in, a physical product 253 | (including a physical distribution medium), accompanied by the 254 | Corresponding Source fixed on a durable physical medium 255 | customarily used for software interchange. 256 | 257 | b) Convey the object code in, or embodied in, a physical product 258 | (including a physical distribution medium), accompanied by a 259 | written offer, valid for at least three years and valid for as 260 | long as you offer spare parts or customer support for that product 261 | model, to give anyone who possesses the object code either (1) a 262 | copy of the Corresponding Source for all the software in the 263 | product that is covered by this License, on a durable physical 264 | medium customarily used for software interchange, for a price no 265 | more than your reasonable cost of physically performing this 266 | conveying of source, or (2) access to copy the 267 | Corresponding Source from a network server at no charge. 268 | 269 | c) Convey individual copies of the object code with a copy of the 270 | written offer to provide the Corresponding Source. This 271 | alternative is allowed only occasionally and noncommercially, and 272 | only if you received the object code with such an offer, in accord 273 | with subsection 6b. 274 | 275 | d) Convey the object code by offering access from a designated 276 | place (gratis or for a charge), and offer equivalent access to the 277 | Corresponding Source in the same way through the same place at no 278 | further charge. You need not require recipients to copy the 279 | Corresponding Source along with the object code. If the place to 280 | copy the object code is a network server, the Corresponding Source 281 | may be on a different server (operated by you or a third party) 282 | that supports equivalent copying facilities, provided you maintain 283 | clear directions next to the object code saying where to find the 284 | Corresponding Source. Regardless of what server hosts the 285 | Corresponding Source, you remain obligated to ensure that it is 286 | available for as long as needed to satisfy these requirements. 287 | 288 | e) Convey the object code using peer-to-peer transmission, provided 289 | you inform other peers where the object code and Corresponding 290 | Source of the work are being offered to the general public at no 291 | charge under subsection 6d. 292 | 293 | A separable portion of the object code, whose source code is excluded 294 | from the Corresponding Source as a System Library, need not be 295 | included in conveying the object code work. 296 | 297 | A "User Product" is either (1) a "consumer product", which means any 298 | tangible personal property which is normally used for personal, family, 299 | or household purposes, or (2) anything designed or sold for incorporation 300 | into a dwelling. In determining whether a product is a consumer product, 301 | doubtful cases shall be resolved in favor of coverage. For a particular 302 | product received by a particular user, "normally used" refers to a 303 | typical or common use of that class of product, regardless of the status 304 | of the particular user or of the way in which the particular user 305 | actually uses, or expects or is expected to use, the product. 
A product 306 | is a consumer product regardless of whether the product has substantial 307 | commercial, industrial or non-consumer uses, unless such uses represent 308 | the only significant mode of use of the product. 309 | 310 | "Installation Information" for a User Product means any methods, 311 | procedures, authorization keys, or other information required to install 312 | and execute modified versions of a covered work in that User Product from 313 | a modified version of its Corresponding Source. The information must 314 | suffice to ensure that the continued functioning of the modified object 315 | code is in no case prevented or interfered with solely because 316 | modification has been made. 317 | 318 | If you convey an object code work under this section in, or with, or 319 | specifically for use in, a User Product, and the conveying occurs as 320 | part of a transaction in which the right of possession and use of the 321 | User Product is transferred to the recipient in perpetuity or for a 322 | fixed term (regardless of how the transaction is characterized), the 323 | Corresponding Source conveyed under this section must be accompanied 324 | by the Installation Information. But this requirement does not apply 325 | if neither you nor any third party retains the ability to install 326 | modified object code on the User Product (for example, the work has 327 | been installed in ROM). 328 | 329 | The requirement to provide Installation Information does not include a 330 | requirement to continue to provide support service, warranty, or updates 331 | for a work that has been modified or installed by the recipient, or for 332 | the User Product in which it has been modified or installed. Access to a 333 | network may be denied when the modification itself materially and 334 | adversely affects the operation of the network or violates the rules and 335 | protocols for communication across the network. 336 | 337 | Corresponding Source conveyed, and Installation Information provided, 338 | in accord with this section must be in a format that is publicly 339 | documented (and with an implementation available to the public in 340 | source code form), and must require no special password or key for 341 | unpacking, reading or copying. 342 | 343 | 7. Additional Terms. 344 | 345 | "Additional permissions" are terms that supplement the terms of this 346 | License by making exceptions from one or more of its conditions. 347 | Additional permissions that are applicable to the entire Program shall 348 | be treated as though they were included in this License, to the extent 349 | that they are valid under applicable law. If additional permissions 350 | apply only to part of the Program, that part may be used separately 351 | under those permissions, but the entire Program remains governed by 352 | this License without regard to the additional permissions. 353 | 354 | When you convey a copy of a covered work, you may at your option 355 | remove any additional permissions from that copy, or from any part of 356 | it. (Additional permissions may be written to require their own 357 | removal in certain cases when you modify the work.) You may place 358 | additional permissions on material, added by you to a covered work, 359 | for which you have or can give appropriate copyright permission. 
360 | 361 | Notwithstanding any other provision of this License, for material you 362 | add to a covered work, you may (if authorized by the copyright holders of 363 | that material) supplement the terms of this License with terms: 364 | 365 | a) Disclaiming warranty or limiting liability differently from the 366 | terms of sections 15 and 16 of this License; or 367 | 368 | b) Requiring preservation of specified reasonable legal notices or 369 | author attributions in that material or in the Appropriate Legal 370 | Notices displayed by works containing it; or 371 | 372 | c) Prohibiting misrepresentation of the origin of that material, or 373 | requiring that modified versions of such material be marked in 374 | reasonable ways as different from the original version; or 375 | 376 | d) Limiting the use for publicity purposes of names of licensors or 377 | authors of the material; or 378 | 379 | e) Declining to grant rights under trademark law for use of some 380 | trade names, trademarks, or service marks; or 381 | 382 | f) Requiring indemnification of licensors and authors of that 383 | material by anyone who conveys the material (or modified versions of 384 | it) with contractual assumptions of liability to the recipient, for 385 | any liability that these contractual assumptions directly impose on 386 | those licensors and authors. 387 | 388 | All other non-permissive additional terms are considered "further 389 | restrictions" within the meaning of section 10. If the Program as you 390 | received it, or any part of it, contains a notice stating that it is 391 | governed by this License along with a term that is a further 392 | restriction, you may remove that term. If a license document contains 393 | a further restriction but permits relicensing or conveying under this 394 | License, you may add to a covered work material governed by the terms 395 | of that license document, provided that the further restriction does 396 | not survive such relicensing or conveying. 397 | 398 | If you add terms to a covered work in accord with this section, you 399 | must place, in the relevant source files, a statement of the 400 | additional terms that apply to those files, or a notice indicating 401 | where to find the applicable terms. 402 | 403 | Additional terms, permissive or non-permissive, may be stated in the 404 | form of a separately written license, or stated as exceptions; 405 | the above requirements apply either way. 406 | 407 | 8. Termination. 408 | 409 | You may not propagate or modify a covered work except as expressly 410 | provided under this License. Any attempt otherwise to propagate or 411 | modify it is void, and will automatically terminate your rights under 412 | this License (including any patent licenses granted under the third 413 | paragraph of section 11). 414 | 415 | However, if you cease all violation of this License, then your 416 | license from a particular copyright holder is reinstated (a) 417 | provisionally, unless and until the copyright holder explicitly and 418 | finally terminates your license, and (b) permanently, if the copyright 419 | holder fails to notify you of the violation by some reasonable means 420 | prior to 60 days after the cessation. 
421 | 422 | Moreover, your license from a particular copyright holder is 423 | reinstated permanently if the copyright holder notifies you of the 424 | violation by some reasonable means, this is the first time you have 425 | received notice of violation of this License (for any work) from that 426 | copyright holder, and you cure the violation prior to 30 days after 427 | your receipt of the notice. 428 | 429 | Termination of your rights under this section does not terminate the 430 | licenses of parties who have received copies or rights from you under 431 | this License. If your rights have been terminated and not permanently 432 | reinstated, you do not qualify to receive new licenses for the same 433 | material under section 10. 434 | 435 | 9. Acceptance Not Required for Having Copies. 436 | 437 | You are not required to accept this License in order to receive or 438 | run a copy of the Program. Ancillary propagation of a covered work 439 | occurring solely as a consequence of using peer-to-peer transmission 440 | to receive a copy likewise does not require acceptance. However, 441 | nothing other than this License grants you permission to propagate or 442 | modify any covered work. These actions infringe copyright if you do 443 | not accept this License. Therefore, by modifying or propagating a 444 | covered work, you indicate your acceptance of this License to do so. 445 | 446 | 10. Automatic Licensing of Downstream Recipients. 447 | 448 | Each time you convey a covered work, the recipient automatically 449 | receives a license from the original licensors, to run, modify and 450 | propagate that work, subject to this License. You are not responsible 451 | for enforcing compliance by third parties with this License. 452 | 453 | An "entity transaction" is a transaction transferring control of an 454 | organization, or substantially all assets of one, or subdividing an 455 | organization, or merging organizations. If propagation of a covered 456 | work results from an entity transaction, each party to that 457 | transaction who receives a copy of the work also receives whatever 458 | licenses to the work the party's predecessor in interest had or could 459 | give under the previous paragraph, plus a right to possession of the 460 | Corresponding Source of the work from the predecessor in interest, if 461 | the predecessor has it or can get it with reasonable efforts. 462 | 463 | You may not impose any further restrictions on the exercise of the 464 | rights granted or affirmed under this License. For example, you may 465 | not impose a license fee, royalty, or other charge for exercise of 466 | rights granted under this License, and you may not initiate litigation 467 | (including a cross-claim or counterclaim in a lawsuit) alleging that 468 | any patent claim is infringed by making, using, selling, offering for 469 | sale, or importing the Program or any portion of it. 470 | 471 | 11. Patents. 472 | 473 | A "contributor" is a copyright holder who authorizes use under this 474 | License of the Program or a work on which the Program is based. The 475 | work thus licensed is called the contributor's "contributor version". 
476 | 477 | A contributor's "essential patent claims" are all patent claims 478 | owned or controlled by the contributor, whether already acquired or 479 | hereafter acquired, that would be infringed by some manner, permitted 480 | by this License, of making, using, or selling its contributor version, 481 | but do not include claims that would be infringed only as a 482 | consequence of further modification of the contributor version. For 483 | purposes of this definition, "control" includes the right to grant 484 | patent sublicenses in a manner consistent with the requirements of 485 | this License. 486 | 487 | Each contributor grants you a non-exclusive, worldwide, royalty-free 488 | patent license under the contributor's essential patent claims, to 489 | make, use, sell, offer for sale, import and otherwise run, modify and 490 | propagate the contents of its contributor version. 491 | 492 | In the following three paragraphs, a "patent license" is any express 493 | agreement or commitment, however denominated, not to enforce a patent 494 | (such as an express permission to practice a patent or covenant not to 495 | sue for patent infringement). To "grant" such a patent license to a 496 | party means to make such an agreement or commitment not to enforce a 497 | patent against the party. 498 | 499 | If you convey a covered work, knowingly relying on a patent license, 500 | and the Corresponding Source of the work is not available for anyone 501 | to copy, free of charge and under the terms of this License, through a 502 | publicly available network server or other readily accessible means, 503 | then you must either (1) cause the Corresponding Source to be so 504 | available, or (2) arrange to deprive yourself of the benefit of the 505 | patent license for this particular work, or (3) arrange, in a manner 506 | consistent with the requirements of this License, to extend the patent 507 | license to downstream recipients. "Knowingly relying" means you have 508 | actual knowledge that, but for the patent license, your conveying the 509 | covered work in a country, or your recipient's use of the covered work 510 | in a country, would infringe one or more identifiable patents in that 511 | country that you have reason to believe are valid. 512 | 513 | If, pursuant to or in connection with a single transaction or 514 | arrangement, you convey, or propagate by procuring conveyance of, a 515 | covered work, and grant a patent license to some of the parties 516 | receiving the covered work authorizing them to use, propagate, modify 517 | or convey a specific copy of the covered work, then the patent license 518 | you grant is automatically extended to all recipients of the covered 519 | work and works based on it. 520 | 521 | A patent license is "discriminatory" if it does not include within 522 | the scope of its coverage, prohibits the exercise of, or is 523 | conditioned on the non-exercise of one or more of the rights that are 524 | specifically granted under this License. 
You may not convey a covered 525 | work if you are a party to an arrangement with a third party that is 526 | in the business of distributing software, under which you make payment 527 | to the third party based on the extent of your activity of conveying 528 | the work, and under which the third party grants, to any of the 529 | parties who would receive the covered work from you, a discriminatory 530 | patent license (a) in connection with copies of the covered work 531 | conveyed by you (or copies made from those copies), or (b) primarily 532 | for and in connection with specific products or compilations that 533 | contain the covered work, unless you entered into that arrangement, 534 | or that patent license was granted, prior to 28 March 2007. 535 | 536 | Nothing in this License shall be construed as excluding or limiting 537 | any implied license or other defenses to infringement that may 538 | otherwise be available to you under applicable patent law. 539 | 540 | 12. No Surrender of Others' Freedom. 541 | 542 | If conditions are imposed on you (whether by court order, agreement or 543 | otherwise) that contradict the conditions of this License, they do not 544 | excuse you from the conditions of this License. If you cannot convey a 545 | covered work so as to satisfy simultaneously your obligations under this 546 | License and any other pertinent obligations, then as a consequence you may 547 | not convey it at all. For example, if you agree to terms that obligate you 548 | to collect a royalty for further conveying from those to whom you convey 549 | the Program, the only way you could satisfy both those terms and this 550 | License would be to refrain entirely from conveying the Program. 551 | 552 | 13. Use with the GNU Affero General Public License. 553 | 554 | Notwithstanding any other provision of this License, you have 555 | permission to link or combine any covered work with a work licensed 556 | under version 3 of the GNU Affero General Public License into a single 557 | combined work, and to convey the resulting work. The terms of this 558 | License will continue to apply to the part which is the covered work, 559 | but the special requirements of the GNU Affero General Public License, 560 | section 13, concerning interaction through a network will apply to the 561 | combination as such. 562 | 563 | 14. Revised Versions of this License. 564 | 565 | The Free Software Foundation may publish revised and/or new versions of 566 | the GNU General Public License from time to time. Such new versions will 567 | be similar in spirit to the present version, but may differ in detail to 568 | address new problems or concerns. 569 | 570 | Each version is given a distinguishing version number. If the 571 | Program specifies that a certain numbered version of the GNU General 572 | Public License "or any later version" applies to it, you have the 573 | option of following the terms and conditions either of that numbered 574 | version or of any later version published by the Free Software 575 | Foundation. If the Program does not specify a version number of the 576 | GNU General Public License, you may choose any version ever published 577 | by the Free Software Foundation. 578 | 579 | If the Program specifies that a proxy can decide which future 580 | versions of the GNU General Public License can be used, that proxy's 581 | public statement of acceptance of a version permanently authorizes you 582 | to choose that version for the Program. 
583 | 584 | Later license versions may give you additional or different 585 | permissions. However, no additional obligations are imposed on any 586 | author or copyright holder as a result of your choosing to follow a 587 | later version. 588 | 589 | 15. Disclaimer of Warranty. 590 | 591 | THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY 592 | APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT 593 | HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY 594 | OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, 595 | THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 596 | PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM 597 | IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF 598 | ALL NECESSARY SERVICING, REPAIR OR CORRECTION. 599 | 600 | 16. Limitation of Liability. 601 | 602 | IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING 603 | WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS 604 | THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY 605 | GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE 606 | USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF 607 | DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD 608 | PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), 609 | EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF 610 | SUCH DAMAGES. 611 | 612 | 17. Interpretation of Sections 15 and 16. 613 | 614 | If the disclaimer of warranty and limitation of liability provided 615 | above cannot be given local legal effect according to their terms, 616 | reviewing courts shall apply local law that most closely approximates 617 | an absolute waiver of all civil liability in connection with the 618 | Program, unless a warranty or assumption of liability accompanies a 619 | copy of the Program in return for a fee. 620 | 621 | END OF TERMS AND CONDITIONS 622 | 623 | How to Apply These Terms to Your New Programs 624 | 625 | If you develop a new program, and you want it to be of the greatest 626 | possible use to the public, the best way to achieve this is to make it 627 | free software which everyone can redistribute and change under these terms. 628 | 629 | To do so, attach the following notices to the program. It is safest 630 | to attach them to the start of each source file to most effectively 631 | state the exclusion of warranty; and each file should have at least 632 | the "copyright" line and a pointer to where the full notice is found. 633 | 634 | 635 | Copyright (C) 636 | 637 | This program is free software: you can redistribute it and/or modify 638 | it under the terms of the GNU General Public License as published by 639 | the Free Software Foundation, either version 3 of the License, or 640 | (at your option) any later version. 641 | 642 | This program is distributed in the hope that it will be useful, 643 | but WITHOUT ANY WARRANTY; without even the implied warranty of 644 | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 645 | GNU General Public License for more details. 646 | 647 | You should have received a copy of the GNU General Public License 648 | along with this program. If not, see . 649 | 650 | Also add information on how to contact you by electronic and paper mail. 
651 | 652 | If the program does terminal interaction, make it output a short 653 | notice like this when it starts in an interactive mode: 654 | 655 | Copyright (C) 656 | This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'. 657 | This is free software, and you are welcome to redistribute it 658 | under certain conditions; type `show c' for details. 659 | 660 | The hypothetical commands `show w' and `show c' should show the appropriate 661 | parts of the General Public License. Of course, your program's commands 662 | might be different; for a GUI interface, you would use an "about box". 663 | 664 | You should also get your employer (if you work as a programmer) or school, 665 | if any, to sign a "copyright disclaimer" for the program, if necessary. 666 | For more information on this, and how to apply and follow the GNU GPL, see 667 | . 668 | 669 | The GNU General Public License does not permit incorporating your program 670 | into proprietary programs. If your program is a subroutine library, you 671 | may consider it more useful to permit linking proprietary applications with 672 | the library. If this is what you want to do, use the GNU Lesser General 673 | Public License instead of this License. But first, please read 674 | . 675 | --------------------------------------------------------------------------------