├── .gitignore
├── silence.wav
├── requirements.txt
├── README.md
├── chain.py
├── state.py
├── AwesomeProjects.md
├── playback_stream_track.py
├── audio_utils.py
├── index.html
├── client.js
├── styles.css
├── server.py
└── LICENSE
/.gitignore:
--------------------------------------------------------------------------------
1 | /.venv/
2 | /*.wav
3 | /.idea/
4 | /__pycache__/
5 | /.DS_Store
6 |
--------------------------------------------------------------------------------
/silence.wav:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/lalanikarim/webrtc-ai-voice-chat/HEAD/silence.wav
--------------------------------------------------------------------------------
/requirements.txt:
--------------------------------------------------------------------------------
1 | aiohttp~=3.9.5
2 | aiortc~=1.8.0
3 | librosa~=0.10.2
4 | transformers~=4.40.1
5 | torch~=2.3.0
6 | torchaudio~=2.3.0
7 | numpy~=1.26.4
8 | scipy~=1.13.0
9 | av~=11.0.0
10 | langchain-core==0.1.50
11 | langchain-community==0.0.36
12 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | WebRTC AI Voice Chat
2 | ====================
3 |
4 | Overview
5 | --------
6 |
7 | The goal of this project is to demo a `speech <-> langchain <-> audio` workflow.
8 |
9 | 1. Speech to text uses [OpenAI's open source Whisper small](https://huggingface.co/openai/whisper-small) model.
10 | 2. The chat model used for this demo is [Microsoft's Phi-3](https://azure.microsoft.com/en-us/blog/introducing-phi-3-redefining-whats-possible-with-slms/), running locally via [Ollama](https://ollama.com/).
11 | 3. Text to speech uses [Suno's open source Bark small](https://huggingface.co/suno/bark-small) model.
12 |
13 | For interesting projects and related resources, check out the [Awesome Projects Page](AwesomeProjects.md).
14 |
15 | Demo
16 | ----
17 |
18 | Unmute the audio to hear the responses.
19 |
20 | https://github.com/lalanikarim/webrtc-ai-voice-chat/assets/1296705/7aa05d6f-ff05-4c72-b2e8-6e4e1119a68c
21 |
22 |
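23 | Setup
24 | -----
25 |
26 | A typical local setup looks like this (a sketch: it assumes Python 3.11+, since `playback_stream_track.py` relies on `asyncio.timeout`, plus a running [Ollama](https://ollama.com/) instance with the `phi3` model pulled):
27 |
28 | ```bash
29 | python -m venv .venv && source .venv/bin/activate
30 | pip install -r requirements.txt
31 | ollama pull phi3
32 | python server.py --port 8080 --ollama-host http://localhost:11434
33 | ```
34 |
35 | Then open http://localhost:8080 in a browser, click the power button to connect, and use the record button to talk to the assistant.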
--------------------------------------------------------------------------------
/chain.py:
--------------------------------------------------------------------------------
1 | from langchain_core.prompts.prompt import PromptTemplate
2 | from langchain_community.chat_models import ChatOllama
3 | from langchain_core.output_parsers import StrOutputParser
4 |
5 |
6 | class Chain:
7 | def __init__(self, model_name="phi3", ollama_host="http://localhost:11434"):
8 | self.__prompt = PromptTemplate.from_template("""
9 | You are a helpful assistant.
10 | Respond truthfully to the best of your abilities.
11 | Only answer in short one or two sentences.
12 | Do not answer with a question.
13 | You are allowed to use these in your response to express emotion:
14 | [laughter]
15 | [laughs]
16 | [sighs]
17 | [music]
18 | [gasps]
19 | [clears throat]
20 |
21 | You can also use these:
22 | — or … for hesitations
23 | ♪ for song lyrics
24 | capitalization for emphasis of a word
25 |
26 | Human: {human_input}
27 | AI:
28 | """)
29 | self.__model_name = model_name
30 | self.__ollama_host = ollama_host
31 | self.__chain = self.__create_chain()
32 |
33 | def __create_chain(self):
34 | model = ChatOllama(model=self.__model_name, base_url=self.__ollama_host)
35 | return self.__prompt | model | StrOutputParser()
36 |
37 | def set_model(self, model_name: str):
38 | self.__model_name = model_name
39 | self.__chain = self.__create_chain()
40 |
41 | def set_ollama_host(self, ollama_host: str):
42 | self.__ollama_host = ollama_host
43 | self.__chain = self.__create_chain()
44 |
45 | def get_chain(self):
46 | return self.__chain
47 |
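48 |
49 | # A minimal usage sketch (an assumption for illustration: it expects a local
50 | # Ollama server at http://localhost:11434 serving the `phi3` model):
51 | if __name__ == "__main__":
52 |     chain = Chain()
53 |     print(chain.get_chain().invoke({"human_input": "Tell me a short joke."}))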
--------------------------------------------------------------------------------
/state.py:
--------------------------------------------------------------------------------
1 | from asyncio import Task
2 | import copy
3 | import uuid
4 |
5 | import librosa
6 | import numpy as np
7 | from aiortc import RTCPeerConnection, MediaStreamTrack
8 | from av import AudioFrame
9 |
10 | from playback_stream_track import PlaybackStreamTrack
11 | import logging
12 |
13 |
14 | class State:
15 |     track: MediaStreamTrack
16 |     task: Task
17 |     logger = logging.getLogger("pc")
18 |
19 |     def __init__(self):
20 |         self.pc = RTCPeerConnection()
21 |         self.id = str(uuid.uuid4())
22 |         self.filename = f"{self.id}.wav"
23 |         # per-connection state (previously class-level, i.e. shared across connections)
24 |         self.buffer: list = []
25 |         self.recording: bool = False
26 |         self.sample_rate: int = 16000
27 |         self.counter: int = 0
28 |         self.response_player = PlaybackStreamTrack()
29 |
30 |     def log_info(self, msg, *args):
31 |         self.logger.info(self.id + " " + msg, *args)
32 |
33 |     def append_frame(self, frame: AudioFrame):
34 |         buffer = frame.to_ndarray().flatten().astype(np.int16)
35 |         max_abs = np.max(np.abs(buffer))
36 |         if max_abs > 50:  # skip frames that are effectively silence
37 |             # flattening interleaved stereo doubles the effective sample rate
38 |             if self.sample_rate != frame.sample_rate * 2:
39 |                 self.sample_rate = frame.sample_rate * 2
40 |             self.buffer.append(buffer)
41 |
42 |     def flush_audio(self):
43 |         self.buffer = np.array(self.buffer).flatten()
44 |         self.log_info(f"Buffer Size: {len(self.buffer)}")
45 |         # convert int16 samples to float and resample to 16 kHz for Whisper
46 |         data = copy.deepcopy(self.buffer)
47 |         data = librosa.util.buf_to_float(data)
48 |         self.buffer = []
49 |         if self.sample_rate != 16000:
50 |             data = librosa.resample(data, orig_sr=self.sample_rate,
51 |                                     target_sr=16000)
52 |         return data
53 |
--------------------------------------------------------------------------------
/AwesomeProjects.md:
--------------------------------------------------------------------------------
1 | Awesome Projects and Resources
2 | ==============================
3 |
4 | Resources
5 | ---------
6 | * [WebRTC docs](https://developer.mozilla.org/en-US/docs/Web/API/WebRTC_API) - on https://developer.mozilla.org
7 | * [LangChain](https://www.langchain.com/) - A framework for developing applications powered by large language models
8 | * [Ollama](https://ollama.com/) - A local LLM inference engine for running Llama 3, Mistral, Gemma, and other LLMs
9 | * [aiortc](https://aiortc.readthedocs.io/en/latest/) - A Python Library for WebRTC and ORTC communication
10 |
11 | ### Models
12 | * [Whisper by OpenAI](https://github.com/openai/whisper) - Whisper is a general-purpose speech recognition model
13 | * [Bark by Suno.ai](https://github.com/suno-ai/bark) - Bark is Suno's open-source text-to-speech+ model
14 |
15 | ### Misc
16 | * [Animated Audio Wave Form Button](https://codepen.io/smatify/pen/xeVJda) - by Smatify
17 | * [CSS Loaders](https://css-loaders.com/continuous/) - Pure CSS Loaders
18 | * [WhatsApp CSS theme](https://codepen.io/swaibu/pen/QxJjwN) - by Rumbiiha swaibu
19 | * [Press To Talk Button](https://codepen.io/henriquetsc/pen/VmVgVe) - by Henrique Cansela
20 |
21 | Tutorials and Articles
22 | ----------------------
23 | **Coming Soon**
24 |
25 | Projects
26 | --------
27 | * [Style Guide - a fashion focused AI Assistant](https://github.com/lalanikarim/style-guide-ai-assistant/)
28 |
29 | Contribute to this page
30 | -----------------------
31 |
32 | If you found this repo useful and it inspired you to build interesting projects using it as a template, or if you have written tutorials or articles about the underlying technologies, consider submitting a PR to have them listed on this page.
33 | Include a short one-sentence description along with the GitHub repo or web URL where your project is hosted, so others looking for inspiration can check it out.
34 |
--------------------------------------------------------------------------------
/playback_stream_track.py:
--------------------------------------------------------------------------------
1 | import asyncio
2 | from typing import Optional
3 |
4 | from aiortc import MediaStreamTrack, RTCDataChannel
5 | from aiortc.contrib.media import MediaPlayer
6 |
7 |
8 | class PlaybackStreamTrack(MediaStreamTrack):
9 |     kind = "audio"
10 |
11 |     def __init__(self):
12 |         super().__init__()  # don't forget this!
13 |         self.response_ready: bool = False
14 |         self.previous_response_silence: bool = False
15 |         self.track: Optional[MediaStreamTrack] = None
16 |         self.counter: int = 0
17 |         self.time: float = 0.0
18 |         self.channel: Optional[RTCDataChannel] = None
19 |
20 | def select_track(self):
21 | if self.response_ready:
22 | self.track = MediaPlayer("bark_out.wav", format="wav", loop=False).audio
23 | else:
24 | self.track = MediaPlayer("silence.wav", format="wav", loop=False).audio
25 | if self.channel is not None and self.channel.readyState == "open":
26 | if self.response_ready:
27 | self.channel.send("playing: response")
28 | self.previous_response_silence = False
29 | else:
30 | if not self.previous_response_silence:
31 | self.channel.send("playing: silence")
32 | self.previous_response_silence = True
33 |
34 |     async def recv(self):
35 |         self.counter += 1
36 |         if self.track is None:
37 |             self.select_track()
38 |         try:
39 |             async with asyncio.timeout(1):  # times out once the current clip ends
40 |                 frame = await self.track.recv()
41 |         except Exception:
42 |             self.select_track()
43 |             if self.response_ready:
44 |                 self.response_ready = False
45 |             frame = await self.track.recv()
46 |         if frame.pts < frame.sample_rate * self.time:  # keep pts monotonic across track switches
47 |             frame.pts = int(frame.sample_rate * self.time)
48 |         self.time += 0.02  # one 20 ms audio frame per call
49 |         return frame
50 |
--------------------------------------------------------------------------------
/audio_utils.py:
--------------------------------------------------------------------------------
1 | from typing import List
2 |
3 | import torch
4 | from scipy.io import wavfile
5 | from transformers import WhisperProcessor, WhisperForConditionalGeneration, AutoProcessor, BarkModel
6 |
7 |
8 | class Whisper:
9 | def __init__(self, model_name="openai/whisper-small"):
10 | self.__device = "cuda:0" if torch.cuda.is_available() else "cpu"
11 | # whisper
12 | self.__model = WhisperForConditionalGeneration.from_pretrained(model_name).to(
13 | self.__device)
14 | self.__model.config.forced_decoder_ids = None
15 | self.__processor = WhisperProcessor.from_pretrained(model_name)
16 |
17 | def transcribe(self, data) -> List[str]:
18 | input_features = self.__processor(data, sampling_rate=16000,
19 | return_tensors="pt").input_features
20 | if self.__device != "cpu":
21 | input_features = input_features.to(self.__device, torch.float32)
22 | # generate token ids
23 | predicted_ids = self.__model.generate(input_features)
24 | transcription = self.__processor.batch_decode(predicted_ids, skip_special_tokens=True)
25 | return transcription
26 |
27 |
28 | class Bark:
29 | def __init__(self, model_name="suno/bark-small", voice_preset="v2/en_speaker_0"):
30 | self.__device = "cuda:0" if torch.cuda.is_available() else "cpu"
31 | # suno/bark
32 | self.__model = BarkModel.from_pretrained(model_name).to(self.__device)
33 |         self.__processor = AutoProcessor.from_pretrained(model_name)
34 | self.__voice_preset = voice_preset
35 |
36 | def set_voice_preset(self, voice_preset):
37 | self.__voice_preset = voice_preset
38 |
39 | def synthesize(self, text):
40 |         input_features = self.__processor(text, voice_preset=self.__voice_preset).to(self.__device)
41 | audio_array = self.__model.generate(**input_features)
42 | if self.__device != "cpu":
43 | audio_array = audio_array.to(self.__device, torch.float32)
44 | audio_array = audio_array.cpu().numpy().squeeze()
45 | sample_rate = self.__model.generation_config.sample_rate
46 | wavfile.write("bark_out.wav", rate=sample_rate, data=audio_array)
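47 |
48 |
49 | # A minimal smoke test sketch ("sample.wav" is a hypothetical 16 kHz mono recording):
50 | if __name__ == "__main__":
51 |     import librosa
52 |     audio, _ = librosa.load("sample.wav", sr=16000)  # Whisper expects 16 kHz audio
53 |     print(Whisper().transcribe(audio))
54 |     Bark().synthesize("Hello there! [laughs]")  # writes bark_out.wav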
--------------------------------------------------------------------------------
/index.html:
--------------------------------------------------------------------------------
1 | <!DOCTYPE html>
2 | <!-- Markup sketch: the elements, ids, and classes below follow what
3 |      client.js and styles.css reference. -->
4 | <html lang="en">
5 | <head>
6 |     <meta charset="utf-8"/>
7 |     <meta name="viewport" content="width=device-width, initial-scale=1"/>
8 |     <title>WebRTC AI Voice Chat</title>
9 |     <link rel="stylesheet" href="styles.css"/>
10 | </head>
11 | <body>
12 | <div class="container full-height">
13 |     <div class="chat-container">
14 |         <div class="user-bar">
15 |             <div class="name d-none">phi3</div>
16 |             <select id="models" class="d-none" onchange="changeModel()">
17 |                 <option value="phi3" selected>phi3</option>
18 |             </select>
19 |             <select id="presets" class="d-none" onchange="changePreset()">
20 |                 <option value="v2/en_speaker_0" selected>v2/en_speaker_0</option>
21 |                 <option value="v2/en_speaker_1">v2/en_speaker_1</option>
22 |             </select>
23 |             <div class="actions">
24 |                 <label><input id="use-stun" type="checkbox"/> STUN</label>
25 |                 <button id="power" type="button" class="btn text-success">&#x23FB;</button>
26 |             </div>
27 |         </div>
28 |         <div class="conversation">
29 |             <div class="status">
30 |                 <div>WebRTC Connection State: <span id="connectionStatus"></span></div>
31 |             </div>
32 |             <div id="messagesContainer" class="d-none">
33 |                 <div class="conversation-container"></div>
34 |             </div>
35 |             <div id="recordingControls" class="conversation-compose">
36 |                 <div class="processing d-none"></div>
37 |                 <div class="wave d-none">
38 |                     <ul>
39 |                         <li></li><li></li><li></li><li></li><li></li>
40 |                         <li></li><li></li><li></li><li></li><li></li>
41 |                         <li></li><li></li><li></li><li></li><li></li>
42 |                     </ul>
43 |                 </div>
44 |                 <button type="button" class="send">
45 |                     <div class="circle start d-none" onclick="record()">&#9679;</div>
46 |                     <div class="circle stop d-none" onclick="stopRecord()">&#9632;</div>
47 |                     <div class="circle wait">&#8987;</div>
48 |                 </button>
49 |             </div>
50 |         </div>
51 |     </div>
52 | </div>
53 | <audio id="remoteAudio" autoplay></audio>
54 | <script src="client.js"></script>
55 | </body>
56 | </html>
--------------------------------------------------------------------------------
/client.js:
--------------------------------------------------------------------------------
1 | let state = {
2 | pc:null,
3 | dc:null,
4 | stream:null,
5 | }
6 |
7 | let connectionStatus = document.querySelector("span#connectionStatus")
8 | let wave = document.querySelector("div.wave")
9 | let processing = document.querySelector("div.processing")
10 | let messagesContainer = document.querySelector("div#messagesContainer")
11 | let chatNameContainer = document.querySelector("div.chat-container .user-bar .name")
12 | let powerButton = document.querySelector("button#power")
13 | let presetsSelect = document.querySelector("select#presets")
14 | let modelsSelect = document.querySelector("select#models")
15 | let startRecordDiv = document.querySelector("div.circle.start")
16 | let stopRecordDiv = document.querySelector("div.circle.stop")
17 | let waitRecordDiv = document.querySelector("div.circle.wait")
18 |
19 | function getConnectionStatus() {
20 | let status = "closed"
21 | if (state.pc) {
22 | status = state.pc.connectionState
23 | }
24 | connectionStatus.textContent = status
25 | }
26 |
27 | function negotiate() {
28 | //pc.addTransceiver('audio', { direction: 'sendrecv' });
29 | return state.pc.createOffer().then((offer) => {
30 | return state.pc.setLocalDescription(offer);
31 | }).then(() => {
32 | // wait for ICE gathering to complete
33 | return new Promise((resolve) => {
34 | if (state.pc.iceGatheringState === 'complete') {
35 | resolve();
36 | } else {
37 | const checkState = () => {
38 | if (state.pc.iceGatheringState === 'complete') {
39 | state.pc.removeEventListener('icegatheringstatechange', checkState);
40 | resolve();
41 | }
42 | };
43 | state.pc.addEventListener('icegatheringstatechange', checkState);
44 | }
45 | });
46 | }).then(() => {
47 | const offer = state.pc.localDescription;
48 | return fetch('/offer', {
49 | body: JSON.stringify({
50 | sdp: offer.sdp,
51 | type: offer.type,
52 | }),
53 | headers: {
54 | 'Content-Type': 'application/json'
55 | },
56 | method: 'POST'
57 | });
58 | }).then((response) => {
59 | return response.json();
60 | }).then((answer) => {
61 | return state.pc.setRemoteDescription(answer);
62 | }).catch((e) => {
63 | alert(e);
64 | });
65 | }
66 |
67 | function start() {
68 | stop()
69 |
70 | const config = {
71 | sdpSemantics: 'unified-plan'
72 | };
73 |
74 | if (document.getElementById('use-stun').checked) {
75 | config.iceServers = [{ urls: ['stun:stun.l.google.com:19302'] }];
76 | }
77 |
78 | state.pc = new RTCPeerConnection(config);
79 | state.pc.onconnectionstatechange = (ev) => {
80 | getConnectionStatus()
81 | }
82 | state.dc = state.pc.createDataChannel("chat")
83 | state.dc.onopen = (ev) => {
84 | console.log("Data channel is open and ready to use");
85 | state.dc.send("Hello server");
86 | }
87 | state.dc.onmessage = (ev) => {
88 | console.log('Received message: ' + ev.data);
89 | if(ev.data === "ready") {
90 | record()
91 | }
92 | if(ev.data.startsWith("Human:") || ev.data.startsWith("AI:")) {
93 | logmessage(ev.data)
94 | }
95 | if(ev.data.startsWith("playing:")) {
96 | if(!ev.data.endsWith("silence")) {
97 | hideElement(processing)
98 | showElement(wave)
99 | } else {
100 | hideElement(wave)
101 | hideElement(waitRecordDiv)
102 | showElement(startRecordDiv)
103 | }
104 | }
105 | }
106 | state.dc.onclose = () => {
107 | console.log("Data channel is closed");
108 | }
109 |
110 | // connect audio / video
111 | state.pc.ontrack = (ev) => {
112 | console.log('Received remote stream');
113 | document.querySelector('audio#remoteAudio').srcObject = ev.streams[0];
114 | }
115 | // Adding tracks
116 | // stream.getAudioTracks().forEach((track) => pc.addTrack(track, stream))
117 | // document.querySelector('button#start').style.display = 'none';
118 | //negotiate()
119 | getMedia()
120 | showElement(chatNameContainer)
121 | showElement(presetsSelect)
122 | showElement(modelsSelect)
123 | showElement(messagesContainer)
124 | showElement(startRecordDiv)
125 | hideElement(waitRecordDiv)
126 | //document.querySelector('button#stop').style.display = 'inline-block';
127 | }
128 | function logmessage(message) {
129 | let log = document.querySelector("div.conversation-container")
130 | let splits = message.split(": ")
131 | if (splits.length > 1) {
132 | let messageText = splits.slice(1).join(": ")
133 | if (messageText.trim().length > 0) {
134 | let newMessage = document.createElement("div")
135 | newMessage.classList.add("message")
136 | if (splits[0] === "AI") {
137 | newMessage.classList.add("received")
138 | } else {
139 | newMessage.classList.add("sent")
140 | }
141 | newMessage.textContent = messageText
142 | log.appendChild(newMessage)
143 | log.scrollTop = log.scrollHeight
144 | }
145 | }
146 | }
147 | function getMedia(){
148 | const constraints = {
149 | audio: true,
150 | video: false
151 | };
152 | navigator.mediaDevices
153 | .getUserMedia(constraints)
154 | .then(handleSuccess)
155 | .catch(handleFailure);
156 | }
157 |
158 | function stop() {
159 | hideElement(startRecordDiv)
160 | showElement(waitRecordDiv)
161 | hideElement(chatNameContainer)
162 | hideElement(presetsSelect)
163 | hideElement(modelsSelect)
164 | if(state.pc) {
165 | // close peer connection
166 | setTimeout(() => {
167 | state.pc.close();
168 | getConnectionStatus()
169 | state = {pc:null, dc:null, stream:null}
170 | }, 500);
171 | }
172 | }
173 |
174 | function record(){
175 | hideElement(wave)
176 | hideElement(startRecordDiv)
177 | showElement(stopRecordDiv)
178 | //getMedia()
179 | state.dc.send("start_recording")
180 | }
181 |
182 | function stopRecord() {
183 | state.dc.send("stop_recording")
184 | showElement(processing)
185 | hideElement(stopRecordDiv)
186 | showElement(waitRecordDiv)
187 | }
188 | function getResponse(){
189 | state.dc.send("get_response")
190 | }
191 | function getSilence(){
192 | state.dc.send("get_silence")
193 | }
194 | function handleSuccess(stream) {
195 | const tracks = stream.getAudioTracks()
196 | console.log("Received: ", tracks.length, " tracks")
197 | state.stream = stream
198 | state.stream.getAudioTracks().forEach((track) =>{
199 | state.pc.addTrack(track)
200 | })
201 | negotiate()
202 | }
203 |
204 | function handleFailure(error) {
205 | console.log('getUserMedia error: ', error);
206 | }
207 |
208 | function showElement(element) {
209 | element.classList.remove("d-none")
210 | }
211 | function hideElement(element) {
212 | element.classList.add("d-none")
213 | }
214 |
215 | function changePreset(){
216 | let preset = document.querySelector("select#presets").value
217 | state.dc.send("preset:" + preset)
218 | }
219 | function changeModel() {
220 | let model = document.querySelector("select#models").value
221 | state.dc.send("model:" + model)
222 | chatNameContainer.textContent = model
223 | }
224 |
225 | document.addEventListener('DOMContentLoaded', () => {
226 | getConnectionStatus()
227 | powerButton.onclick = () => {
228 | if(state.pc && state.pc.connectionState === "connected") {
229 | stop()
230 | powerButton.classList.remove("text-danger")
231 | powerButton.classList.add("text-success")
232 | } else {
233 | start()
234 | powerButton.classList.remove("text-success")
235 | powerButton.classList.add("text-danger")
236 | }
237 | }
238 | })
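239 |
240 | // Data-channel protocol, as handled above and in server.py:
241 | //   client -> server: "start_recording", "stop_recording", "get_response",
242 | //                     "get_silence", "preset:<voice preset>", "model:<model name>"
243 | //   server -> client: "Human: <transcription>", "AI: <response>",
244 | //                     "playing: response", "playing: silence"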
--------------------------------------------------------------------------------
/styles.css:
--------------------------------------------------------------------------------
1 | @import url(https://fonts.googleapis.com/css?family=Montserrat);
2 | @import url(https://cdn.jsdelivr.net/npm/bootstrap@4.4.1/dist/css/bootstrap.min.css);
3 |
4 | :root {
5 | --loader-color: #005e54;
6 | }
7 |
8 | html, body, .full-height, .chat-container, div#messagesContainer {
9 | height: 100%;
10 | }
11 |
12 | div#recordingControls {
13 | min-height: 55px;
14 | }
15 |
16 | div.wave {
17 | display:flex;
18 | justify-content:center;
19 | align-items:center;
20 | margin-right: -48px;
21 | }
22 |
23 | .wave ul {
24 | /* background: rgba(0, 0, 0, 0.8);*/
25 | width:200px;
26 | height:45px;
27 | display:flex;
28 | justify-content:center;
29 | align-items:center;
30 | padding:0;
31 | margin:0;
32 | transition:ease 0.2s;
33 | position:relative;
34 | }
35 |
36 | .wave ul li{
37 | list-style:none;
38 | height:20px;
39 | width:4px;
40 | border-radius:10px;
41 | background: var(--loader-color);
42 | margin:0 3px;
43 | padding:0;
44 | animation-name: wave4;
45 | animation-duration: 0.3s;
46 | animation-iteration-count: infinite;
47 | animation-direction: alternate;
48 | transition:ease 0.2s;
49 | }
50 |
51 | .wave ul li:nth-child(2) {
52 | animation-name: wave2;
53 | animation-delay:0.2s;
54 | }
55 | .wave ul li:nth-child(3) {
56 | animation-name: wave3;
57 | animation-delay:0.23s;
58 | animation-duration: 0.4s;
59 | }
60 | .wave ul li:nth-child(4) {
61 | animation-name: wave4;
62 | animation-delay:0.1s;
63 | animation-duration: 0.3s;
64 | }
65 | .wave ul li:nth-child(5) {
66 | animation-delay:0.5s;
67 | }
68 | .wave ul li:nth-child(6) {
69 | animation-name: wave2;
70 | animation-duration: 0.5s;
71 | }
72 | .wave ul li:nth-child(8) {
73 | animation-name: wave4;
74 | animation-delay:0.4s;
75 | animation-duration: 0.25s;
76 | }
77 | .wave ul li:nth-child(9) {
78 | animation-name: wave3;
79 | animation-delay:0.15s;
80 | }
81 | .wave ul li:nth-child(10) {
82 | animation-delay:0.5s;
83 | }
84 | .wave ul li:nth-child(11) {
85 | animation-name: wave2;
86 | animation-duration: 0.5s;
87 | }
88 | .wave ul li:nth-child(12) {
89 | animation-name: wave3;
90 | animation-delay:0.4s;
91 | animation-duration: 0.25s;
92 | }
93 | .wave ul li:nth-child(13) {
94 | animation-name: wave4;
95 | animation-delay:0.15s;
96 | }
97 | .wave ul li:nth-child(14) {
98 | animation-name: wave4;
99 |
100 | animation-duration: 0.5s;
101 | }
102 | .wave ul li:nth-child(15) {
103 | animation-name: wave4;
104 | animation-delay:0.1s;
105 | animation-duration: 0.5s;
106 | }
107 |
108 | @keyframes wave1 {
109 | from {transform:scaleY(1);}
110 | to {transform:scaleY(0.5);}
111 | }
112 | @keyframes wave2 {
113 | from {transform:scaleY(0.3);}
114 | to {transform:scaleY(0.6);}
115 | }
116 | @keyframes wave3 {
117 | from {transform:scaleY(0.6);}
118 | to {transform:scaleY(0.8);}
119 | }
120 | @keyframes wave4 {
121 | from {transform:scaleY(0.2);}
122 | to {transform:scaleY(0.5);}
123 | }
124 |
125 | .user-bar {
126 | height: 55px;
127 | background: #005e54;
128 | color: #fff;
129 | padding: 0 8px;
130 | font-size: 24px;
131 | position: relative;
132 | z-index: 1;
133 | }
134 |
135 | .user-bar:after {
136 | content: "";
137 | display: table;
138 | clear: both;
139 | }
140 |
141 | .user-bar div {
142 | float: left;
143 | transform: translateY(-50%);
144 | position: relative;
145 | top: 50%;
146 | }
147 |
148 | .user-bar .actions {
149 | float: right;
150 | margin: 0 0 0 20px;
151 | }
152 |
153 | .user-bar .actions.more {
154 | margin: 0 12px 0 32px;
155 | }
156 |
157 | .user-bar .actions.attachment {
158 | margin: 0 0 0 30px;
159 | }
160 |
161 | .user-bar .actions.attachment i {
162 | display: block;
163 | transform: rotate(-45deg);
164 | }
165 |
166 | .user-bar .avatar {
167 | margin: 0 0 0 5px;
168 | width: 36px;
169 | height: 36px;
170 | }
171 |
172 | .user-bar .avatar img {
173 | border-radius: 50%;
174 | box-shadow: 0 1px 0 rgba(255, 255, 255, 0.1);
175 | display: block;
176 | width: 100%;
177 | }
178 |
179 | .user-bar .name {
180 | font-size: 17px;
181 | font-weight: 600;
182 | text-overflow: ellipsis;
183 | letter-spacing: 0.3px;
184 | margin: 0 0 0 8px;
185 | overflow: hidden;
186 | white-space: nowrap;
187 | width: 110px;
188 | }
189 |
190 | .user-bar .status {
191 | display: block;
192 | font-size: 13px;
193 | font-weight: 400;
194 | letter-spacing: 0;
195 | }
196 |
197 | .conversation {
198 | flex-grow: 1;
199 | position: relative;
200 | background: #efe7dd;
201 | z-index: 0;
202 | max-height: calc(100% - 55px);
203 | }
204 |
205 | .conversation ::-webkit-scrollbar {
206 | transition: all .5s;
207 | width: 5px;
208 | height: 1px;
209 | z-index: 10;
210 | }
211 |
212 | .conversation ::-webkit-scrollbar-track {
213 | background: transparent;
214 | }
215 |
216 | .conversation ::-webkit-scrollbar-thumb {
217 | background: #b3ada7;
218 | }
219 |
220 | .conversation .conversation-container {
221 | overflow-x: hidden;
222 | padding: 0 16px;
223 | margin-bottom: 5px;
224 | height: calc(100% - 93px);
225 | min-height: 100px;
226 | }
227 | .message {
228 | color: #000;
229 | clear: both;
230 | line-height: 18px;
231 | font-size: 15px;
232 | padding: 8px;
233 | position: relative;
234 | margin: 8px 0;
235 | max-width: 85%;
236 | word-wrap: break-word;
237 | z-index: -1;
238 | }
239 |
240 | .message:after {
241 | position: absolute;
242 | content: "";
243 | width: 0;
244 | height: 0;
245 | border-style: solid;
246 | }
247 | .message:first-child {
248 | margin: 16px 0 8px;
249 | }
250 |
251 | .message.received {
252 | background: #fff;
253 | border-radius: 0px 5px 5px 5px;
254 | float: left;
255 | }
256 |
257 | .message.received:after {
258 | border-width: 0px 10px 10px 0;
259 | border-color: transparent #fff transparent transparent;
260 | top: 0;
261 | left: -10px;
262 | }
263 |
264 | .message.sent {
265 | background: #e1ffc7;
266 | border-radius: 5px 0px 5px 5px;
267 | float: right;
268 | }
269 |
270 | .message.sent:after {
271 | border-width: 0px 0 10px 10px;
272 | border-color: transparent transparent transparent #e1ffc7;
273 | top: 0;
274 | right: -10px;
275 | }
276 |
277 | .conversation-compose {
278 | display: flex;
279 | flex-direction: row;
280 | align-items: flex-end;
281 | overflow: hidden;
282 | height: 50px;
283 | width: 100%;
284 | z-index: 2;
285 | }
286 |
287 | .conversation-compose div:not(.processing),
288 | .conversation-compose input {
289 | background: #fff;
290 | height: 100%;
291 | }
292 | .conversation-compose .emoji {
293 | display: flex;
294 | align-items: center;
295 | justify-content: center;
296 | background: white;
297 | border-radius: 5px 0 0 5px;
298 | flex: 0 0 auto;
299 | margin-left: 8px;
300 | width: 48px;
301 | }
302 |
303 | .conversation-compose .input-msg {
304 | border: 0;
305 | flex: 1 1 auto;
306 | font-size: 16px;
307 | margin: 0;
308 | outline: none;
309 | min-width: 50px;
310 | }
311 |
312 | .conversation-compose .photo {
313 | flex: 0 0 auto;
314 | border-radius: 0 0 5px 0;
315 | text-align: center;
316 | position: relative;
317 | width: 48px;
318 | }
319 |
320 | .conversation-compose .photo:after {
321 | border-width: 0px 0 10px 10px;
322 | border-color: transparent transparent transparent #fff;
323 | border-style: solid;
324 | position: absolute;
325 | width: 0;
326 | height: 0;
327 | content: "";
328 | top: 0;
329 | right: -10px;
330 | }
331 |
332 | .conversation-compose .photo i {
333 | display: block;
334 | color: #7d8488;
335 | font-size: 24px;
336 | transform: translate(-50%, -50%);
337 | position: relative;
338 | top: 50%;
339 | left: 50%;
340 | }
341 |
342 | .conversation-compose .send {
343 | background: transparent !important;
344 | border: 0;
345 | cursor: pointer;
346 | flex: 0 0 auto;
347 | margin-left: 8px;
348 | margin-right: 8px;
349 | padding: 0;
350 | position: relative;
351 | outline: none;
352 | }
353 |
354 | .conversation-compose .send .circle {
355 | border-radius: 50%;
356 | color: #fff;
357 | position: relative;
358 | width: 48px;
359 | height: 48px;
360 | display: flex;
361 | align-items: center;
362 | justify-content: center;
363 | }
364 |
365 | .conversation-compose .send .circle.start {
366 | background: #008a7c;
367 | }
368 | .conversation-compose .send .circle.stop {
369 | background: #fb283b;
370 | }
371 |
372 | .conversation-compose .send .circle.wait {
373 | background: gray;
374 | }
375 |
376 | .conversation-compose .send .circle i {
377 | font-size: 24px;
378 | }
379 |
380 | .conversation .status {
381 | display: flex;
382 | justify-content: center;
383 | font-size: 13px;
384 | height: 30px;
385 | }
386 |
387 | .conversation .status div {
388 | background-color: white;
389 | border-radius: 5px;
390 | margin: 5px;
391 | padding: 0 5px;
392 | }
393 | /* "processing" loader (see the CSS Loaders link in AwesomeProjects.md) */
394 | .processing {
395 | width: 120px;
396 | height: 20px;
397 | margin-top: 7px;
398 | margin-right: -48px;
399 | -webkit-mask: linear-gradient(90deg,#000 70%,#0000 0) left/20% 100%;
400 | background:
401 | linear-gradient(var(--loader-color) 0 0) left -25% top 0 /20% 100% no-repeat
402 | #ddd;
403 | animation: l7 1s infinite steps(6);
404 | }
405 | @keyframes l7 {
406 | 100% {background-position: right -25% top 0}
407 | }
408 |
409 |
410 | .chat-container {
411 | display: flex;
412 | flex-direction: column;
413 | }
--------------------------------------------------------------------------------
/server.py:
--------------------------------------------------------------------------------
1 | import argparse
2 | import asyncio
3 | import json
4 | import logging
5 | import os
6 | import ssl
7 | import threading
8 | from asyncio import create_task, AbstractEventLoop
9 | from typing import Optional
10 |
11 | from aiohttp import web
12 | from aiortc import RTCSessionDescription, MediaStreamTrack
13 | from av import AudioFrame
14 |
15 | from audio_utils import Whisper, Bark
16 | from chain import Chain
17 | from state import State
18 |
19 | logger = logging.getLogger("pc")
20 | ROOT = os.path.dirname(__file__)
21 |
22 | pcs = set()
23 |
24 | whisper: Optional[Whisper] = None
25 | bark: Optional[Bark] = None
26 | chain: Optional[Chain] = None
27 |
28 |
29 | async def index(request):
30 | content = open(os.path.join(ROOT, "index.html"), "r").read()
31 | return web.Response(content_type="text/html", text=content)
32 |
33 |
34 | async def javascript(request):
35 | content = open(os.path.join(ROOT, "client.js"), "r").read()
36 | return web.Response(content_type="application/javascript", text=content)
37 |
38 |
39 | async def css(request):
40 | content = open(os.path.join(ROOT, "styles.css"), "r").read()
41 | return web.Response(content_type="text/css", text=content)
42 |
43 |
44 | async def offer(request):
45 | params = await request.json()
46 |
47 | offer = RTCSessionDescription(sdp=params["sdp"], type=params["type"])
48 |
49 | state = State()
50 | pcs.add(state)
51 |
52 | state.log_info("Created for %s", request.remote)
53 |
54 | state.pc.addTrack(state.response_player)
55 |
56 | @state.pc.on("iceconnectionstatechange")
57 | async def on_iceconnectionstatechange():
58 | state.log_info("ICE connection state is %s", state.pc.iceConnectionState)
59 | if state.pc.iceConnectionState == "failed":
60 | await state.pc.close()
61 |
62 | async def record():
63 | track = state.track
64 | state.log_info("Recording %s", state.filename)
65 | while True:
66 | frame: AudioFrame = await track.recv()
67 | if state.recording:
68 | state.append_frame(frame)
69 | await asyncio.sleep(0)
70 |
71 | @state.pc.on("track")
72 | async def on_track(track: MediaStreamTrack):
73 | state.log_info("Track %s received", track.kind)
74 |
75 | if track.kind == "audio":
76 | state.log_info("Received %s", track.kind)
77 | state.track = track
78 | state.task = create_task(record())
79 |
80 | @track.on("ended")
81 | async def on_ended():
82 | state.log_info("Track %s ended", track.kind)
83 | state.task.cancel()
84 | track.stop()
85 |
86 | # handle offer
87 | await state.pc.setRemoteDescription(offer)
88 |
89 | # send answer
90 | answer = await state.pc.createAnswer()
91 | await state.pc.setLocalDescription(answer)
92 |
93 | @state.pc.on("datachannel")
94 | async def on_datachannel(channel):
95 | state.log_info("DataChannel")
96 | state.response_player.channel = channel
97 |
98 | @channel.on("message")
99 | async def on_message(message):
100 | state.log_info("Received message on channel: %s", message)
101 | if message == "get_response":
102 | state.response_player.response_ready = True
103 | if message == "get_silence":
104 | state.response_player.response_ready = False
105 | if message == "start_recording":
106 | state.log_info("Start Recording")
107 | state.response_player.response_ready = False
108 | state.buffer = []
109 | state.recording = True
110 | state.counter += 1
111 | state.filename = f"{state.id}_{state.counter}.wav"
112 | if message == "stop_recording":
113 | state.log_info("Stop Recording")
114 | state.recording = False
115 | await asyncio.sleep(0.5)
116 | data = state.flush_audio()
117 | process_loop = create_bg_loop()
118 | asyncio.run_coroutine_threadsafe(process_request(data), process_loop)
119 | if message[0:7] == "preset:":
120 | preset = message[7:]
121 | bark.set_voice_preset(preset)
122 | state.log_info("Changed voice preset to %s", preset)
123 | if message[0:6] == "model:":
124 | model = message[6:]
125 | chain.set_model(model)
126 | state.log_info("Changed model to %s", model)
127 |
128 | async def process_request(data):
129 | continue_to_synthesize, response = await transcribe_request(data)
130 | if continue_to_synthesize:
131 | response = response.strip().split("\n")[0]
132 | state.log_info(response)
133 | await synthesize_response(response)
134 |         # stop this background event loop (created via create_bg_loop)
135 |         # now that the response pipeline has finished
136 |         loop = asyncio.get_running_loop()
137 |         loop.stop()
138 |
139 |
140 | async def transcribe_request(data):
141 | response = None
142 | transcription = whisper.transcribe(data)
143 | channel.send(f"Human: {transcription[0]}")
144 | state.log_info(transcription[0])
145 | await asyncio.sleep(0)
146 | try:
147 | response = chain.get_chain().invoke({"human_input": transcription[0]})
148 | continue_to_synthesize = True
149 | except Exception as e:
150 | channel.send("AI: Error communicating with Ollama")
151 | channel.send(f"AI: {e}")
152 | channel.send("playing: response")
153 | channel.send("playing: silence")
154 | continue_to_synthesize = False
155 | return continue_to_synthesize, response
156 |
157 | async def synthesize_response(response):
158 | if len(response.strip()) > 0:
159 | channel.send(f"AI: {response}")
160 | await asyncio.sleep(0)
161 | bark.synthesize(response)
162 | state.response_player.response_ready = True
163 | else:
164 | channel.send("playing: response")
165 | channel.send("playing: silence")
166 | await asyncio.sleep(0)
167 |
168 | return web.Response(
169 | content_type="application/json",
170 | text=json.dumps(
171 | {"sdp": state.pc.localDescription.sdp, "type": state.pc.localDescription.type}
172 | ),
173 | )
174 |
175 |
176 | async def on_shutdown(app):
177 | # close peer connections
178 | coros = [state.pc.close() for state in pcs]
179 | for state in pcs:
180 |         delete_file(state.filename)
181 | await asyncio.gather(*coros)
182 |
183 |
184 | def delete_file(filename):
185 | try:
186 | os.remove(filename)
187 | except OSError:
188 | pass
189 |
190 |
191 | # https://gist.github.com/ultrafunkamsterdam/8be3d55ac45759aa1bd843ab64ce876d
192 | def create_bg_loop():
193 | def to_bg(loop):
194 | asyncio.set_event_loop(loop)
195 | try:
196 | loop.run_forever()
197 | except asyncio.CancelledError as e:
198 | print('CANCELLEDERROR {}'.format(e))
199 | finally:
200 | for task in asyncio.all_tasks(loop):
201 | task.cancel()
202 | loop.run_until_complete(loop.shutdown_asyncgens())
203 | loop.stop()
204 | loop.close()
205 |
206 | new_loop = asyncio.new_event_loop()
207 | t = threading.Thread(target=to_bg, args=(new_loop,))
208 | t.start()
209 | return new_loop
210 |
211 |
212 | if __name__ == "__main__":
213 | parser = argparse.ArgumentParser(description="WebRTC AI Voice Chat")
214 | parser.add_argument("--cert-file", help="SSL certificate file (for HTTPS)")
215 | parser.add_argument("--key-file", help="SSL key file (for HTTPS)")
216 | parser.add_argument(
217 | "--host", default="0.0.0.0", help="Host for HTTP server (default: 0.0.0.0)"
218 | )
219 | parser.add_argument(
220 | "--port", type=int, default=8080, help="Port for HTTP server (default: 8080)"
221 | )
222 | parser.add_argument(
223 | "--ollama-host", default="http://localhost:11434", help="Ollama API Server (default: http://localhost:11434"
224 | )
225 | parser.add_argument(
226 | "--whisper-model", default="openai/whisper-small", help="Whisper model (default: openai/whisper-small)"
227 | )
228 | parser.add_argument(
229 | "--bark-model", default="suno/bark-small", help="Bark model (default: suno/bark-small)"
230 | )
231 | parser.add_argument("--verbose", "-v", action="count")
232 |
233 | args = parser.parse_args()
234 |
235 | if args.verbose:
236 | logging.basicConfig(level=logging.DEBUG)
237 | else:
238 | logging.basicConfig(level=logging.INFO)
239 |
240 | chain = Chain()
241 | if args.ollama_host:
242 | chain.set_ollama_host(args.ollama_host)
243 |
244 | if args.whisper_model:
245 | whisper = Whisper(model_name=args.whisper_model)
246 | else:
247 | whisper = Whisper()
248 |
249 | if args.bark_model:
250 | bark = Bark(model_name=args.bark_model)
251 | else:
252 | bark = Bark()
253 |
254 | if args.cert_file:
255 | ssl_context = ssl.SSLContext()
256 | ssl_context.load_cert_chain(args.cert_file, args.key_file)
257 | else:
258 | ssl_context = None
259 |
260 | app = web.Application()
261 | app.on_shutdown.append(on_shutdown)
262 | app.router.add_get("/", index)
263 | app.router.add_get("/client.js", javascript)
264 | app.router.add_get("/styles.css", css)
265 | app.router.add_post("/offer", offer)
266 | web.run_app(app, host=args.host, port=args.port, ssl_context=ssl_context)
267 |
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | Apache License
2 | Version 2.0, January 2004
3 | http://www.apache.org/licenses/
4 |
5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
6 |
7 | 1. Definitions.
8 |
9 | "License" shall mean the terms and conditions for use, reproduction,
10 | and distribution as defined by Sections 1 through 9 of this document.
11 |
12 | "Licensor" shall mean the copyright owner or entity authorized by
13 | the copyright owner that is granting the License.
14 |
15 | "Legal Entity" shall mean the union of the acting entity and all
16 | other entities that control, are controlled by, or are under common
17 | control with that entity. For the purposes of this definition,
18 | "control" means (i) the power, direct or indirect, to cause the
19 | direction or management of such entity, whether by contract or
20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the
21 | outstanding shares, or (iii) beneficial ownership of such entity.
22 |
23 | "You" (or "Your") shall mean an individual or Legal Entity
24 | exercising permissions granted by this License.
25 |
26 | "Source" form shall mean the preferred form for making modifications,
27 | including but not limited to software source code, documentation
28 | source, and configuration files.
29 |
30 | "Object" form shall mean any form resulting from mechanical
31 | transformation or translation of a Source form, including but
32 | not limited to compiled object code, generated documentation,
33 | and conversions to other media types.
34 |
35 | "Work" shall mean the work of authorship, whether in Source or
36 | Object form, made available under the License, as indicated by a
37 | copyright notice that is included in or attached to the work
38 | (an example is provided in the Appendix below).
39 |
40 | "Derivative Works" shall mean any work, whether in Source or Object
41 | form, that is based on (or derived from) the Work and for which the
42 | editorial revisions, annotations, elaborations, or other modifications
43 | represent, as a whole, an original work of authorship. For the purposes
44 | of this License, Derivative Works shall not include works that remain
45 | separable from, or merely link (or bind by name) to the interfaces of,
46 | the Work and Derivative Works thereof.
47 |
48 | "Contribution" shall mean any work of authorship, including
49 | the original version of the Work and any modifications or additions
50 | to that Work or Derivative Works thereof, that is intentionally
51 | submitted to Licensor for inclusion in the Work by the copyright owner
52 | or by an individual or Legal Entity authorized to submit on behalf of
53 | the copyright owner. For the purposes of this definition, "submitted"
54 | means any form of electronic, verbal, or written communication sent
55 | to the Licensor or its representatives, including but not limited to
56 | communication on electronic mailing lists, source code control systems,
57 | and issue tracking systems that are managed by, or on behalf of, the
58 | Licensor for the purpose of discussing and improving the Work, but
59 | excluding communication that is conspicuously marked or otherwise
60 | designated in writing by the copyright owner as "Not a Contribution."
61 |
62 | "Contributor" shall mean Licensor and any individual or Legal Entity
63 | on behalf of whom a Contribution has been received by Licensor and
64 | subsequently incorporated within the Work.
65 |
66 | 2. Grant of Copyright License. Subject to the terms and conditions of
67 | this License, each Contributor hereby grants to You a perpetual,
68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable
69 | copyright license to reproduce, prepare Derivative Works of,
70 | publicly display, publicly perform, sublicense, and distribute the
71 | Work and such Derivative Works in Source or Object form.
72 |
73 | 3. Grant of Patent License. Subject to the terms and conditions of
74 | this License, each Contributor hereby grants to You a perpetual,
75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable
76 | (except as stated in this section) patent license to make, have made,
77 | use, offer to sell, sell, import, and otherwise transfer the Work,
78 | where such license applies only to those patent claims licensable
79 | by such Contributor that are necessarily infringed by their
80 | Contribution(s) alone or by combination of their Contribution(s)
81 | with the Work to which such Contribution(s) was submitted. If You
82 | institute patent litigation against any entity (including a
83 | cross-claim or counterclaim in a lawsuit) alleging that the Work
84 | or a Contribution incorporated within the Work constitutes direct
85 | or contributory patent infringement, then any patent licenses
86 | granted to You under this License for that Work shall terminate
87 | as of the date such litigation is filed.
88 |
89 | 4. Redistribution. You may reproduce and distribute copies of the
90 | Work or Derivative Works thereof in any medium, with or without
91 | modifications, and in Source or Object form, provided that You
92 | meet the following conditions:
93 |
94 | (a) You must give any other recipients of the Work or
95 | Derivative Works a copy of this License; and
96 |
97 | (b) You must cause any modified files to carry prominent notices
98 | stating that You changed the files; and
99 |
100 | (c) You must retain, in the Source form of any Derivative Works
101 | that You distribute, all copyright, patent, trademark, and
102 | attribution notices from the Source form of the Work,
103 | excluding those notices that do not pertain to any part of
104 | the Derivative Works; and
105 |
106 | (d) If the Work includes a "NOTICE" text file as part of its
107 | distribution, then any Derivative Works that You distribute must
108 | include a readable copy of the attribution notices contained
109 | within such NOTICE file, excluding those notices that do not
110 | pertain to any part of the Derivative Works, in at least one
111 | of the following places: within a NOTICE text file distributed
112 | as part of the Derivative Works; within the Source form or
113 | documentation, if provided along with the Derivative Works; or,
114 | within a display generated by the Derivative Works, if and
115 | wherever such third-party notices normally appear. The contents
116 | of the NOTICE file are for informational purposes only and
117 | do not modify the License. You may add Your own attribution
118 | notices within Derivative Works that You distribute, alongside
119 | or as an addendum to the NOTICE text from the Work, provided
120 | that such additional attribution notices cannot be construed
121 | as modifying the License.
122 |
123 | You may add Your own copyright statement to Your modifications and
124 | may provide additional or different license terms and conditions
125 | for use, reproduction, or distribution of Your modifications, or
126 | for any such Derivative Works as a whole, provided Your use,
127 | reproduction, and distribution of the Work otherwise complies with
128 | the conditions stated in this License.
129 |
130 | 5. Submission of Contributions. Unless You explicitly state otherwise,
131 | any Contribution intentionally submitted for inclusion in the Work
132 | by You to the Licensor shall be under the terms and conditions of
133 | this License, without any additional terms or conditions.
134 | Notwithstanding the above, nothing herein shall supersede or modify
135 | the terms of any separate license agreement you may have executed
136 | with Licensor regarding such Contributions.
137 |
138 | 6. Trademarks. This License does not grant permission to use the trade
139 | names, trademarks, service marks, or product names of the Licensor,
140 | except as required for reasonable and customary use in describing the
141 | origin of the Work and reproducing the content of the NOTICE file.
142 |
143 | 7. Disclaimer of Warranty. Unless required by applicable law or
144 | agreed to in writing, Licensor provides the Work (and each
145 | Contributor provides its Contributions) on an "AS IS" BASIS,
146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
147 | implied, including, without limitation, any warranties or conditions
148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
149 | PARTICULAR PURPOSE. You are solely responsible for determining the
150 | appropriateness of using or redistributing the Work and assume any
151 | risks associated with Your exercise of permissions under this License.
152 |
153 | 8. Limitation of Liability. In no event and under no legal theory,
154 | whether in tort (including negligence), contract, or otherwise,
155 | unless required by applicable law (such as deliberate and grossly
156 | negligent acts) or agreed to in writing, shall any Contributor be
157 | liable to You for damages, including any direct, indirect, special,
158 | incidental, or consequential damages of any character arising as a
159 | result of this License or out of the use or inability to use the
160 | Work (including but not limited to damages for loss of goodwill,
161 | work stoppage, computer failure or malfunction, or any and all
162 | other commercial damages or losses), even if such Contributor
163 | has been advised of the possibility of such damages.
164 |
165 | 9. Accepting Warranty or Additional Liability. While redistributing
166 | the Work or Derivative Works thereof, You may choose to offer,
167 | and charge a fee for, acceptance of support, warranty, indemnity,
168 | or other liability obligations and/or rights consistent with this
169 | License. However, in accepting such obligations, You may act only
170 | on Your own behalf and on Your sole responsibility, not on behalf
171 | of any other Contributor, and only if You agree to indemnify,
172 | defend, and hold each Contributor harmless for any liability
173 | incurred by, or claims asserted against, such Contributor by reason
174 | of your accepting any such warranty or additional liability.
175 |
176 | END OF TERMS AND CONDITIONS
177 |
178 | APPENDIX: How to apply the Apache License to your work.
179 |
180 | To apply the Apache License to your work, attach the following
181 | boilerplate notice, with the fields enclosed by brackets "[]"
182 | replaced with your own identifying information. (Don't include
183 | the brackets!) The text should be enclosed in the appropriate
184 | comment syntax for the file format. We also recommend that a
185 | file or class name and description of purpose be included on the
186 | same "printed page" as the copyright notice for easier
187 | identification within third-party archives.
188 |
189 | Copyright [yyyy] [name of copyright owner]
190 |
191 | Licensed under the Apache License, Version 2.0 (the "License");
192 | you may not use this file except in compliance with the License.
193 | You may obtain a copy of the License at
194 |
195 | http://www.apache.org/licenses/LICENSE-2.0
196 |
197 | Unless required by applicable law or agreed to in writing, software
198 | distributed under the License is distributed on an "AS IS" BASIS,
199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
200 | See the License for the specific language governing permissions and
201 | limitations under the License.
202 |
--------------------------------------------------------------------------------