1?1:a,s=M(a),d=t.startX+(t.x-t.startX)*s,p=t.startY+(t.y-t.startY)*s,t.method.call(t.scrollable,d,p),(d!==t.x||p!==t.y)&&e.requestAnimationFrame(S.bind(e,t))}function h(t,l,s){var d,p,a,y,_=u();t===o.body?(d=e,p=e.scrollX||e.pageXOffset,a=e.scrollY||e.pageYOffset,y=f.scroll):(d=t,p=t.scrollLeft,a=t.scrollTop,y=b),S({scrollable:d,method:y,startTime:_,startX:p,startY:a,x:l,y:s})}e.scroll=e.scrollTo=function(){if(arguments[0]!==void 0){if(m(arguments[0])===!0){f.scroll.call(e,arguments[0].left!==void 0?arguments[0].left:typeof arguments[0]!="object"?arguments[0]:e.scrollX||e.pageXOffset,arguments[0].top!==void 0?arguments[0].top:arguments[1]!==void 0?arguments[1]:e.scrollY||e.pageYOffset);return}h.call(e,o.body,arguments[0].left!==void 0?~~arguments[0].left:e.scrollX||e.pageXOffset,arguments[0].top!==void 0?~~arguments[0].top:e.scrollY||e.pageYOffset)}},e.scrollBy=function(){if(arguments[0]!==void 0){if(m(arguments[0])){f.scrollBy.call(e,arguments[0].left!==void 0?arguments[0].left:typeof arguments[0]!="object"?arguments[0]:0,arguments[0].top!==void 0?arguments[0].top:arguments[1]!==void 0?arguments[1]:0);return}h.call(e,o.body,~~arguments[0].left+(e.scrollX||e.pageXOffset),~~arguments[0].top+(e.scrollY||e.pageYOffset))}},r.prototype.scroll=r.prototype.scrollTo=function(){if(arguments[0]!==void 0){if(m(arguments[0])===!0){if(typeof arguments[0]=="number"&&arguments[1]===void 0)throw new SyntaxError("Value could not be converted");f.elementScroll.call(this,arguments[0].left!==void 0?~~arguments[0].left:typeof arguments[0]!="object"?~~arguments[0]:this.scrollLeft,arguments[0].top!==void 0?~~arguments[0].top:arguments[1]!==void 0?~~arguments[1]:this.scrollTop);return}var t=arguments[0].left,l=arguments[0].top;h.call(this,this,typeof t=="undefined"?this.scrollLeft:~~t,typeof l=="undefined"?this.scrollTop:~~l)}},r.prototype.scrollBy=function(){if(arguments[0]!==void 0){if(m(arguments[0])===!0){f.elementScroll.call(this,arguments[0].left!==void 
0?~~arguments[0].left+this.scrollLeft:~~arguments[0]+this.scrollLeft,arguments[0].top!==void 0?~~arguments[0].top+this.scrollTop:~~arguments[1]+this.scrollTop);return}this.scroll({left:~~arguments[0].left+this.scrollLeft,top:~~arguments[0].top+this.scrollTop,behavior:arguments[0].behavior})}},r.prototype.scrollIntoView=function(){if(m(arguments[0])===!0){f.scrollIntoView.call(this,arguments[0]===void 0?!0:arguments[0]);return}var t=$(this),l=t.getBoundingClientRect(),s=this.getBoundingClientRect();t!==o.body?(h.call(this,t,t.scrollLeft+s.left-l.left,t.scrollTop+s.top-l.top),e.getComputedStyle(t).position!=="fixed"&&e.scrollBy({left:l.left,top:l.top,behavior:"smooth"})):e.scrollBy({left:s.left,top:s.top,behavior:"smooth"})}}typeof E=="object"&&typeof w!="undefined"?w.exports={polyfill:n}:n()})()});function j(n){n.magic("range",()=>function(e,o,r=1){typeof o=="undefined"&&(o=e,e=e?1:0);let i=e>o;i&&([e,o]=[o,e]);let f=Array.from({length:(o-e)/r+1},(u,c)=>e+c*r);return i?f.reverse():f})}var Y=U(I());function X(n){Y.default.polyfill(),n.magic("scroll",()=>function(e,o={}){let r=e,i=o.offset?parseInt(o.offset,10):0;if(delete o.offset,typeof e=="string"&&/^[0-9]+?/g.test(e)&&(e=parseInt(e,10)),typeof e=="string"&&(e=document.querySelector(e)),e instanceof Element&&(e=Math.floor(e.getBoundingClientRect().top+window.pageYOffset)),Number.isInteger(e)&&(e={top:e-i,behavior:"smooth"}),typeof e!="object")throw Error("Unsupported $scroll target: ",r);Object.assign(e,o),window.scroll(e)})}function B(n){let e=(o,r)=>{if(r[0].length<=o.length)return o;let i="\u2026";return typeof r[2]!="undefined"&&(i=r[2]),Object.prototype.hasOwnProperty.call(r[1],"ellipsis")&&(i=r[1].ellipsis),o+i};n.magic("truncate",()=>function(...o){return typeof o[0]!="string"||!o[1]?o[0]:typeof o[1]!="object"?e(o[0].slice(0,o[1]),o):Object.prototype.hasOwnProperty.call(o[1],"words")&&o[1].words?e(o[0].split(" ").splice(0,o[1].words).join(" 
"),o):Object.prototype.hasOwnProperty.call(o[1],"characters")&&o[1].characters?e(o[0].slice(0,o[1].characters),o):o[0]})}function L(n){n.magic("dbg",e=>function(...o){let r=o.map(i=>n.raw(i));console.log(...r)})}function x(n){let e=n.reactive({screensize:window.innerWidth}),o={xs:0,sm:640,md:768,lg:1024,xl:1280,"2xl":1536},r=window.AlpineMagicHelpersConfig&&window.AlpineMagicHelpersConfig.breakpoints?window.AlpineMagicHelpersConfig.breakpoints:o,i;window.addEventListener("resize",()=>{clearTimeout(i),i=setTimeout(()=>{e.screensize=window.innerWidth},150)}),n.magic("screen",f=>u=>{let c=e.screensize;if(Number.isInteger(u))return u<=c;if(r[u]===void 0)throw Error("Undefined $screen property: "+u+". Supported properties: "+Object.keys(r).join(", "));return r[u]<=c})}function P(n){n.magic("interval",()=>function(...e){if(typeof e[0]!="function")return e[0];let o=e[1],r=0,i=!1;typeof e[1]=="object"&&(Object.prototype.hasOwnProperty.call(e[1],"timer")&&(o=e[1].timer),Object.prototype.hasOwnProperty.call(e[1],"delay")&&(r=e[1].delay),Object.prototype.hasOwnProperty.call(e[1],"forceInterval")&&(i=e[1].forceInterval));let f=null,u=!0,c=()=>{let g=u?o+r:o;u=!1,f=setTimeout(()=>{e[0].call(this),i?c():requestAnimationFrame(c)},g)};n.effect(()=>{this.autoIntervalTest==null||this.autoIntervalTest?i?c():requestAnimationFrame(c):clearTimeout(f)})})}function C(n){j(n),X(n),B(n),L(n),x(n),P(n)}document.addEventListener("alpine:initializing",()=>{C(window.Alpine)});})();
2 |
--------------------------------------------------------------------------------
/exo/tinychat/static/cdn.jsdelivr.net/npm/@alpinejs/intersect@3.x.x/dist/cdn.min.js:
--------------------------------------------------------------------------------
1 | (()=>{function o(e){e.directive("intersect",e.skipDuringClone((t,{value:i,expression:l,modifiers:n},{evaluateLater:r,cleanup:c})=>{let s=r(l),a={rootMargin:x(n),threshold:f(n)},u=new IntersectionObserver(d=>{d.forEach(h=>{h.isIntersecting!==(i==="leave")&&(s(),n.includes("once")&&u.disconnect())})},a);u.observe(t),c(()=>{u.disconnect()})}))}function f(e){if(e.includes("full"))return .99;if(e.includes("half"))return .5;if(!e.includes("threshold"))return 0;let t=e[e.indexOf("threshold")+1];return t==="100"?1:t==="0"?0:Number(`.${t}`)}function p(e){let t=e.match(/^(-?[0-9]+)(px|%)?$/);return t?t[1]+(t[2]||"px"):void 0}function x(e){let t="margin",i="0px 0px 0px 0px",l=e.indexOf(t);if(l===-1)return i;let n=[];for(let r=1;r<5;r++)n.push(p(e[l+r]||""));return n=n.filter(r=>r!==void 0),n.length?n.join(" ").trim():i}document.addEventListener("alpine:init",()=>{window.Alpine.plugin(o)});})();
2 |
--------------------------------------------------------------------------------
/exo/tinychat/static/cdn.jsdelivr.net/npm/purecss@3.0.0/build/base-min.css:
--------------------------------------------------------------------------------
1 | /*!
2 | Pure v3.0.0
3 | Copyright 2013 Yahoo!
4 | Licensed under the BSD License.
5 | https://github.com/pure-css/pure/blob/master/LICENSE
6 | */
7 | /*!
normalize.css v8.0.1 | MIT License | https://necolas.github.io/normalize.css/
9 | Copyright (c) Nicolas Gallagher and Jonathan Neal
10 | */
11 | /*! normalize.css v8.0.1 | MIT License | github.com/necolas/normalize.css */html{line-height:1.15;-webkit-text-size-adjust:100%}body{margin:0}main{display:block}h1{font-size:2em;margin:.67em 0}hr{box-sizing:content-box;height:0;overflow:visible}pre{font-family:monospace,monospace;font-size:1em}a{background-color:transparent}abbr[title]{border-bottom:none;text-decoration:underline;-webkit-text-decoration:underline dotted;text-decoration:underline dotted}b,strong{font-weight:bolder}code,kbd,samp{font-family:monospace,monospace;font-size:1em}small{font-size:80%}sub,sup{font-size:75%;line-height:0;position:relative;vertical-align:baseline}sub{bottom:-.25em}sup{top:-.5em}img{border-style:none}button,input,optgroup,select,textarea{font-family:inherit;font-size:100%;line-height:1.15;margin:0}button,input{overflow:visible}button,select{text-transform:none}[type=button],[type=reset],[type=submit],button{-webkit-appearance:button}[type=button]::-moz-focus-inner,[type=reset]::-moz-focus-inner,[type=submit]::-moz-focus-inner,button::-moz-focus-inner{border-style:none;padding:0}[type=button]:-moz-focusring,[type=reset]:-moz-focusring,[type=submit]:-moz-focusring,button:-moz-focusring{outline:1px dotted ButtonText}fieldset{padding:.35em .75em .625em}legend{box-sizing:border-box;color:inherit;display:table;max-width:100%;padding:0;white-space:normal}progress{vertical-align:baseline}textarea{overflow:auto}[type=checkbox],[type=radio]{box-sizing:border-box;padding:0}[type=number]::-webkit-inner-spin-button,[type=number]::-webkit-outer-spin-button{height:auto}[type=search]{-webkit-appearance:textfield;outline-offset:-2px}[type=search]::-webkit-search-decoration{-webkit-appearance:none}::-webkit-file-upload-button{-webkit-appearance:button;font:inherit}details{display:block}summary{display:list-item}template{display:none}[hidden]{display:none}html{font-family:sans-serif}.hidden,[hidden]{display:none!important}.pure-img{max-width:100%;height:auto;display:block}
--------------------------------------------------------------------------------
/exo/tinychat/static/cdnjs.cloudflare.com/ajax/libs/font-awesome/6.5.2/webfonts/fa-brands-400.ttf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/exo-explore/exo/e4238f9ef369037252c7542e40ea1a8a625afba7/exo/tinychat/static/cdnjs.cloudflare.com/ajax/libs/font-awesome/6.5.2/webfonts/fa-brands-400.ttf
--------------------------------------------------------------------------------
/exo/tinychat/static/cdnjs.cloudflare.com/ajax/libs/font-awesome/6.5.2/webfonts/fa-brands-400.woff2:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/exo-explore/exo/e4238f9ef369037252c7542e40ea1a8a625afba7/exo/tinychat/static/cdnjs.cloudflare.com/ajax/libs/font-awesome/6.5.2/webfonts/fa-brands-400.woff2
--------------------------------------------------------------------------------
/exo/tinychat/static/cdnjs.cloudflare.com/ajax/libs/font-awesome/6.5.2/webfonts/fa-regular-400.ttf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/exo-explore/exo/e4238f9ef369037252c7542e40ea1a8a625afba7/exo/tinychat/static/cdnjs.cloudflare.com/ajax/libs/font-awesome/6.5.2/webfonts/fa-regular-400.ttf
--------------------------------------------------------------------------------
/exo/tinychat/static/cdnjs.cloudflare.com/ajax/libs/font-awesome/6.5.2/webfonts/fa-regular-400.woff2:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/exo-explore/exo/e4238f9ef369037252c7542e40ea1a8a625afba7/exo/tinychat/static/cdnjs.cloudflare.com/ajax/libs/font-awesome/6.5.2/webfonts/fa-regular-400.woff2
--------------------------------------------------------------------------------
/exo/tinychat/static/cdnjs.cloudflare.com/ajax/libs/font-awesome/6.5.2/webfonts/fa-solid-900.ttf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/exo-explore/exo/e4238f9ef369037252c7542e40ea1a8a625afba7/exo/tinychat/static/cdnjs.cloudflare.com/ajax/libs/font-awesome/6.5.2/webfonts/fa-solid-900.ttf
--------------------------------------------------------------------------------
/exo/tinychat/static/cdnjs.cloudflare.com/ajax/libs/font-awesome/6.5.2/webfonts/fa-solid-900.woff2:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/exo-explore/exo/e4238f9ef369037252c7542e40ea1a8a625afba7/exo/tinychat/static/cdnjs.cloudflare.com/ajax/libs/font-awesome/6.5.2/webfonts/fa-solid-900.woff2
--------------------------------------------------------------------------------
/exo/tinychat/static/cdnjs.cloudflare.com/ajax/libs/font-awesome/6.5.2/webfonts/fa-v4compatibility.ttf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/exo-explore/exo/e4238f9ef369037252c7542e40ea1a8a625afba7/exo/tinychat/static/cdnjs.cloudflare.com/ajax/libs/font-awesome/6.5.2/webfonts/fa-v4compatibility.ttf
--------------------------------------------------------------------------------
/exo/tinychat/static/cdnjs.cloudflare.com/ajax/libs/font-awesome/6.5.2/webfonts/fa-v4compatibility.woff2:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/exo-explore/exo/e4238f9ef369037252c7542e40ea1a8a625afba7/exo/tinychat/static/cdnjs.cloudflare.com/ajax/libs/font-awesome/6.5.2/webfonts/fa-v4compatibility.woff2
--------------------------------------------------------------------------------
/exo/tinychat/static/fonts.googleapis.com/css2:
--------------------------------------------------------------------------------
1 | @font-face {
2 | font-family: 'Megrim';
3 | font-style: normal;
4 | font-weight: 400;
5 | font-display: swap;
6 | src: url(https://fonts.gstatic.com/s/megrim/v16/46kulbz5WjvLqJZlbQ.ttf) format('truetype');
7 | }
8 |
--------------------------------------------------------------------------------
/exo/tinychat/static/unpkg.com/@highlightjs/cdn-assets@11.9.0/styles/vs2015.min.css:
--------------------------------------------------------------------------------
1 | pre code.hljs{display:block;overflow-x:auto;padding:1em}code.hljs{padding:3px 5px}.hljs{background:#1e1e1e;color:#dcdcdc}.hljs-keyword,.hljs-literal,.hljs-name,.hljs-symbol{color:#569cd6}.hljs-link{color:#569cd6;text-decoration:underline}.hljs-built_in,.hljs-type{color:#4ec9b0}.hljs-class,.hljs-number{color:#b8d7a3}.hljs-meta .hljs-string,.hljs-string{color:#d69d85}.hljs-regexp,.hljs-template-tag{color:#9a5334}.hljs-formula,.hljs-function,.hljs-params,.hljs-subst,.hljs-title{color:#dcdcdc}.hljs-comment,.hljs-quote{color:#57a64a;font-style:italic}.hljs-doctag{color:#608b4e}.hljs-meta,.hljs-meta .hljs-keyword,.hljs-tag{color:#9b9b9b}.hljs-template-variable,.hljs-variable{color:#bd63c5}.hljs-attr,.hljs-attribute{color:#9cdcfe}.hljs-section{color:gold}.hljs-emphasis{font-style:italic}.hljs-strong{font-weight:700}.hljs-bullet,.hljs-selector-attr,.hljs-selector-class,.hljs-selector-id,.hljs-selector-pseudo,.hljs-selector-tag{color:#d7ba7d}.hljs-addition{background-color:#144212;display:inline-block;width:100%}.hljs-deletion{background-color:#600;display:inline-block;width:100%}
--------------------------------------------------------------------------------
/exo/tinychat/static/unpkg.com/@marcreichel/alpine-autosize@1.3.x/dist/alpine-autosize.min.js:
--------------------------------------------------------------------------------
1 | !function(e){"function"==typeof define&&define.amd?define(e):e()}((function(){"use strict";var e=new Map;function t(t){var o=e.get(t);o&&o.destroy()}function o(t){var o=e.get(t);o&&o.update()}var r=null;"undefined"==typeof window?((r=function(e){return e}).destroy=function(e){return e},r.update=function(e){return e}):((r=function(t,o){return t&&Array.prototype.forEach.call(t.length?t:[t],(function(t){return function(t){if(t&&t.nodeName&&"TEXTAREA"===t.nodeName&&!e.has(t)){var o,r=null,n=window.getComputedStyle(t),i=(o=t.value,function(){s({testForHeightReduction:""===o||!t.value.startsWith(o),restoreTextAlign:null}),o=t.value}),l=function(o){t.removeEventListener("autosize:destroy",l),t.removeEventListener("autosize:update",a),t.removeEventListener("input",i),window.removeEventListener("resize",a),Object.keys(o).forEach((function(e){return t.style[e]=o[e]})),e.delete(t)}.bind(t,{height:t.style.height,resize:t.style.resize,textAlign:t.style.textAlign,overflowY:t.style.overflowY,overflowX:t.style.overflowX,wordWrap:t.style.wordWrap});t.addEventListener("autosize:destroy",l),t.addEventListener("autosize:update",a),t.addEventListener("input",i),window.addEventListener("resize",a),t.style.overflowX="hidden",t.style.wordWrap="break-word",e.set(t,{destroy:l,update:a}),a()}function s(e){var o,i,l=e.restoreTextAlign,a=void 0===l?null:l,d=e.testForHeightReduction,u=void 0===d||d,c=n.overflowY;if(0!==t.scrollHeight&&("vertical"===n.resize?t.style.resize="none":"both"===n.resize&&(t.style.resize="horizontal"),u&&(o=function(e){for(var t=[];e&&e.parentNode&&e.parentNode instanceof Element;)e.parentNode.scrollTop&&t.push([e.parentNode,e.parentNode.scrollTop]),e=e.parentNode;return function(){return t.forEach((function(e){var 
t=e[0],o=e[1];t.style.scrollBehavior="auto",t.scrollTop=o,t.style.scrollBehavior=null}))}}(t),t.style.height=""),i="content-box"===n.boxSizing?t.scrollHeight-(parseFloat(n.paddingTop)+parseFloat(n.paddingBottom)):t.scrollHeight+parseFloat(n.borderTopWidth)+parseFloat(n.borderBottomWidth),"none"!==n.maxHeight&&i>parseFloat(n.maxHeight)?("hidden"===n.overflowY&&(t.style.overflow="scroll"),i=parseFloat(n.maxHeight)):"hidden"!==n.overflowY&&(t.style.overflow="hidden"),t.style.height=i+"px",a&&(t.style.textAlign=a),o&&o(),r!==i&&(t.dispatchEvent(new Event("autosize:resized",{bubbles:!0})),r=i),c!==n.overflow&&!a)){var f=n.textAlign;"hidden"===n.overflow&&(t.style.textAlign="start"===f?"end":"start"),s({restoreTextAlign:f,testForHeightReduction:!0})}}function a(){s({testForHeightReduction:!0,restoreTextAlign:null})}}(t)})),t}).destroy=function(e){return e&&Array.prototype.forEach.call(e.length?e:[e],t),e},r.update=function(e){return e&&Array.prototype.forEach.call(e.length?e:[e],o),e});var n=r;document.addEventListener("alpine:init",(()=>{var e;(e=window.Alpine).directive("autosize",((e,{modifiers:t},{cleanup:o})=>{n(e);const r=Array.from(e.attributes);let i=!1;for(let{nodeName:e}of r)if("wire:model"===e||e.startsWith("wire:model.")){i=!0;break}!e.hasAttribute("wire:ignore")&&i&&e.setAttribute("wire:ignore","");const l=()=>{n.update(e)};e.addEventListener("autosize",l),o((()=>{n.destroy(e),e.removeEventListener("autosize",l)}))})),e.magic("autosize",(e=>t=>{(t||e).dispatchEvent(new Event("autosize"))}))}))}));
2 | //# sourceMappingURL=alpine-autosize.min.js.map
3 |
--------------------------------------------------------------------------------
/exo/tinychat/static/unpkg.com/marked-highlight@2.1.2/lib/index.umd.js:
--------------------------------------------------------------------------------
1 | (function (global, factory) {
2 | typeof exports === 'object' && typeof module !== 'undefined' ? factory(exports) :
3 | typeof define === 'function' && define.amd ? define(['exports'], factory) :
4 | (global = typeof globalThis !== 'undefined' ? globalThis : global || self, factory(global.markedHighlight = {}));
5 | })(this, (function (exports) { 'use strict';
6 |
7 | function markedHighlight(options) {
8 | if (typeof options === 'function') {
9 | options = {
10 | highlight: options
11 | };
12 | }
13 |
14 | if (!options || typeof options.highlight !== 'function') {
15 | throw new Error('Must provide highlight function');
16 | }
17 |
18 | if (typeof options.langPrefix !== 'string') {
19 | options.langPrefix = 'language-';
20 | }
21 |
22 | return {
23 | async: !!options.async,
24 | walkTokens(token) {
25 | if (token.type !== 'code') {
26 | return;
27 | }
28 |
29 | const lang = getLang(token.lang);
30 |
31 | if (options.async) {
32 | return Promise.resolve(options.highlight(token.text, lang, token.lang || '')).then(updateToken(token));
33 | }
34 |
35 | const code = options.highlight(token.text, lang, token.lang || '');
36 | if (code instanceof Promise) {
37 | throw new Error('markedHighlight is not set to async but the highlight function is async. Set the async option to true on markedHighlight to await the async highlight function.');
38 | }
39 | updateToken(token)(code);
40 | },
41 | useNewRenderer: true,
42 | renderer: {
43 | code({ text, lang, escaped }) {
44 | const language = getLang(lang);
45 | const classAttr = language
46 | ? ` class="${options.langPrefix}${escape(language)}"`
47 | : '';
48 | text = text.replace(/\n$/, '');
49 | return `${escaped ? text : escape(text, true)}\n
`;
50 | }
51 | }
52 | };
53 | }
54 |
55 | function getLang(lang) {
56 | return (lang || '').match(/\S*/)[0];
57 | }
58 |
59 | function updateToken(token) {
60 | return (code) => {
61 | if (typeof code === 'string' && code !== token.text) {
62 | token.escaped = true;
63 | token.text = code;
64 | }
65 | };
66 | }
67 |
68 | // copied from marked helpers
69 | const escapeTest = /[&<>"']/;
70 | const escapeReplace = new RegExp(escapeTest.source, 'g');
71 | const escapeTestNoEncode = /[<>"']|&(?!(#\d{1,7}|#[Xx][a-fA-F0-9]{1,6}|\w+);)/;
72 | const escapeReplaceNoEncode = new RegExp(escapeTestNoEncode.source, 'g');
73 | const escapeReplacements = {
74 | '&': '&',
75 | '<': '<',
76 | '>': '>',
77 | '"': '"',
78 | "'": '''
79 | };
80 | const getEscapeReplacement = (ch) => escapeReplacements[ch];
81 | function escape(html, encode) {
82 | if (encode) {
83 | if (escapeTest.test(html)) {
84 | return html.replace(escapeReplace, getEscapeReplacement);
85 | }
86 | } else {
87 | if (escapeTestNoEncode.test(html)) {
88 | return html.replace(escapeReplaceNoEncode, getEscapeReplacement);
89 | }
90 | }
91 |
92 | return html;
93 | }
94 |
95 | exports.markedHighlight = markedHighlight;
96 |
97 | }));
98 |
--------------------------------------------------------------------------------
/exo/tinychat/update_deps.py:
--------------------------------------------------------------------------------
1 | import os
2 | import requests
3 | from bs4 import BeautifulSoup
4 | from urllib.parse import urljoin, urlparse
5 | import re
6 |
7 |
def download_file(url, local_path, timeout=30):
  """Download ``url`` and write the response body to ``local_path``.

  Parent directories are created as needed. A non-200 response is reported
  to stdout instead of raising, so a batch of downloads keeps going past
  individual failures.

  Args:
    url: Absolute URL to fetch.
    local_path: Destination file path.
    timeout: Seconds to wait for the server before giving up. Added so a
      stalled request cannot hang the mirroring script forever; default 30
      keeps existing call sites working unchanged.
  """
  response = requests.get(url, timeout=timeout)
  if response.status_code == 200:
    os.makedirs(os.path.dirname(local_path), exist_ok=True)
    with open(local_path, 'wb') as f:
      f.write(response.content)
    print(f"Downloaded: {local_path}")
  else:
    print(response.status_code)
    print(f"Failed to download: {url}")
18 |
19 |
def update_html(html_content, base_url):
  """Rewrite ``<script src>`` and ``<link href>`` URLs to local mirror paths.

  Every referenced asset is downloaded under ``static/<host>/<path>`` via
  ``download_file``; relative URLs are resolved against ``base_url`` first.
  Returns the rewritten HTML as a string.
  """
  soup = BeautifulSoup(html_content, 'html.parser')

  for node in soup.find_all(['script', 'link']):
    # Prefer src over href when both exist; skip tags carrying neither.
    attr = 'src' if node.has_attr('src') else ('href' if node.has_attr('href') else None)
    if attr is None:
      continue
    raw_url = node[attr]

    # Absolute URLs pass through untouched; everything else resolves against base_url.
    resolved = raw_url if raw_url.startswith(('http://', 'https://')) else urljoin(base_url, raw_url)

    pieces = urlparse(resolved)
    mirror_path = os.path.join('static', pieces.netloc, pieces.path.lstrip('/'))

    download_file(resolved, mirror_path)

    local_ref = os.path.relpath(mirror_path, '.')
    if node.name == 'script':
      node['src'] = "/" + local_ref
    elif node.name == 'link':
      node['href'] = "/" + local_ref

  return str(soup)
48 |
49 |
# Read the HTML file
with open('./index.html', 'r') as f:
  html_content = f.read()

# Update HTML and download files
# NOTE(review): the generic rewrite pass is deliberately disabled; only the
# Font Awesome mirroring below runs. Re-enable these lines to re-mirror every
# <script>/<link> asset referenced by index.html.
# updated_html = update_html(html_content, 'https://example.com')

# # Write the updated HTML
# with open('./index.html', 'w') as f:
#   f.write(updated_html)

print("HTML file updated with local paths.")

# Download Font Awesome CSS and font files
base_url = "https://cdnjs.cloudflare.com/ajax/libs/font-awesome/6.5.2/"
css_url = urljoin(base_url, "css/all.min.css")
output_dir = "static/cdnjs.cloudflare.com/ajax/libs/font-awesome/6.5.2"

# Download CSS file
css_output_path = os.path.join(output_dir, "css", "all.min.css")
download_file(css_url, css_output_path)

# Parse CSS file for font URLs
with open(css_output_path, 'r', encoding='utf-8') as f:
  css_content = f.read()

# Extract font URLs from the CSS content
# Matches url(...) references ending in .woff2 or .ttf; quotes are kept and
# stripped below.
font_urls = re.findall(r'url\((.*?\.(?:woff2|ttf))\)', css_content)

print(f"Found {len(font_urls)} font URLs")

# Download font files
for font_url in font_urls:
  font_url = font_url.strip('"\'')
  # CSS references are relative to the css/ directory (e.g. ../webfonts/...);
  # drop the leading ../ so they resolve from the package root.
  if font_url.startswith('../'):
    font_url = font_url[3:]

  # Use base_url instead of urljoin to keep the version number
  full_url = base_url + font_url
  relative_path = font_url
  output_path = os.path.join(output_dir, relative_path)
  download_file(full_url, output_path)

print("Download complete!")
94 |
--------------------------------------------------------------------------------
/exo/topology/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/exo-explore/exo/e4238f9ef369037252c7542e40ea1a8a625afba7/exo/topology/__init__.py
--------------------------------------------------------------------------------
/exo/topology/partitioning_strategy.py:
--------------------------------------------------------------------------------
1 | from abc import ABC, abstractmethod
2 | from typing import List, Dict
3 | from dataclasses import dataclass
4 | from .topology import Topology
5 | from exo.inference.shard import Shard
6 | from exo.topology.device_capabilities import device_capabilities
7 | import asyncio
8 |
9 |
10 | # Partitions shard-space into pieces of contiguous shards, represented by floating point range [start, end) between 0 and 1
@dataclass
class Partition:
  """One node's contiguous slice of shard-space: the half-open fraction range [start, end)."""
  node_id: str  # id of the node that owns this slice
  start: float  # inclusive fractional start, between 0 and 1
  end: float  # exclusive fractional end, between 0 and 1
16 |
17 |
class PartitioningStrategy(ABC):
  """Interface for splitting a Topology's nodes into an ordered list of Partitions."""
  @abstractmethod
  def partition(self, topology: Topology) -> List[Partition]:
    """Return the Partitions covering shard-space [0, 1) for the given topology."""
    pass
22 |
23 |
def map_partitions_to_shards(partitions: List[Partition], num_layers: int, model_id: str) -> List[Shard]:
  """Convert fractional partitions into concrete layer-range Shards.

  Each partition's [start, end) fraction is scaled by ``num_layers``. The
  final partition is always extended to layer ``num_layers - 1``, ranges that
  round to nothing are dropped, and the trailing shard is padded so every
  layer is covered exactly once.
  """
  shards: List[Shard] = []
  final_idx = len(partitions) - 1
  for idx, part in enumerate(partitions):
    first_layer = int(part.start * num_layers)
    # The last partition must reach the final layer regardless of rounding.
    last_layer = num_layers - 1 if idx == final_idx else int(part.end * num_layers) - 1
    # Skip partitions whose scaled range is empty.
    if first_layer <= last_layer:
      shards.append(Shard(model_id, first_layer, last_layer, num_layers))

  # Pad the trailing shard so the full layer range is covered.
  if shards and shards[-1].end_layer < num_layers - 1:
    shards[-1] = Shard(model_id, shards[-1].start_layer, num_layers - 1, num_layers)

  return shards
43 |
--------------------------------------------------------------------------------
/exo/topology/ring_memory_weighted_partitioning_strategy.py:
--------------------------------------------------------------------------------
1 | from typing import List
2 | from .partitioning_strategy import PartitioningStrategy
3 | from .topology import Topology
4 | from .partitioning_strategy import Partition
5 |
6 |
class RingMemoryWeightedPartitioningStrategy(PartitioningStrategy):
  """Gives each node a slice of shard-space proportional to its memory.

  Nodes are visited in descending (memory, node_id) order so the resulting
  ring layout is deterministic across hosts.
  """
  def partition(self, topology: Topology) -> List[Partition]:
    ranked = sorted(topology.all_nodes(), key=lambda entry: (entry[1].memory, entry[0]), reverse=True)
    total_mem = sum(caps.memory for _, caps in ranked)
    slices: List[Partition] = []
    cursor = 0
    for node_id, caps in ranked:
      # Round to 5 places so adjacent boundaries agree exactly.
      upper = round(cursor + (caps.memory/total_mem), 5)
      slices.append(Partition(node_id, cursor, upper))
      cursor = upper
    return slices
19 |
--------------------------------------------------------------------------------
/exo/topology/test_device_capabilities.py:
--------------------------------------------------------------------------------
1 | import pytest
2 | from unittest.mock import patch
3 | from exo.topology.device_capabilities import mac_device_capabilities, DeviceCapabilities, DeviceFlops, TFLOPS, device_capabilities
4 |
5 |
6 | @pytest.mark.asyncio
7 | @patch("subprocess.check_output")
8 | async def test_mac_device_capabilities_pro(mock_check_output):
9 | # Mock the subprocess output
10 | mock_check_output.return_value = b"""
11 | Hardware:
12 |
13 | Hardware Overview:
14 |
15 | Model Name: MacBook Pro
16 | Model Identifier: Mac15,9
17 | Model Number: Z1CM000EFB/A
18 | Chip: Apple M3 Max
19 | Total Number of Cores: 16 (12 performance and 4 efficiency)
20 | Memory: 128 GB
21 | System Firmware Version: 10000.000.0
22 | OS Loader Version: 10000.000.0
23 | Serial Number (system): XXXXXXXXXX
24 | Hardware UUID: XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX
25 | Provisioning UDID: XXXXXXXX-XXXXXXXXXXXXXXXX
26 | Activation Lock Status: Enabled
27 | """
28 |
29 | # Call the function
30 | result = await mac_device_capabilities()
31 |
32 | # Check the results
33 | assert isinstance(result, DeviceCapabilities)
34 | assert result.model == "MacBook Pro"
35 | assert result.chip == "Apple M3 Max"
36 | assert result.memory == 131072 # 128 GB in MB
37 | assert str(result) == "Model: MacBook Pro. Chip: Apple M3 Max. Memory: 131072MB. Flops: 14.20 TFLOPS, fp16: 28.40 TFLOPS, int8: 56.80 TFLOPS"
38 |
39 |
40 | @pytest.mark.asyncio
41 | @patch("subprocess.check_output")
42 | async def test_mac_device_capabilities_air(mock_check_output):
43 | # Mock the subprocess output
44 | mock_check_output.return_value = b"""
45 | Hardware:
46 |
47 | Hardware Overview:
48 |
49 | Model Name: MacBook Air
50 | Model Identifier: Mac14,2
51 | Model Number: MLY33B/A
52 | Chip: Apple M2
53 | Total Number of Cores: 8 (4 performance and 4 efficiency)
54 | Memory: 8 GB
55 | System Firmware Version: 10000.00.0
56 | OS Loader Version: 10000.00.0
57 | Serial Number (system): XXXXXXXXXX
58 | Hardware UUID: XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX
59 | Provisioning UDID: XXXXXXXX-XXXXXXXXXXXXXXXX
60 | Activation Lock Status: Disabled
61 | """
62 |
63 | # Call the function
64 | result = await mac_device_capabilities()
65 |
66 | # Check the results
67 | assert isinstance(result, DeviceCapabilities)
68 | assert result.model == "MacBook Air"
69 | assert result.chip == "Apple M2"
70 | assert result.memory == 8192 # 8 GB in MB
71 |
72 |
@pytest.mark.skip(reason="Unskip this test when running on a MacBook Pro, Apple M3 Max, 128GB")
@pytest.mark.asyncio
async def test_mac_device_capabilities_real():
  """Smoke-test mac_device_capabilities against the real host hardware.

  Hard-codes expectations for one specific machine (M3 Max, 128 GB), so it
  stays skipped by default and is only run manually on that hardware.
  """
  # Call the function without mocking
  result = await mac_device_capabilities()

  # Check the results
  assert isinstance(result, DeviceCapabilities)
  assert result.model == "MacBook Pro"
  assert result.chip == "Apple M3 Max"
  assert result.memory == 131072  # 128 GB in MB
  assert result.flops == DeviceFlops(fp32=14.20*TFLOPS, fp16=28.40*TFLOPS, int8=56.80*TFLOPS)
  assert str(result) == "Model: MacBook Pro. Chip: Apple M3 Max. Memory: 131072MB. Flops: 14.20 TFLOPS, fp16: 28.40 TFLOPS, int8: 56.80 TFLOPS"
86 |
87 |
@pytest.mark.asyncio
async def test_device_capabilities():
  """device_capabilities() returns non-empty, plausible values on any host."""
  caps = await device_capabilities()
  assert caps.model != ""
  assert caps.chip != ""
  assert caps.memory > 0
  assert caps.flops is not None
95 |
--------------------------------------------------------------------------------
/exo/topology/test_map_partitions.py:
--------------------------------------------------------------------------------
1 | import unittest
2 | from typing import List
3 | from exo.topology.partitioning_strategy import Partition, map_partitions_to_shards
4 | from exo.inference.shard import Shard
5 |
6 |
class TestRingMemoryWeightedPartitioningStrategy(unittest.TestCase):
  """Tests for map_partitions_to_shards rounding and full-coverage guarantees."""
  def test_map_partitions_to_shards(self):
    """Fractional partitions map to contiguous layer ranges covering all layers."""
    partitions = [
      Partition("node1", 0.0, 0.42857),
      Partition("node2", 0.42857, 0.71428),
      Partition("node3", 0.71428, 0.99999),
    ]
    shards = map_partitions_to_shards(partitions, 32, "model")
    self.assertEqual(
      shards,
      [
        Shard("model", 0, 12, 32),
        Shard("model", 13, 21, 32),
        Shard("model", 22, 31, 32),
      ],
    )

    partitions = [
      Partition("node1", 0.0, 0.1),
      Partition("node2", 0.1, 0.2),
      Partition("node3", 0.2, 1.0),
    ]
    shards = map_partitions_to_shards(partitions, 32, "model")
    self.assertEqual(
      shards,
      [
        Shard("model", 0, 2, 32),
        Shard("model", 3, 5, 32),
        Shard("model", 6, 31, 32),
      ],
    )

    partitions = [
      Partition("node1", 0.0, 1.0),
    ]
    shards = map_partitions_to_shards(partitions, 32, "model")
    self.assertEqual(
      shards,
      [
        Shard("model", 0, 31, 32),
      ],
    )

    # Empty partition list yields no shards.
    partitions = []
    shards = map_partitions_to_shards(partitions, 32, "model")
    self.assertEqual(shards, [])

  def test_broken_map_partitions_to_shards(self):
    """Documents the rounding bug of the old implementation: the last shard
    stops at layer 30 instead of 31 because int(0.99999*32) truncates."""
    # this was an old broken implementation that sometimes had rounding errors!
    def _broken_map_partitions_to_shards(partitions: List[Partition], num_layers, model_id: str):
      shards = []
      for i, partition in enumerate(partitions):
        start_layer = int(partition.start*num_layers)
        end_layer = int(partition.end*num_layers) - 1
        shards.append(Shard(model_id, start_layer, end_layer, num_layers))
      return shards

    partitions = [
      Partition("node1", 0.0, 0.42857),
      Partition("node2", 0.42857, 0.71428),
      Partition("node3", 0.71428, 0.99999),
    ]
    shards = _broken_map_partitions_to_shards(partitions, 32, "model")
    self.assertEqual(
      shards,
      [
        Shard("model", 0, 12, 32),
        Shard("model", 13, 21, 32),
        Shard("model", 22, 30, 32),
      ],
    )
78 |
79 |
80 | if __name__ == "__main__":
81 | unittest.main()
82 |
--------------------------------------------------------------------------------
/exo/topology/test_ring_memory_weighted_partitioning_strategy.py:
--------------------------------------------------------------------------------
1 | import unittest
2 | from exo.topology.ring_memory_weighted_partitioning_strategy import RingMemoryWeightedPartitioningStrategy
3 | from exo.topology.topology import Topology
4 | from exo.topology.device_capabilities import DeviceCapabilities, DeviceFlops
5 | from exo.topology.partitioning_strategy import Partition
6 |
7 |
class TestRingMemoryWeightedPartitioningStrategy(unittest.TestCase):
  def test_partition(self):
    """Nodes are ordered by memory (descending) and each gets a share of [0, 1] proportional to its memory."""
    # triangle
    # node1 -> node2 -> node3 -> node1
    topology = Topology()
    topology.update_node(
      "node1",
      DeviceCapabilities(model="test1", chip="test1", memory=3000, flops=DeviceFlops(fp32=0, fp16=0, int8=0)),
    )
    topology.update_node(
      "node2",
      DeviceCapabilities(model="test2", chip="test2", memory=1000, flops=DeviceFlops(fp32=0, fp16=0, int8=0)),
    )
    topology.update_node(
      "node3",
      DeviceCapabilities(model="test3", chip="test3", memory=6000, flops=DeviceFlops(fp32=0, fp16=0, int8=0)),
    )
    topology.add_edge("node1", "node2")
    topology.add_edge("node2", "node3")
    topology.add_edge("node3", "node1")
    topology.add_edge("node1", "node3")

    strategy = RingMemoryWeightedPartitioningStrategy()
    partitions = strategy.partition(topology)

    self.assertEqual(len(partitions), 3)
    self.assertEqual(
      partitions,
      [
        Partition("node3", 0.0, 0.6),
        Partition("node1", 0.6, 0.9),
        Partition("node2", 0.9, 1.0),
      ],
    )

  def test_partition_rounding(self):
    """Memory split 128GB/192GB/128GB => shares 2/7, 3/7, 2/7 of the ring.

    The previous expected values were self-inconsistent (a partition ending at
    0.42857 followed by one starting at 0.6 leaves a gap, so the list could
    never tile [0, 1]).  Assert the structural invariants instead of exact
    rounded constants so the test is robust to the strategy's rounding.
    """
    # triangle
    # node1 -> node2 -> node3 -> node1
    topology = Topology()
    topology.update_node(
      "node1",
      DeviceCapabilities(
        model="MacBook Pro",
        chip="test1",
        memory=128*1024*1024*1024,
        flops=DeviceFlops(fp32=0, fp16=0, int8=0),
      ),
    )
    topology.update_node(
      "node2",
      DeviceCapabilities(
        model="Mac Studio",
        chip="test2",
        memory=192*1024*1024*1024,
        flops=DeviceFlops(fp32=0, fp16=0, int8=0),
      ),
    )
    topology.update_node(
      "node3",
      DeviceCapabilities(
        model="MacBook Pro",
        chip="test3",
        memory=128*1024*1024*1024,
        flops=DeviceFlops(fp32=0, fp16=0, int8=0),
      ),
    )

    strategy = RingMemoryWeightedPartitioningStrategy()
    partitions = strategy.partition(topology)

    self.assertEqual(len(partitions), 3)
    # Partitions must tile [0, 1] contiguously, in order.
    self.assertAlmostEqual(partitions[0].start, 0.0, places=4)
    self.assertAlmostEqual(partitions[-1].end, 1.0, places=4)
    for prev, cur in zip(partitions, partitions[1:]):
      self.assertAlmostEqual(prev.end, cur.start, places=4)
    # Each node's width is proportional to its memory (total 448 GB).
    # NOTE(review): assumes Partition fields are (node_id, start, end) as the
    # positional constructions above suggest — confirm against the dataclass.
    expected_share = {"node1": 128/448, "node2": 192/448, "node3": 128/448}
    for p in partitions:
      self.assertAlmostEqual(p.end - p.start, expected_share[p.node_id], places=4)
87 |
88 |
89 | if __name__ == "__main__":
90 | unittest.main()
91 |
--------------------------------------------------------------------------------
/exo/topology/topology.py:
--------------------------------------------------------------------------------
1 | from .device_capabilities import DeviceCapabilities
2 | from typing import Dict, Set, Optional
3 | from dataclasses import dataclass
4 |
@dataclass
class PeerConnection:
  """A directed edge in the topology graph.

  Identity (hashing/equality) is the (from_id, to_id) pair only; `description`
  is free-form metadata and deliberately excluded.
  """
  from_id: str
  to_id: str
  description: Optional[str] = None

  def __hash__(self):
    # Hash on the endpoint pair so a set keeps at most one edge per direction.
    return hash((self.from_id, self.to_id))

  def __eq__(self, other):
    # description intentionally ignored: two edges between the same endpoints
    # are the same edge even if annotated differently.
    if not isinstance(other, PeerConnection):
      return False
    return (self.from_id, self.to_id) == (other.from_id, other.to_id)
20 |
class Topology:
  """Directed graph of known nodes (with capabilities) and peer connections."""

  def __init__(self):
    self.nodes: Dict[str, DeviceCapabilities] = {}
    self.peer_graph: Dict[str, Set[PeerConnection]] = {}
    self.active_node_id: Optional[str] = None

  def update_node(self, node_id: str, device_capabilities: DeviceCapabilities):
    """Insert or replace the capabilities recorded for `node_id`."""
    self.nodes[node_id] = device_capabilities

  def get_node(self, node_id: str) -> DeviceCapabilities:
    """Return the capabilities for `node_id`, or None when unknown."""
    return self.nodes.get(node_id)

  def all_nodes(self):
    """Iterate over (node_id, capabilities) pairs."""
    return self.nodes.items()

  def add_edge(self, from_id: str, to_id: str, description: Optional[str] = None):
    """Record a directed edge from_id -> to_id; the set de-duplicates repeats."""
    self.peer_graph.setdefault(from_id, set()).add(PeerConnection(from_id, to_id, description))

  def merge(self, peer_node_id: str, other: "Topology"):
    """Fold `other` into this topology, trusting only data about the peer itself."""
    # Only the peer's own capabilities are copied over...
    if peer_node_id in other.nodes:
      self.update_node(peer_node_id, other.nodes[peer_node_id])
    # ...and only edges that originate at the peer.
    for connections in other.peer_graph.values():
      for conn in connections:
        if conn.from_id == peer_node_id:
          self.add_edge(conn.from_id, conn.to_id, conn.description)

  def __str__(self):
    nodes_str = ", ".join(f"{node_id}: {cap}" for node_id, cap in self.nodes.items())
    edges_str = ", ".join(
      f"{node}: {[f'{c.to_id}({c.description})' for c in conns]}"
      for node, conns in self.peer_graph.items()
    )
    return f"Topology(Nodes: {{{nodes_str}}}, Edges: {{{edges_str}}})"

  def to_json(self):
    """Serialize to plain dicts/lists suitable for JSON transport."""
    return {
      "nodes": {node_id: capabilities.to_dict() for node_id, capabilities in self.nodes.items()},
      "peer_graph": {
        node_id: [
          {"from_id": conn.from_id, "to_id": conn.to_id, "description": conn.description}
          for conn in connections
        ]
        for node_id, connections in self.peer_graph.items()
      },
      "active_node_id": self.active_node_id,
    }
76 |
--------------------------------------------------------------------------------
/exo/train/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/exo-explore/exo/e4238f9ef369037252c7542e40ea1a8a625afba7/exo/train/__init__.py
--------------------------------------------------------------------------------
/exo/train/dataset.py:
--------------------------------------------------------------------------------
1 | #from https://github.com/ml-explore/mlx-examples
2 | from pathlib import Path
3 | import numpy as np
4 | import json
5 | from functools import partial, reduce
def compose(*funcs):
  """Compose functions right-to-left: compose(f, g)(x) == f(g(x))."""
  return reduce(lambda f, g: lambda x: f(g(x)), funcs, lambda x: x)


def batch_with_lengths(tokens, maxlen=None):
  """Pad a list of token sequences into (inputs, targets, lengths) arrays.

  When `maxlen` is given, sequences longer than it are truncated — the old
  code assigned the untruncated sequence into the padded row and crashed with
  a broadcast ValueError. Shorter sequences are right-padded with zeros.
  Returns (batch[:, :-1], batch[:, 1:], lengths) for next-token training.
  """
  lengths = [len(x) for x in tokens]
  batch_size = len(lengths)
  if maxlen is None:
    maxlen = max(lengths)
  else:
    lengths = [min(maxlen, l) for l in lengths]

  # Pad (and, if needed, truncate) to the max length
  batch_arr = np.zeros((batch_size, maxlen), np.int32)
  for j in range(batch_size):
    batch_arr[j, :lengths[j]] = tokens[j][:lengths[j]]  # slice so long rows fit
  # No extra np.array copy needed: slicing the int32 array directly is enough.
  return batch_arr[:, :-1], batch_arr[:, 1:], np.array(lengths)


def batch_chunk(batch_size):
  """Return a slicer f(data, i) -> data[i:i + batch_size]."""
  return lambda d, i: d[i:i + batch_size]


def iterate_batches(dset, batch_size, train=False, uniform_length=True):
  """Yield (inputs, targets, lengths) batches from `dset`.

  With train=True the indices are reshuffled every epoch and iteration loops
  forever; otherwise a single in-order pass is made. A trailing partial batch
  is dropped. uniform_length=True pads every batch to dset._maxlen.
  """
  make_batch = lambda b: batch_with_lengths(b, maxlen=dset._maxlen if uniform_length else None)
  chunk = batch_chunk(batch_size)
  while True:
    indices = np.arange(len(dset))
    if train:
      indices = np.random.permutation(indices)
    batch = compose(make_batch, lambda i: [dset[k] for k in i], partial(chunk, indices))

    # Collect batches from dataset
    for i in range(0, len(indices) - batch_size + 1, batch_size):
      yield batch(i)

    if not train:
      break
45 |
class Dataset:
  """Line-delimited dataset loaded fully into memory.

  Each line of `path` is parsed with `loadline` and transformed lazily by
  `preprocess` on access. A missing file yields an empty dataset (the old
  code left `preprocess` unset and made __len__ raise TypeError on None).
  """
  def __init__(self, path: Path, preprocess=lambda item: item, loadline=json.loads, metrics={}):
    # `metrics` is unused but kept for interface compatibility with callers.
    # Always bind preprocess so __getitem__/__len__ behave for a missing file.
    self.preprocess = preprocess
    if not path.exists():
      self._data = None
    else:
      with open(path, "r") as fid:
        self._data = [loadline(l) for l in fid]
      # default=0 keeps an empty file from raising ValueError on max().
      self._maxlen = max((len(preprocess(x)) for x in self._data), default=0)
      # Check if any sequence is longer than 2048 tokens
      if self._maxlen > 2048:
        print("You've got sequences with over 2048 tokens in here! Split your data fool!")

  def __getitem__(self, idx: int):
    return self.preprocess(self._data[idx])

  def __len__(self):
    # A missing file behaves as an empty dataset.
    return 0 if self._data is None else len(self._data)


def load_dataset(data_path: str, preprocess=lambda i: i, loadline=json.loads):
  """Load the train/valid/test splits from `<data_path>/<name>.jsonl`.

  Raises (after logging) if any split fails to load.
  """
  def load_and_check(name):
    dataset_path = Path(data_path)/f"{name}.jsonl"
    try:
      return Dataset(dataset_path, preprocess=preprocess, loadline=loadline)
    except Exception as e:
      print(f"Unable to build dataset {dataset_path} ({e})")
      raise

  names = ("train", "valid", "test")
  train, valid, test = (load_and_check(n) for n in names)
  return train, valid, test
81 |
--------------------------------------------------------------------------------
/exo/viz/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/exo-explore/exo/e4238f9ef369037252c7542e40ea1a8a625afba7/exo/viz/__init__.py
--------------------------------------------------------------------------------
/exo/viz/test_topology_viz.py:
--------------------------------------------------------------------------------
import asyncio
import unittest
from datetime import timedelta
from exo.viz.topology_viz import TopologyViz
from exo.topology.topology import Topology
from exo.topology.device_capabilities import DeviceCapabilities, DeviceFlops
from exo.topology.partitioning_strategy import Partition
from exo.download.download_progress import RepoProgressEvent, RepoFileProgressEvent
9 |
10 |
def create_hf_repo_progress_event(
  completed_files: int = 5,
  total_files: int = 10,
  downloaded_bytes: int = 500000000,
  downloaded_bytes_this_session: int = 250000000,
  total_bytes: int = 1000000000,
  overall_speed: int = 5000000,
  overall_eta: timedelta = timedelta(seconds=100),
  file_progress: dict = None,
  status: str = "in_progress"
) -> RepoProgressEvent:
  """Build a RepoProgressEvent fixture; supplies two default per-file events when none are given."""
  if file_progress is None:
    def _file_event(path, downloaded, this_session, total, speed, eta_seconds, file_status):
      # All fixture files share the same repo id/revision.
      return RepoFileProgressEvent(
        repo_id="repo_id",
        repo_revision="repo_revision",
        file_path=path,
        downloaded=downloaded,
        downloaded_this_session=this_session,
        total=total,
        speed=speed,
        eta=timedelta(seconds=eta_seconds),
        status=file_status,
      )

    file_progress = {
      "file1.bin": _file_event("file1.bin", 100000000, 50000000, 200000000, 1000000, 100, "in_progress"),
      "file2.bin": _file_event("file2.bin", 200000000, 100000000, 200000000, 2000000, 0, "complete"),
    }

  return RepoProgressEvent(
    repo_id="repo_id",
    repo_revision="repo_revision",
    completed_files=completed_files,
    total_files=total_files,
    downloaded_bytes=downloaded_bytes,
    downloaded_bytes_this_session=downloaded_bytes_this_session,
    total_bytes=total_bytes,
    overall_speed=overall_speed,
    overall_eta=overall_eta,
    file_progress=file_progress,
    status=status
  )
62 |
63 |
class TestNodeViz(unittest.IsolatedAsyncioTestCase):
  async def asyncSetUp(self):
    """Build a four-node topology with doubling memory/FLOPS and a viz instance."""
    self.topology = Topology()
    node_specs = [
      ("node1", "ModelA", "ChipA", 8*1024, 1.0),
      ("node2", "ModelB", "ChipB", 16*1024, 2.0),
      ("node3", "ModelC", "ChipC", 32*1024, 4.0),
      ("node4", "ModelD", "ChipD", 64*1024, 8.0),
    ]
    for node_id, model, chip, memory, fp32 in node_specs:
      self.topology.update_node(
        node_id,
        DeviceCapabilities(model=model, chip=chip, memory=memory, flops=DeviceFlops(fp32=fp32, fp16=fp32*2, int8=fp32*4)),
      )

    self.top_viz = TopologyViz()
    await asyncio.sleep(2)  # Simulate running for a short time

  async def test_layout_generation(self):
    """Smoke-test refresh/update_visualization; the sleeps leave the rendered
    output on screen long enough for a human to inspect it."""
    # self.top_viz._generate_layout()
    self.top_viz.refresh()
    import time

    time.sleep(2)
    self.top_viz.update_visualization(
      self.topology,
      [
        Partition("node1", 0, 0.2),
        Partition("node4", 0.2, 0.4),
        Partition("node2", 0.4, 0.8),
        Partition("node3", 0.8, 0.9),
      ],
      "node1",
      {node_id: create_hf_repo_progress_event() for node_id in ("node1", "node2", "node3", "node4")},
    )
    time.sleep(2)
    # Second update: a new node appears, the active node changes.
    self.topology.active_node_id = "node3"
    self.top_viz.update_visualization(
      self.topology,
      [
        Partition("node1", 0, 0.3),
        Partition("node5", 0.3, 0.5),
        Partition("node2", 0.5, 0.7),
        Partition("node4", 0.7, 0.9),
      ],
      "node5",
      {node_id: create_hf_repo_progress_event() for node_id in ("node1", "node5")},
    )
    time.sleep(2)
130 |
--------------------------------------------------------------------------------
/extra/dashboard/requirements.txt:
--------------------------------------------------------------------------------
1 | plotly
2 | pandas
3 | requests
4 | aiohttp
5 | pygame
--------------------------------------------------------------------------------
/extra/dashboard/sounds/gta5_wasted.mp3:
--------------------------------------------------------------------------------
1 | version https://git-lfs.github.com/spec/v1
2 | oid sha256:fb3fb66dd02827fbff86ef1ce3bc6438371c823aed7d4c3803ed522f008e4947
3 | size 206399
4 |
--------------------------------------------------------------------------------
/extra/dashboard/sounds/pokemon_evolve.mp3:
--------------------------------------------------------------------------------
1 | version https://git-lfs.github.com/spec/v1
2 | oid sha256:d99cc9bdab4a4639d50f439b424547000e7c79f195b5b121734ad4ead435911c
3 | size 633345
4 |
--------------------------------------------------------------------------------
/extra/pipsize.py:
--------------------------------------------------------------------------------
1 | import os
2 | import importlib.metadata
3 | import importlib.util
4 | import json
5 | import sys
6 |
7 |
def calc_container(path):
  """Return the total size in bytes of `path` — a single file or a whole directory tree.

  Unreadable or vanished entries contribute 0 rather than raising.
  """
  if os.path.isfile(path):
    try:
      return os.path.getsize(path)
    except (OSError, FileNotFoundError):
      return 0

  total = 0
  for dirpath, _dirnames, filenames in os.walk(path):
    for fname in filenames:
      full_path = os.path.join(dirpath, fname)
      try:
        total += os.path.getsize(full_path)
      except (OSError, FileNotFoundError):
        # File disappeared or is unreadable: skip it.
        continue
  return total
25 |
26 |
def get_package_location(package_name):
  """Resolve the on-disk location of a package, or None when it cannot be found."""
  try:
    spec = importlib.util.find_spec(package_name)
  except ImportError:
    return None
  if spec is None:
    return None
  # Namespace/regular packages expose search locations; take the first.
  if spec.submodule_search_locations:
    return spec.submodule_search_locations[0]
  # Single-file modules only have an origin path.
  if spec.origin:
    return spec.origin
  return None
42 |
43 |
def get_package_sizes(min_size_mb=0.1):
  """Return (name, size_in_bytes) for every installed package larger than `min_size_mb`."""
  package_sizes = []
  bytes_per_mb = 1024 * 1024

  # Walk every installed distribution in the environment.
  for dist in importlib.metadata.distributions():
    try:
      package_name = dist.metadata["Name"]
      # Import names use underscores where project names use dashes.
      location = get_package_location(package_name.replace("-", "_"))
      if not (location and os.path.exists(location)):
        continue
      size = calc_container(location)
      if size / bytes_per_mb > min_size_mb:
        package_sizes.append((package_name, size))
    except Exception as e:
      # A single malformed distribution should not abort the whole scan.
      print(
        f"Error processing {dist.metadata.get('Name', 'Unknown package')}: {e}"
      )

  return package_sizes
66 |
67 |
def main():
  """Report the disk footprint of installed packages.

  With `--json FILE` the report is written as JSON; otherwise a plain-text
  table is printed. Exits non-zero on JSON argument/IO errors.
  """
  # Get and sort package sizes, largest first.
  package_sizes = get_package_sizes()
  package_sizes.sort(key=lambda x: x[1], reverse=True)

  # Convert sizes to MB and prepare data
  table_data = [(name, size/(1024*1024)) for name, size in package_sizes]
  total_size = sum(size for _, size in package_sizes)/(1024*1024)

  # Check if --json flag is present
  if "--json" in sys.argv:
    try:
      output_file = sys.argv[sys.argv.index("--json") + 1]
      json_data = {
        "packages": [{
          "name": name,
          "size_mb": round(size, 2)
        } for name, size in table_data],
        "total_size_mb": round(total_size, 2)
      }

      with open(output_file, 'w') as f:
        json.dump(json_data, f, indent=2)
      print(f"JSON data written to {output_file}")
      return
    except IndexError:
      print("Error: Please provide a filename after --json")
      sys.exit(1)
    except Exception as e:
      print(f"Error writing JSON file: {e}")
      sys.exit(1)

  # Guard: max() over an empty sequence raises ValueError when nothing
  # crossed the size threshold.
  if not table_data:
    print("No packages found above the size threshold.")
    return

  # Original table output code
  max_name_width = max(len(name) for name, _ in table_data)
  max_name_width = max(max_name_width, len("Package"))

  print(f"\n{'Package':<{max_name_width}} | Size (MB)")
  print("-" * max_name_width + "-+-" + "-" * 10)

  for name, size in table_data:
    print(f"{name:<{max_name_width}} | {size:>8.2f}")

  print(f"\nTotal size: {total_size:.2f} MB\n")

if __name__ == "__main__":
  main()
--------------------------------------------------------------------------------
/extra/start_openwebui.sh:
--------------------------------------------------------------------------------
#!/usr/bin/env bash
# Launch Open WebUI in Docker, pointed at the local exo OpenAI-compatible API.
# Default endpoint: first non-loopback IPv4 address reported by ifconfig, port 52415.
# NOTE(review): `ifconfig` is unavailable on many modern Linux distros (use `ip addr`) — confirm target platforms.
API_ENDPOINT="http://${API_ENDPOINT:-$(ifconfig | grep 'inet ' | grep -v '127.0.0.1' | awk '{print $2}' | head -n 1):52415}"
echo "Using API_ENDPOINT=${API_ENDPOINT}"
docker run -d -p 3000:8080 -e OPENAI_API_BASE_URL="${API_ENDPOINT}" -e OPENAI_API_KEY=your_secret_key -v open-webui:/app/backend/data --name open-webui --restart always ghcr.io/open-webui/open-webui:main
4 |
--------------------------------------------------------------------------------
/format.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | import subprocess
3 | import sys
4 | import os
5 |
6 |
def run_yapf(target):
  """Run `yapf -i` on a single file, or on every .py file under a directory."""
  if os.path.isfile(target):
    files = [target]
  else:
    files = [os.path.join(root, file) for root, _, files in os.walk(target) for file in files if file.endswith('.py')]

  for file in files:
    try:
      command = ["yapf", "-i", file]
      subprocess.run(command, check=True, capture_output=True, text=True)
      print(f"Formatted: {file}")
    except FileNotFoundError:
      # yapf missing entirely used to escape as an unhandled traceback;
      # report once and stop instead of failing per-file.
      print("Error: yapf not found on PATH. Install it with: pip install yapf")
      return
    except subprocess.CalledProcessError as e:
      print(f"Error formatting {file}: {e.stderr}")
20 |
21 |
def main():
  """CLI entry point: format the file or directory given as the first argument."""
  if len(sys.argv) < 2:
    print("Usage: python3 format.py e.g. python3 format.py ./exo")
    sys.exit(1)

  run_yapf(sys.argv[1])
  print("Formatting completed.")
30 |
31 |
32 | if __name__ == "__main__":
33 | main()
34 |
--------------------------------------------------------------------------------
/install.sh:
--------------------------------------------------------------------------------
#!/usr/bin/env bash
# Create a virtualenv (preferring Python 3.12) and install exo in editable mode.

PYTHON=python3
if command -v python3.12 &>/dev/null; then
  echo "Python 3.12 is installed, proceeding with python3.12..."
  PYTHON=python3.12
else
  echo "The recommended version of Python to run exo with is Python 3.12, but $(python3 --version) is installed. Proceeding with $(python3 --version)"
fi

"$PYTHON" -m venv .venv
source .venv/bin/activate
pip install -e .
12 |
--------------------------------------------------------------------------------
/scripts/build_exo.py:
--------------------------------------------------------------------------------
1 | import site
2 | import subprocess
3 | import sys
4 | import os
5 | import pkgutil
6 |
def run():
  """Assemble and execute the Nuitka command that builds a standalone exo binary."""
  site_packages = site.getsitepackages()[0]
  base_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
  baseimages_dir = os.path.join(base_dir, "exo", "apputil", "baseimages")

  # Options common to every platform.
  command = [
    sys.executable, "-m", "nuitka", "exo/main.py",
    "--company-name=exolabs",
    "--product-name=exo",
    "--output-dir=dist",
    "--follow-imports",
    "--standalone",
    "--output-filename=exo",
    "--python-flag=no_site",
    "--onefile",
    f"--include-data-dir={baseimages_dir}=exo/apputil/baseimages",
  ]

  if sys.platform == "darwin":
    command += [
      "--macos-app-name=exo",
      "--macos-app-mode=gui",
      "--macos-app-version=0.0.1",
      "--macos-signed-app-name=net.exolabs.exo",
      "--include-distribution-meta=mlx",
      "--include-module=mlx._reprlib_fix",
      "--include-module=mlx._os_warning",
      "--include-distribution-meta=huggingface_hub",
      "--include-module=huggingface_hub.repocard",
      f"--include-data-files={site_packages}/mlx/lib/mlx.metallib=mlx/lib/mlx.metallib",
      f"--include-data-files={site_packages}/mlx/lib/mlx.metallib=./mlx.metallib",
      "--include-distribution-meta=pygments",
      "--nofollow-import-to=tinygrad",
    ]
    # Explicitly bundle every MLX model module so Nuitka's static import
    # analysis cannot miss dynamically-selected models.
    for _, module_name, _ in pkgutil.iter_modules(['exo/inference/mlx/models']):
      command.append(f"--include-module=exo.inference.mlx.models.{module_name}")
  elif sys.platform == "win32":
    command += [
      "--windows-icon-from-ico=docs/exo-logo-win.ico",
      "--file-version=0.0.1",
      "--product-version=0.0.1",
    ]
  elif sys.platform.startswith("linux"):
    # NOTE(review): darwin uses --include-distribution-meta while linux uses
    # --include-distribution-metadata — confirm which spelling this Nuitka
    # version accepts on each platform.
    command += [
      "--include-distribution-metadata=pygments",
      "--linux-icon=docs/exo-rounded.png",
    ]

  try:
    subprocess.run(command, check=True)
    print("Build completed!")
  except subprocess.CalledProcessError as e:
    print(f"An error occurred: {e}")

if __name__ == "__main__":
  run()
65 |
--------------------------------------------------------------------------------
/scripts/compile_grpc.sh:
--------------------------------------------------------------------------------
#!/usr/bin/env bash
source ./install.sh
pushd exo/networking/grpc
python3 -m grpc_tools.protoc -I. --python_out=. --grpc_python_out=. node_service.proto
# Rewrite the absolute import into a relative one. `sed -i ''` is BSD-only and
# breaks GNU sed; using a backup suffix works on both, then drop the backup.
sed -i.bak "s/import\ node_service_pb2/from . &/" node_service_pb2_grpc.py
rm -f node_service_pb2_grpc.py.bak
popd
8 |
--------------------------------------------------------------------------------
/setup.py:
--------------------------------------------------------------------------------
1 | import sys
2 | import platform
3 | import subprocess
4 |
5 | from setuptools import find_packages, setup
6 |
7 | # Base requirements for all platforms
8 | install_requires = [
9 | "aiohttp==3.10.11",
10 | "aiohttp_cors==0.7.0",
11 | "aiofiles==24.1.0",
12 | "grpcio==1.70.0",
13 | "grpcio-tools==1.70.0",
14 | "Jinja2==3.1.4",
15 | "numpy==2.0.0",
16 | "nuitka==2.5.1",
17 | "nvidia-ml-py==12.560.30",
18 | "opencv-python==4.10.0.84",
19 | "pillow==10.4.0",
20 | "prometheus-client==0.20.0",
21 | "protobuf==5.28.1",
22 | "psutil==6.0.0",
23 | "pyamdgpuinfo==2.1.6;platform_system=='Linux'",
24 | "pydantic==2.9.2",
25 | "requests==2.32.3",
26 | "rich==13.7.1",
27 | "scapy==2.6.1",
28 | "tqdm==4.66.4",
29 | "transformers==4.46.3",
30 | "uuid==1.30",
31 | "uvloop==0.21.0",
32 | "tinygrad @ git+https://github.com/tinygrad/tinygrad.git@ec120ce6b9ce8e4ff4b5692566a683ef240e8bc8",
33 | ]
34 |
35 | extras_require = {
36 | "formatting": ["yapf==0.40.2",],
37 | "apple_silicon": [
38 | "mlx==0.22.0",
39 | "mlx-lm==0.21.1",
40 | ],
41 | "windows": ["pywin32==308",],
42 | "nvidia-gpu": ["nvidia-ml-py==12.560.30",],
43 | "amd-gpu": ["pyrsmi==0.2.0"],
44 | }
45 |
46 | # Check if running on macOS with Apple Silicon
47 | if sys.platform.startswith("darwin") and platform.machine() == "arm64":
48 | install_requires.extend(extras_require["apple_silicon"])
49 |
50 | # Check if running Windows
51 | if sys.platform.startswith("win32"):
52 | install_requires.extend(extras_require["windows"])
53 |
54 |
def _add_gpu_requires():
  """Append GPU-specific requirements when a working GPU management tool is detected."""
  global install_requires

  def _tool_succeeds(cmd):
    # Probe without a shell: the old code passed a list together with
    # shell=True, which on POSIX drops every argument after the command name.
    try:
      return subprocess.run(cmd, text=True, capture_output=True, check=False).returncode == 0
    except OSError:
      # Tool not installed / not executable.
      return False

  # Add Nvidia-GPU
  if _tool_succeeds(['nvidia-smi', '--query-gpu=name', '--format=csv,noheader']):
    install_requires.extend(extras_require["nvidia-gpu"])

  # Add AMD-GPU
  # This will mostly work only on Linux, amd/rocm-smi is not yet supported on Windows.
  # amd-smi is preferred with rocm-smi as fallback; the old fallback only ran
  # when amd-smi *raised*, which a missing binary under shell=True never does,
  # so it was effectively dead code.
  if _tool_succeeds(['amd-smi', 'list', '--csv']) or _tool_succeeds(['rocm-smi', 'list', '--csv']):
    install_requires.extend(extras_require["amd-gpu"])
77 |
78 |
79 | _add_gpu_requires()
80 |
81 | setup(
82 | name="exo",
83 | version="0.0.1",
84 | packages=find_packages(),
85 | install_requires=install_requires,
86 | extras_require=extras_require,
87 | package_data={"exo": ["tinychat/**/*"]},
88 | entry_points={"console_scripts": ["exo = exo.main:run"]},
89 | )
90 |
--------------------------------------------------------------------------------
/test/reconnect.sh:
--------------------------------------------------------------------------------
#!/usr/bin/env bash
# Reconnection smoke test: boot two discovery peers, kill node 2, bring it
# back up, then shut everything down.

start_node() {
  # $1=node id  $2=listen port  $3=broadcast port  $4=chatgpt api port  $5=log file
  DEBUG_DISCOVERY=7 DEBUG=7 python3 main.py --node-id "$1" --listen-port "$2" --broadcast-port "$3" --chatgpt-api-port "$4" --chatgpt-api-response-timeout 900 > "$5" 2>&1 &
}

echo "Starting node 1"
start_node node1 5678 5679 52415 output1.log
PID1=$!
echo "Started node 1 PID: $PID1"

echo "Starting node 2"
start_node node2 5679 5678 8001 output2.log
PID2=$!
echo "Started node 2 PID: $PID2"

sleep 5
kill $PID2
sleep 5

echo "Starting node 2 again..."
start_node node2 5679 5678 8001 output3.log
PID2=$!
sleep 5

echo "Killing nodes and ending test..."
kill $PID1
kill $PID2
echo "Test complete."
--------------------------------------------------------------------------------
/test/test_model_helpers.py:
--------------------------------------------------------------------------------
1 | import unittest
2 | from exo.models import get_supported_models, model_cards
3 | from exo.inference.inference_engine import inference_engine_classes
4 | from typing import NamedTuple
5 |
6 | class TestCase(NamedTuple):
7 | name: str
8 | engine_lists: list # Will contain short names, will be mapped to class names
9 | expected_models_contains: list
10 | min_count: int | None
11 | exact_count: int | None
12 | max_count: int | None
13 |
14 | # Helper function to map short names to class names
# Helper function to map short names to class names
def expand_engine_lists(engine_lists):
  """Map short engine names (e.g. "mlx") to class names, keeping unknown names unchanged."""
  return [
    [inference_engine_classes.get(engine, engine) for engine in sublist]
    for sublist in engine_lists
  ]
21 |
22 | test_cases = [
23 | TestCase(
24 | name="single_mlx_engine",
25 | engine_lists=[["mlx"]],
26 | expected_models_contains=["llama-3.2-1b", "llama-3.1-70b", "mistral-nemo"],
27 | min_count=10,
28 | exact_count=None,
29 | max_count=None
30 | ),
31 | TestCase(
32 | name="single_tinygrad_engine",
33 | engine_lists=[["tinygrad"]],
34 | expected_models_contains=["llama-3.2-1b", "llama-3.2-3b"],
35 | min_count=5,
36 | exact_count=None,
37 | max_count=15
38 | ),
39 | TestCase(
40 | name="multiple_engines_or",
41 | engine_lists=[["mlx", "tinygrad"], ["mlx"]],
42 | expected_models_contains=["llama-3.2-1b", "llama-3.2-3b", "mistral-nemo"],
43 | min_count=10,
44 | exact_count=None,
45 | max_count=None
46 | ),
47 | TestCase(
48 | name="multiple_engines_all",
49 | engine_lists=[["mlx", "tinygrad"], ["mlx", "tinygrad"]],
50 | expected_models_contains=["llama-3.2-1b", "llama-3.2-3b", "mistral-nemo"],
51 | min_count=10,
52 | exact_count=None,
53 | max_count=None
54 | ),
55 | TestCase(
56 | name="distinct_engine_lists",
57 | engine_lists=[["mlx"], ["tinygrad"]],
58 | expected_models_contains=["llama-3.2-1b"],
59 | min_count=5,
60 | exact_count=None,
61 | max_count=15
62 | ),
63 | TestCase(
64 | name="no_engines",
65 | engine_lists=[],
66 | expected_models_contains=None,
67 | min_count=None,
68 | exact_count=len(model_cards),
69 | max_count=None
70 | ),
71 | TestCase(
72 | name="nonexistent_engine",
73 | engine_lists=[["NonexistentEngine"]],
74 | expected_models_contains=[],
75 | min_count=None,
76 | exact_count=0,
77 | max_count=None
78 | ),
79 | TestCase(
80 | name="dummy_engine",
81 | engine_lists=[["dummy"]],
82 | expected_models_contains=["dummy"],
83 | min_count=None,
84 | exact_count=1,
85 | max_count=None
86 | ),
87 | ]
88 |
89 | class TestModelHelpers(unittest.TestCase):
90 | def test_get_supported_models(self):
91 | for case in test_cases:
92 | with self.subTest(f"{case.name}_short_names"):
93 | result = get_supported_models(case.engine_lists)
94 | self._verify_results(case, result)
95 |
96 | with self.subTest(f"{case.name}_class_names"):
97 | class_name_lists = expand_engine_lists(case.engine_lists)
98 | result = get_supported_models(class_name_lists)
99 | self._verify_results(case, result)
100 |
101 | def _verify_results(self, case, result):
102 | if case.expected_models_contains:
103 | for model in case.expected_models_contains:
104 | self.assertIn(model, result)
105 |
106 | if case.min_count:
107 | self.assertGreater(len(result), case.min_count)
108 |
109 | if case.exact_count is not None:
110 | self.assertEqual(len(result), case.exact_count)
111 |
112 | # Special case for distinct lists test
113 | if case.name == "distinct_engine_lists":
114 | self.assertLess(len(result), 15)
115 | self.assertNotIn("mistral-nemo", result)
116 |
117 | if case.max_count:
118 | self.assertLess(len(result), case.max_count)
119 |
120 | if __name__ == '__main__':
121 | unittest.main()
122 |
--------------------------------------------------------------------------------
/test/test_tokenizers.py:
--------------------------------------------------------------------------------
1 | import os
2 | import re
3 | from transformers import AutoTokenizer, AutoProcessor
4 | from exo.models import model_cards
5 |
6 |
7 | def test_tokenizer(name, tokenizer, verbose=False):
8 | print(f"--- {name} ({tokenizer.__class__.__name__}) ---")
9 | text = "Hello! How can I assist you today? Let me know if you need help with something or just want to chat."
10 | encoded = tokenizer.encode(text)
11 | decoded = tokenizer.decode(encoded)
12 |
13 | print(f"{encoded=}")
14 | print(f"{decoded=}")
15 |
16 | reconstructed = ""
17 | for token in encoded:
18 | if verbose:
19 | print(f"{token=}")
20 | print(f"{tokenizer.decode([token])=}")
21 | reconstructed += tokenizer.decode([token])
22 | print(f"{reconstructed=}")
23 |
24 | strip_tokens = lambda s: s.lstrip(tokenizer.decode([tokenizer.bos_token_id])).rstrip(tokenizer.decode([tokenizer.eos_token_id]))
25 | assert text == strip_tokens(decoded) == strip_tokens(reconstructed)
26 |
# Repos skipped entirely; a "*" in an entry acts as a wildcard and is
# translated to ".*" when the pattern below is built.
ignore = ["TriAiExperiments/SFR-Iterative-DPO-LLaMA-3-70B-R", "mlx-community/DeepSeek-Coder-V2-Lite-Instruct-4bit-mlx", "mlx-community/DeepSeek-V2.5-MLX-AQ4_1_64", "llava-hf/llava-1.5-7b-hf", "mlx-community/Qwen*", "dummy", "mlx-community/Meta-Llama-3.1-405B-Instruct-8bit", "mlx-community/Phi-3.5-mini-instruct-4bit", "mlx-community/phi-4-4bit", "stabilityai/stable-diffusion-2-1-base"]
ignore_pattern = re.compile(r"^(" + "|".join(model.replace("*", ".*") for model in ignore) + r")")

# Collect every non-ignored repo id across all model cards, deduplicated.
collected = set()
for card in model_cards.values():
  for repo_id in card.get("repo", {}).values():
    if ignore_pattern.match(repo_id) is None:
      collected.add(repo_id)
models = list(collected)

verbose = os.environ.get("VERBOSE", "0").lower() == "1"
for m in models:
  # TODO: figure out why use_fast=False is giving inconsistent behaviour (no spaces decoding individual tokens) for Mistral-Large-Instruct-2407-4bit
  # test_tokenizer(m, AutoProcessor.from_pretrained(m, use_fast=False), verbose)
  if m not in ["mlx-community/DeepSeek-R1-4bit", "mlx-community/DeepSeek-R1-3bit", "mlx-community/DeepSeek-V3-4bit", "mlx-community/DeepSeek-V3-3bit"]:
    test_tokenizer(m, AutoProcessor.from_pretrained(m, use_fast=True, trust_remote_code=True), verbose)
    test_tokenizer(m, AutoTokenizer.from_pretrained(m, trust_remote_code=True), verbose)
43 |
--------------------------------------------------------------------------------