43 | {value}
44 |
45 | );
46 | }
47 |
48 | function codeBlock({node, className, children, ...props}: any) {
49 | if (!children) {
50 | return null;
51 | }
52 | const value = String(children).replace(/\n$/, '');
53 | if (!value) {
54 | return null;
55 | }
56 | // Note: OpenAI does not always annotate the Markdown code block with the language
57 | // Note: In this case, we will fall back to plaintext
58 | const match = /language-(\w+)/.exec(className || '');
59 | let language: string = match ? match[1] : 'plaintext';
60 | const isInline = node.properties.dataInline;
61 |
62 | return isInline ? (
63 | inlineCodeBlock({value: value, language})
64 | ) : (
65 |
88 | {children}
89 |
*/}
90 | 102 | {children} 103 |104 | ); 105 | } 106 | 107 | const renderers = { 108 | code: codeBlock, 109 | pre: customPre, 110 | }; 111 | 112 | return ( 113 |
Adjust the speech speed to your preference. Lower values will slow down the speech, while higher values will speed it up.
Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We recommend altering this or top_p but not both.
An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or temperature but not both.
50 |