├── Claude2alpaca.png
├── claude2alpaca_logo.png
├── README.md
├── requirements.txt
└── claude2AlpacaPG_full.py
/Claude2alpaca.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/fabiomatricardi/Claude2Alpaca7B-Playgorund/main/Claude2alpaca.png
--------------------------------------------------------------------------------
/claude2alpaca_logo.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/fabiomatricardi/Claude2Alpaca7B-Playgorund/main/claude2alpaca_logo.png
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 |
2 |
3 | # Claude2Alpaca7B-Playgorund
Code for a Gradio playground that runs a 7B-parameter, 4-bit-quantized GGUF model with ctransformers.
5 |
6 | ## Instructions
- Create a new directory
- Create a virtual environment and activate it
- Install the dependencies
- Download the Python file and the PNG files
- Download the GGUF file `claude2-alpaca-7b.Q4_K_M.gguf` (the filename the script expects) from the [Hugging Face Hub](https://huggingface.co/TheBloke/claude2-alpaca-7B-GGUF)
12 | - put it into the subfolder `model`
13 |
14 | ### Dependencies
15 | ```
16 | pip install gradio
17 | pip install ctransformers
18 | ```
19 | ### Run the GUI
From the terminal, with the venv activated, run:
21 | ```
22 | python claude2AlpacaPG_full.py
23 | ```
24 |
25 |
26 | Here the result...
27 |
28 |
--------------------------------------------------------------------------------
/requirements.txt:
--------------------------------------------------------------------------------
1 | aiofiles==23.2.1
2 | altair==5.1.2
3 | annotated-types==0.6.0
4 | anyio==3.7.1
5 | attrs==23.1.0
6 | certifi==2023.11.17
7 | charset-normalizer==3.3.2
8 | click==8.1.7
9 | colorama==0.4.6
10 | contourpy==1.2.0
11 | ctransformers==0.2.27
12 | cycler==0.12.1
13 | exceptiongroup==1.1.3
14 | fastapi==0.104.1
15 | ffmpy==0.3.1
16 | filelock==3.13.1
17 | fonttools==4.44.3
18 | fsspec==2023.10.0
19 | gradio==4.4.1
20 | gradio_client==0.7.0
21 | h11==0.14.0
22 | httpcore==1.0.2
23 | httpx==0.25.1
24 | huggingface-hub==0.19.4
25 | idna==3.4
26 | importlib-resources==6.1.1
27 | Jinja2==3.1.2
28 | jsonschema==4.20.0
29 | jsonschema-specifications==2023.11.1
30 | kiwisolver==1.4.5
31 | markdown-it-py==3.0.0
32 | MarkupSafe==2.1.3
33 | matplotlib==3.8.2
34 | mdurl==0.1.2
35 | numpy==1.26.2
36 | orjson==3.9.10
37 | packaging==23.2
38 | pandas==2.1.3
39 | Pillow==10.1.0
40 | py-cpuinfo==9.0.0
41 | pydantic==2.5.1
42 | pydantic_core==2.14.3
43 | pydub==0.25.1
44 | Pygments==2.17.1
45 | pyparsing==3.1.1
46 | python-dateutil==2.8.2
47 | python-multipart==0.0.6
48 | pytz==2023.3.post1
49 | PyYAML==6.0.1
50 | referencing==0.31.0
51 | requests==2.31.0
52 | rich==13.7.0
53 | rpds-py==0.13.0
54 | semantic-version==2.10.0
55 | shellingham==1.5.4
56 | six==1.16.0
57 | sniffio==1.3.0
58 | starlette==0.27.0
59 | tomlkit==0.12.0
60 | toolz==0.12.0
61 | tqdm==4.66.1
62 | typer==0.9.0
63 | typing_extensions==4.8.0
64 | tzdata==2023.3
65 | urllib3==2.1.0
66 | uvicorn==0.24.0.post1
67 | websockets==11.0.3
68 |
--------------------------------------------------------------------------------
/claude2AlpacaPG_full.py:
--------------------------------------------------------------------------------
import gradio as gr
import os
from ctransformers import AutoModelForCausalLM, AutoConfig, Config #import for GGML models
import datetime

#MODEL SETTINGS also for DISPLAY
# Running transcript of every prompt/response pair; appended to by combine().
convHistory = ''
# NOTE(review): README mentions the Q4_0 file, but the script loads Q4_K_M —
# the file placed in model/ must match this exact name.
modelfile = "model/claude2-alpaca-7b.Q4_K_M.gguf"
repetitionpenalty = 1.15
contextlength=4096
# Plain-text log written by writehistory() after each generation.
logfile = 'Claude2Alpaca_logs.txt'
print("loading model...")
# Time the model load so it can be reported below.
stt = datetime.datetime.now()
conf = AutoConfig(Config(temperature=0.3,
                repetition_penalty=repetitionpenalty,
                batch_size=64,
                max_new_tokens=2048,
                context_length=contextlength))
# llama-architecture GGUF model loaded on CPU via ctransformers.
llm = AutoModelForCausalLM.from_pretrained(modelfile,
                                           model_type="llama",
                                           config = conf)
dt = datetime.datetime.now() - stt
print(f"Model loaded in {dt}")
24 |
def writehistory(text):
    """Append *text*, followed by a newline, to the module-level log file.

    Parameters
    ----------
    text : str
        Pre-formatted log entry to persist.
    """
    # 'a' mode creates the file on first use. The context manager closes the
    # file on exit, so the original explicit f.close() was redundant and has
    # been removed. An explicit encoding avoids platform-dependent defaults.
    with open(logfile, 'a', encoding='utf-8') as f:
        f.write(text)
        f.write('\n')
30 |
31 | """
32 | gr.themes.Base()
33 | gr.themes.Default()
34 | gr.themes.Glass()
35 | gr.themes.Monochrome()
36 | gr.themes.Soft()
37 | """
def combine(a, b, c, d):
    """Stream a completion from the loaded model for the Gradio UI.

    Parameters
    ----------
    a : str
        System/preamble text prepended to the prompt.
    b : str
        User instruction.
    c : float
        Sampling temperature.
    d : int
        Maximum number of new tokens to generate.

    Yields
    ------
    tuple
        ``(generation, delta, prompt_tokens, answer_tokens, total_tokens)``
        after every streamed token, so the UI updates incrementally.
    """
    global convHistory
    # Fixes vs. original: removed a redundant function-local `import datetime`
    # (already imported at module level) and an unused SYSTEM_PROMPT local.
    temperature = c
    max_new_tokens = d
    # Alpaca-style template expected by the claude2-alpaca model.
    prompt = a + "\n\n### Instruction:\n" + b + "\n\n### Response:\n"
    start = datetime.datetime.now()
    generation = ""
    delta = ""
    # The prompt is fixed for the whole stream: tokenize it once instead of
    # re-tokenizing it on every yielded character.
    n_prompt_tokens = len(llm.tokenize(prompt))
    prompt_tokens = f"Prompt Tokens: {n_prompt_tokens}"
    answer_tokens = ''
    total_tokens = ''
    for character in llm(prompt,
                temperature = temperature,
                repetition_penalty = repetitionpenalty,
                max_new_tokens=max_new_tokens,
                stream = True):
        generation += character
        n_out_tokens = len(llm.tokenize(generation))
        answer_tokens = f"Out Tkns: {n_out_tokens}"
        total_tokens = f"Total Tkns: {n_prompt_tokens + n_out_tokens}"
        delta = datetime.datetime.now() - start
        yield generation, delta, prompt_tokens, answer_tokens, total_tokens
    timestamp = datetime.datetime.now()
    # Bug fix: the original log message hard-coded "RepPenalty: 1.5" while the
    # generation call actually used 1.15; log the value really in effect.
    logger = f"""time: {timestamp}\n Temp: {temperature} - MaxNewTokens: {max_new_tokens} - RepPenalty: {repetitionpenalty} \nPROMPT: \n{prompt}\nClaude2Alpaca-7B: {generation}\nGenerated in {delta}\nPromptTokens: {prompt_tokens} Output Tokens: {answer_tokens} Total Tokens: {total_tokens}\n\n---\n\n"""
    writehistory(logger)
    convHistory = convHistory + prompt + "\n" + generation + "\n"
    print(convHistory)
    return generation, delta, prompt_tokens, answer_tokens, total_tokens
71 |
72 |
73 | # MAIN GRADIO INTERFACE
74 | with gr.Blocks(theme='remilia/Ghostly') as demo: #theme=gr.themes.Glass()
75 | #TITLE SECTION
76 | with gr.Row(variant='compact'):
77 | with gr.Column(scale=12):
78 | gr.HTML("
Test your favourite LLM for advanced inferences