├── demo.png
├── images
│   └── huahua.jpg
├── llamaCpp
│   ├── llama.lib
│   ├── ggml_shared.lib
│   ├── ggml_static.lib
│   ├── common
│   │   ├── build-info.cpp
│   │   ├── build-info.cpp.in
│   │   ├── console.h
│   │   ├── grammar-parser.h
│   │   ├── CMakeLists.txt
│   │   ├── sampling.h
│   │   ├── train.h
│   │   ├── sampling.cpp
│   │   ├── common.h
│   │   ├── base64.hpp
│   │   ├── console.cpp
│   │   ├── grammar-parser.cpp
│   │   └── log.h
│   ├── llamaCpp.pri
│   ├── ggml-backend-impl.h
│   ├── ggml-alloc.h
│   ├── ggml-backend.h
│   ├── ggml-impl.h
│   ├── ggml-quants.h
│   └── ggml-alloc.c
├── shader.qrc
├── images.qrc
├── README.md
├── MiniQwen.pro
├── humanassets.cpp
├── humanassets.h
├── llmmodel.h
├── main.cpp
├── sqlconversationmodel.h
├── main.qml
├── sqlconversationmodel.cpp
├── LICENSE.md
└── llmmodel.cpp
/demo.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/kleinlee/MiniQwen/HEAD/demo.png
--------------------------------------------------------------------------------
/images/huahua.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/kleinlee/MiniQwen/HEAD/images/huahua.jpg
--------------------------------------------------------------------------------
/llamaCpp/llama.lib:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/kleinlee/MiniQwen/HEAD/llamaCpp/llama.lib
--------------------------------------------------------------------------------
/llamaCpp/ggml_shared.lib:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/kleinlee/MiniQwen/HEAD/llamaCpp/ggml_shared.lib
--------------------------------------------------------------------------------
/llamaCpp/ggml_static.lib:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/kleinlee/MiniQwen/HEAD/llamaCpp/ggml_static.lib
--------------------------------------------------------------------------------
/shader.qrc:
--------------------------------------------------------------------------------
1 | <RCC>
2 |     <qresource prefix="/">
3 |         <file>main.qml</file>
4 |     </qresource>
5 | </RCC>
6 |
--------------------------------------------------------------------------------
/images.qrc:
--------------------------------------------------------------------------------
1 | <RCC>
2 |     <qresource prefix="/">
3 |         <file>images/huahua.jpg</file>
4 |     </qresource>
5 | </RCC>
6 |
--------------------------------------------------------------------------------
/llamaCpp/common/build-info.cpp:
--------------------------------------------------------------------------------
1 | int LLAMA_BUILD_NUMBER = 1601;
2 | char const *LLAMA_COMMIT = "5a7d312";
3 | char const *LLAMA_COMPILER = "MSVC 19.33.31629.0";
4 | char const *LLAMA_BUILD_TARGET = "x64";
5 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # MiniQwen
2 | license: apache-2.0
3 | - llama.cpp+Qwen1.8B,并使用Qt搭建一个简易客户端。
4 | - 简化调用代码复杂度,专供Qwen使用
5 | - 内存占用在1.3GB
6 | - i5-12600K CPU上4线程可以达到14-15token/s。对终端设备非常友好。
7 |
8 |
9 |
--------------------------------------------------------------------------------
/llamaCpp/common/build-info.cpp.in:
--------------------------------------------------------------------------------
1 | int LLAMA_BUILD_NUMBER = @BUILD_NUMBER@;
2 | char const *LLAMA_COMMIT = "@BUILD_COMMIT@";
3 | char const *LLAMA_COMPILER = "@BUILD_COMPILER@";
4 | char const *LLAMA_BUILD_TARGET = "@BUILD_TARGET@";
5 |
--------------------------------------------------------------------------------
/llamaCpp/common/console.h:
--------------------------------------------------------------------------------
1 | // Console functions
2 |
3 | #pragma once
4 |
5 | #include <string>
6 |
7 | namespace console {
8 | enum display_t {
9 | reset = 0,
10 | prompt,
11 | user_input,
12 | error
13 | };
14 |
15 | void init(bool use_simple_io, bool use_advanced_display);
16 | void cleanup();
17 | void set_display(display_t display);
18 | bool readline(std::string & line, bool multiline_input);
19 | }
20 |
--------------------------------------------------------------------------------
/MiniQwen.pro:
--------------------------------------------------------------------------------
1 | QT += qml quick sql quickcontrols2
2 |
3 | CONFIG += qmltypes
4 | QML_IMPORT_NAME = AAAAA
5 | QML_IMPORT_MAJOR_VERSION = 1
6 |
7 | HEADERS += \
8 | humanassets.h \
9 | llmmodel.h \
10 | sqlconversationmodel.h
11 | SOURCES += main.cpp \
12 | humanassets.cpp \
13 | llmmodel.cpp \
14 | sqlconversationmodel.cpp
15 |
16 | RESOURCES += \
17 | shader.qrc\
18 | images.qrc
19 |
20 | include(llamaCpp/llamaCpp.pri)
21 |
22 | INSTALLS += target
23 |
24 | OTHER_FILES += \
25 | main.qml
26 |
--------------------------------------------------------------------------------
/humanassets.cpp:
--------------------------------------------------------------------------------
1 | #include "humanassets.h"
2 |
3 | HumanAssets* HumanAssets::instance_ = NULL;
4 | HumanAssets::HumanAssets()
5 | {
6 | // instance = NULL;
7 | m_chat_model = new LLMModel;
8 | m_chat_model->LoadModel();
9 | // connect(ui->pushButton_stop, SIGNAL(clicked()), m_chat_model, SLOT(Reset()));
10 | }
11 |
12 | void HumanAssets::ChatUpdated()
13 | {
14 | emit ChatUpdatedSignal();
15 | }
16 |
17 | void HumanAssets::SlotStopChat()
18 | {
19 | m_chat_model->Reset();
20 | }
21 | void HumanAssets::SlotNewChat(QString question)
22 | {
23 | m_chat_model->Run(question);
24 |
25 | }
26 |
27 | void HumanAssets::SlotNewAnswer(QString str)
28 | {
29 |
30 | }
31 |
--------------------------------------------------------------------------------
/llamaCpp/common/grammar-parser.h:
--------------------------------------------------------------------------------
1 | // Implements a parser for an extended Backus-Naur form (BNF), producing the
2 | // binary context-free grammar format specified by llama.h. Supports character
3 | // ranges, grouping, and repetition operators. As an example, a grammar for
4 | // arithmetic might look like:
5 | //
6 | // root ::= expr
7 | // expr ::= term ([-+*/] term)*
8 | // term ::= num | "(" space expr ")" space
9 | // num ::= [0-9]+ space
10 | // space ::= [ \t\n]*
11 |
12 | #pragma once
13 | #include "llama.h"
14 | #include <vector>
15 | #include <map>