# llama-server.service — Persistent LLM inference backend for Christopher
#
# Install on t3610:
#   sudo cp llama-server.service /etc/systemd/system/
#   sudo systemctl daemon-reload
#   sudo systemctl enable llama-server
#   sudo systemctl start llama-server
#
# Check status:
#   sudo systemctl status llama-server
#   journalctl -u llama-server -f
#
# Christopher then connects with the --no-server flag:
#   python3 ~/Projects/Christopher-AI/christopher.py --chat --no-server
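#
# Quick smoke test once the service is running (this assumes a recent
# llama.cpp build, where llama-server exposes a /health endpoint):
#   curl http://127.0.0.1:8080/health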

[Unit]
Description=llama-server — Local LLM inference for Christopher AI
After=network.target
Documentation=https://gitlab.com/JRM-FusionAL/Christopher-AI

[Service]
Type=simple
User=jrm_fusional
Group=jrm_fusional
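# Flag rundown (values are this host's tuning, not llama-server defaults):
#   -m    path to the GGUF model file
#   -ngl  offload up to 99 layers to the GPU (in practice, all of them)
#   -t    CPU threads for whatever stays on the CPU
#   -c    context window size in tokens
#   --host/--port  bind to loopback only, so the API is not exposed on the LAN
#   --log-disable  suppress request logging (journald still captures stdout/stderr)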
ExecStart=/home/jrm_fusional/llama.cpp/build/bin/llama-server \
-m /home/jrm_fusional/llama.cpp/models/Llama-3.2-3B-Instruct-Q4_K_M.gguf \
-ngl 99 \
-t 4 \
-c 2048 \
--host 127.0.0.1 \
--port 8080 \
--log-disable
# Restart automatically if it crashes
Restart=on-failure
RestartSec=10
# Allow extra time for the GPU to initialize at boot
TimeoutStartSec=120
# Log to journald
StandardOutput=journal
StandardError=journal
SyslogIdentifier=llama-server
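# Optional hardening, left commented out as a sketch; these are standard
# systemd sandboxing directives, but verify each one on this host before
# enabling (the binary and model both live under /home):
#NoNewPrivileges=true
#ProtectSystem=full
#PrivateTmp=true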

[Install]
WantedBy=multi-user.target