Start working on Galah

This commit is contained in:
t3chn0m4g3 2024-09-17 18:27:27 +02:00
parent 33a197f4a6
commit b2467d4f40
4 changed files with 123 additions and 12 deletions


@@ -1,6 +1,7 @@
# T-Pot: LLM
networks:
beelzebub_local:
galah_local:
spiderfoot_local:
ewsposter_local:
@@ -51,20 +52,52 @@ services:
- beelzebub_local
ports:
- "22:22"
- "80:80"
# - "80:80"
# - "2222:2222"
# - "3306:3306"
# - "8080:8080"
image: ${TPOT_REPO}/beelzebub:${TPOT_VERSION}
environment:
LLMMODEL: ${LLMMODEL}
LLMHOST: ${LLMHOST}
OLLAMAMODEL: ${OLLAMAMODEL}
LLM_MODEL: ${BEELZEBUB_LLM_MODEL}
LLM_HOST: ${BEELZEBUB_LLM_HOST}
OLLAMA_MODEL: ${BEELZEBUB_OLLAMA_MODEL}
read_only: true
volumes:
- ${TPOT_DATA_PATH}/beelzebub/key:/opt/beelzebub/configurations/key
- ${TPOT_DATA_PATH}/beelzebub/log:/opt/beelzebub/configurations/log
# Galah service
galah:
container_name: galah
restart: always
depends_on:
tpotinit:
condition: service_healthy
# cpu_count: 1
# cpus: 0.25
networks:
- galah_local
ports:
- "80:80"
- "443:443"
- "8443:8443"
- "8888:8888"
image: ${TPOT_REPO}/galah:${TPOT_VERSION}
environment:
LLM_PROVIDER: ${GALAH_LLM_PROVIDER}
LLM_SERVER_URL: ${GALAH_LLM_SERVER_URL}
LLM_MODEL: ${GALAH_LLM_MODEL}
# LLM_TEMPERATURE: ${GALAH_LLM_TEMPERATURE}
# LLM_API_KEY: ${GALAH_LLM_API_KEY}
# LLM_CLOUD_LOCATION: ${GALAH_LLM_CLOUD_LOCATION}
# LLM_CLOUD_PROJECT: ${GALAH_LLM_CLOUD_PROJECT}
read_only: true
volumes:
- ${TPOT_DATA_PATH}/galah/cache:/opt/galah/config/cache
- ${TPOT_DATA_PATH}/galah/cert:/opt/galah/config/cert
- ${TPOT_DATA_PATH}/galah/log:/opt/galah/log
##################
#### NSM
##################
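The new galah service runs read_only with three bind-mounted data directories, and the image (Dockerfile below) drops privileges to the galah user, UID/GID 2000. The mounted host paths therefore need to exist and be writable before the stack comes up; a minimal prep sketch, assuming TPOT_DATA_PATH resolves to ./data (the concrete path depends on your .env):

# Create the Galah data directories and hand them to the container user (UID/GID 2000).
mkdir -p ./data/galah/cache ./data/galah/cert ./data/galah/log
sudo chown -R 2000:2000 ./data/galah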

docker/galah/Dockerfile (new file, +29 lines)

@@ -0,0 +1,29 @@
FROM golang:1.23-alpine AS builder
RUN <<EOF
apk -U add build-base git
mkdir -p /opt
cd /opt
git clone https://github.com/t3chn0m4g3/galah
EOF
WORKDIR /opt/galah
ENV CGO_ENABLED=1
RUN <<EOF
go mod download
go build -o galah ./cmd/galah
EOF
#
FROM alpine:3.20
#
COPY --from=builder /opt/galah/ /opt/galah/
#
# Setup user, groups and configs
RUN <<EOF
apk -U add bash openssl
addgroup -g 2000 galah
adduser -S -s /bin/ash -u 2000 -D -g 2000 galah
EOF
#
# Start galah
WORKDIR /opt/galah
USER galah:galah
CMD ["./entrypoint.sh"]
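entrypoint.sh itself is not part of this diff; it comes with the cloned t3chn0m4g3/galah fork. Given that the image installs openssl and both compose files mount a cert directory, a plausible minimal sketch could look like the following (file names and the flag-less invocation are assumptions, not the fork's actual script):

#!/bin/bash
# Hypothetical entrypoint sketch -- the real script ships in the cloned repo.
CERT_DIR=/opt/galah/config/cert
if [ ! -f "$CERT_DIR/cert.pem" ]; then
  # Self-signed certificate for the TLS listeners (443, 8443).
  openssl req -x509 -newkey rsa:2048 -nodes -days 365 \
    -keyout "$CERT_DIR/key.pem" -out "$CERT_DIR/cert.pem" -subj "/CN=localhost"
fi
# Runtime configuration is expected via the LLM_* environment variables from docker-compose.
exec ./galah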


@@ -0,0 +1,33 @@
networks:
galah_local:
services:
# Galah service
galah:
build: .
container_name: galah
restart: always
# cpu_count: 1
# cpus: 0.25
networks:
- galah_local
ports:
- "80:80"
- "443:443"
- "8443:8443"
- "8888:8888"
image: dtagdevsec/galah:24.04
environment:
LLM_PROVIDER: "ollama"
LLM_SERVER_URL: "http://ollama.local:11434"
LLM_MODEL: "gemma2"
# LLM_TEMPERATURE: ${GALAH_LLM_TEMPERATURE}
# LLM_API_KEY: ${GALAH_LLM_API_KEY}
# LLM_CLOUD_LOCATION: ${GALAH_LLM_CLOUD_LOCATION}
# LLM_CLOUD_PROJECT: ${GALAH_LLM_CLOUD_PROJECT}
read_only: true
volumes:
- $HOME/tpotce/data/galah/cache:/opt/galah/config/cache
- $HOME/tpotce/data/galah/cert:/opt/galah/config/cert
- $HOME/tpotce/data/galah/log:/opt/galah/log
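This standalone compose file makes it possible to build and test the image outside of a full T-Pot deployment. A typical round trip, mirroring the volume paths above (sudo may be needed for the chown):

# Prepare writable data directories for the read-only container (UID/GID 2000).
mkdir -p $HOME/tpotce/data/galah/cache $HOME/tpotce/data/galah/cert $HOME/tpotce/data/galah/log
sudo chown -R 2000:2000 $HOME/tpotce/data/galah
# Build from the Dockerfile above, start detached, then follow the honeypot logs.
docker compose up --build -d
docker compose logs -f galah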


@@ -104,14 +104,30 @@ OINKCODE=OPEN
# Beelzebub is not part of the standard edition; please follow the README for setup.
# It is recommended to use the Ollama backend to keep costs at bay.
# Remember to rate limit API usage / set budget alerts when using the ChatGPT API.
# LLMMODEL: Set to "ollama" or "gpt4-o".
# LLMHOST: When using "ollama" set it to the URL of your Ollama backend.
# OLLAMAMODEL: Set to the model you are serving on your Ollama backend, i.e. "llama3.1".
LLMMODEL: "ollama"
LLMHOST: "http://ollama.local:11434/api/chat"
OLLAMAMODEL: "llama3.1"
#LLMMODEL: "gpt4-o"
#OPENAISECRETKEY: "sk-proj-123456"
# BEELZEBUB_LLM_MODEL: Set to "ollama" or "gpt4-o".
# BEELZEBUB_LLM_HOST: When using "ollama" set it to the URL of your Ollama backend.
# BEELZEBUB_OLLAMA_MODEL: Set to the model you are serving on your Ollama backend, e.g. "llama3.1".
# BEELZEBUB_LLM_MODEL: "gpt4-o"
# BEELZEBUB_OPENAISECRETKEY: "sk-proj-123456"
BEELZEBUB_LLM_MODEL: "ollama"
BEELZEBUB_LLM_HOST: "http://ollama.local:11434/api/chat"
BEELZEBUB_OLLAMA_MODEL: "llama3.1"
# Galah is an LLM-powered web honeypot supporting various LLM backends.
# Galah is not part of the standard edition; please follow the README for setup.
# It is recommended to use the Ollama backend to keep costs at bay.
# Remember to rate limit API usage / set budget alerts when using the ChatGPT API.
# GALAH_LLM_PROVIDER: Set to "ollama" or another provider supported by Galah (e.g. "openai").
# GALAH_LLM_SERVER_URL: When using "ollama" set it to the URL of your Ollama backend.
# GALAH_LLM_MODEL: Set to the model you are serving on your Ollama backend, e.g. "llama3.1".
# GALAH_LLM_TEMPERATURE: "1"
# GALAH_LLM_API_KEY: "sk-XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX"
# GALAH_LLM_CLOUD_LOCATION: ""
# GALAH_LLM_CLOUD_PROJECT: ""
GALAH_LLM_PROVIDER: "ollama"
GALAH_LLM_SERVER_URL: "http://ollama.local:11434/api/chat"
GALAH_LLM_MODEL: "gemma2"
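Before enabling either honeypot it is worth confirming that the configured Ollama backend actually answers. A quick smoke test against the chat endpoint configured above (hostname and model taken from this file; adjust both to your setup):

curl -s http://ollama.local:11434/api/chat -d '{
  "model": "gemma2",
  "messages": [{"role": "user", "content": "Reply with OK"}],
  "stream": false
}'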
###################################################################################
# NEVER MAKE CHANGES TO THIS SECTION UNLESS YOU REALLY KNOW WHAT YOU ARE DOING!!! #