Finalize adding galah

This commit is contained in:
t3chn0m4g3 2024-10-04 20:03:09 +02:00
parent c6d76f51fb
commit c0b4dd1f8e
7 changed files with 30 additions and 15 deletions

32
.env
View file

@@ -104,14 +104,30 @@ OINKCODE=OPEN
# Beelzebub is not part of the standard edition, please follow the README regarding setup.
# It is recommended to use the Ollama backend to keep costs at bay.
# Remember to rate limit API usage / set budget alerts when using ChatGPT API.
# LLMMODEL: Set to "ollama" or "gpt4-o".
# LLMHOST: When using "ollama" set it to the URL of your Ollama backend.
# OLLAMAMODEL: Set to the model you are serving on your Ollama backend, i.e. "llama3.1".
LLMMODEL: "ollama"
LLMHOST: "http://ollama.local:11434/api/chat"
OLLAMAMODEL: "llama3.1"
#LLMMODEL: "gpt4-o"
#OPENAISECRETKEY: "sk-proj-123456"
# BEELZEBUB_LLM_MODEL: Set to "ollama" or "gpt4-o".
# BEELZEBUB_LLM_HOST: When using "ollama" set it to the URL of your Ollama backend.
# BEELZEBUB_OLLAMA_MODEL: Set to the model you are serving on your Ollama backend, i.e. "openchat".
# BEELZEBUB_LLM_MODEL: "gpt4-o"
# BEELZEBUB_OPENAISECRETKEY: "sk-proj-123456"
BEELZEBUB_LLM_MODEL: "ollama"
BEELZEBUB_LLM_HOST: "http://ollama.local:11434/api/chat"
BEELZEBUB_OLLAMA_MODEL: "openchat"
# Galah is a LLM-powered web honeypot supporting various LLM backends.
# Galah is not part of the standard edition, please follow the README regarding setup.
# It is recommended to use the Ollama backend to keep costs at bay.
# Remember to rate limit API usage / set budget alerts when using ChatGPT API.
# GALAH_LLM_PROVIDER: Set to "ollama" or "gpt4-o".
# GALAH_LLM_SERVER_URL: When using "ollama" set it to the URL of your Ollama backend.
# GALAH_LLM_MODEL: Set to the model you are serving on your Ollama backend, i.e. "llama3".
# GALAH_LLM_TEMPERATURE: "1"
# GALAH_LLM_API_KEY: "sk-XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX"
# GALAH_LLM_CLOUD_LOCATION: ""
# GALAH_LLM_CLOUD_PROJECT: ""
GALAH_LLM_PROVIDER: "ollama"
GALAH_LLM_SERVER_URL: "http://ollama.local:11434"
GALAH_LLM_MODEL: "llama3"
###################################################################################
# NEVER MAKE CHANGES TO THIS SECTION UNLESS YOU REALLY KNOW WHAT YOU ARE DOING!!! #

View file

@@ -81,7 +81,7 @@ services:
- "80:80"
- "443:443"
- "8443:8443"
- "8888:8888"
- "8080:8080"
image: ${TPOT_REPO}/galah:${TPOT_VERSION}
environment:
LLM_PROVIDER: ${GALAH_LLM_PROVIDER}

View file

@@ -14,11 +14,10 @@ services:
- beelzebub_local
ports:
- "22:22"
- "2222:2222"
- "8080:8080"
- "8081:8081"
- "80:80"
- "2222:2222"
- "3306:3306"
- "8080:8080"
environment:
LLMMODEL: "ollama"
LLMHOST: "http://ollama.local:11434/api/chat"

View file

@@ -16,7 +16,7 @@ services:
- "80:80"
- "443:443"
- "8443:8443"
- "8888:8888"
- "8080:8080"
image: dtagdevsec/galah:24.04
environment:
LLM_PROVIDER: "ollama"

Binary file not shown.

View file

@@ -106,7 +106,7 @@ OINKCODE=OPEN
# Remember to rate limit API usage / set budget alerts when using ChatGPT API.
# BEELZEBUB_LLM_MODEL: Set to "ollama" or "gpt4-o".
# BEELZEBUB_LLM_HOST: When using "ollama" set it to the URL of your Ollama backend.
# BEELZEBUB_OLLAMA_MODEL: Set to the model you are serving on your Ollama backend, i.e. "llama3.1".
# BEELZEBUB_OLLAMA_MODEL: Set to the model you are serving on your Ollama backend, i.e. "openchat".
# BEELZEBUB_LLM_MODEL: "gpt4-o"
# BEELZEBUB_OPENAISECRETKEY: "sk-proj-123456"
BEELZEBUB_LLM_MODEL: "ollama"
@@ -119,7 +119,7 @@ BEELZEBUB_OLLAMA_MODEL: "openchat"
# Remember to rate limit API usage / set budget alerts when using ChatGPT API.
# GALAH_LLM_PROVIDER: Set to "ollama" or "gpt4-o".
# GALAH_LLM_SERVER_URL: When using "ollama" set it to the URL of your Ollama backend.
# GALAH_LLM_MODEL: Set to the model you are serving on your Ollama backend, i.e. "llama3.1".
# GALAH_LLM_MODEL: Set to the model you are serving on your Ollama backend, i.e. "llama3".
# GALAH_LLM_TEMPERATURE: "1"
# GALAH_LLM_API_KEY: "sk-XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX"
# GALAH_LLM_CLOUD_LOCATION: ""