3x-ui/web/service/server.go

1308 lines
32 KiB
Go
Raw Normal View History

2023-02-09 19:18:06 +00:00
package service
import (
"archive/zip"
"bufio"
2023-02-09 19:18:06 +00:00
"bytes"
"encoding/json"
"fmt"
"io"
"io/fs"
2023-05-05 18:19:42 +00:00
"mime/multipart"
2023-02-09 19:18:06 +00:00
"net/http"
"os"
"os/exec"
2025-09-10 19:12:37 +00:00
"path/filepath"
"regexp"
2023-02-09 19:18:06 +00:00
"runtime"
"strconv"
"strings"
2025-09-16 12:15:18 +00:00
"sync"
2023-02-09 19:18:06 +00:00
"time"
2025-09-19 08:05:43 +00:00
"github.com/mhsanaei/3x-ui/v2/config"
"github.com/mhsanaei/3x-ui/v2/database"
"github.com/mhsanaei/3x-ui/v2/logger"
"github.com/mhsanaei/3x-ui/v2/util/common"
"github.com/mhsanaei/3x-ui/v2/util/sys"
"github.com/mhsanaei/3x-ui/v2/xray"
2023-02-09 19:18:06 +00:00
2025-09-08 23:22:43 +00:00
"github.com/google/uuid"
2024-06-17 19:48:49 +00:00
"github.com/shirou/gopsutil/v4/cpu"
"github.com/shirou/gopsutil/v4/disk"
"github.com/shirou/gopsutil/v4/host"
"github.com/shirou/gopsutil/v4/load"
"github.com/shirou/gopsutil/v4/mem"
"github.com/shirou/gopsutil/v4/net"
2023-02-09 19:18:06 +00:00
)
2025-09-20 07:35:50 +00:00
// ProcessState represents the current state of a system process.
type ProcessState string

// Process state constants describing the lifecycle of the managed Xray process.
const (
	Running ProcessState = "running" // Process is running normally
	Stop    ProcessState = "stop"    // Process is stopped
	Error   ProcessState = "error"   // Process is in error state
)
2025-09-20 07:35:50 +00:00
// Status represents comprehensive system and application status information.
// It includes CPU, memory, disk, network statistics, and Xray process status.
// A snapshot is produced by ServerService.GetStatus.
type Status struct {
	T           time.Time `json:"-"`           // sample timestamp; used to derive deltas, not serialized
	Cpu         float64   `json:"cpu"`         // smoothed CPU utilization, percent 0..100
	CpuCores    int       `json:"cpuCores"`    // CPU core count (cpu.Counts(false))
	LogicalPro  int       `json:"logicalPro"`  // logical processor count (runtime.NumCPU)
	CpuSpeedMhz float64   `json:"cpuSpeedMhz"` // CPU frequency in MHz, cached after first successful probe
	Mem         struct {
		Current uint64 `json:"current"` // used bytes
		Total   uint64 `json:"total"`   // total bytes
	} `json:"mem"`
	Swap struct {
		Current uint64 `json:"current"` // used bytes
		Total   uint64 `json:"total"`   // total bytes
	} `json:"swap"`
	Disk struct {
		Current uint64 `json:"current"` // used bytes on "/"
		Total   uint64 `json:"total"`   // total bytes on "/"
	} `json:"disk"`
	Xray struct {
		State    ProcessState `json:"state"`    // running / stop / error
		ErrorMsg string       `json:"errorMsg"` // last error text when not running
		Version  string       `json:"version"`  // Xray-core version string
	} `json:"xray"`
	Uptime   uint64    `json:"uptime"`   // host uptime in seconds
	Loads    []float64 `json:"loads"`    // 1/5/15-minute load averages
	TcpCount int       `json:"tcpCount"` // open TCP connection count
	UdpCount int       `json:"udpCount"` // open UDP connection count
	NetIO    struct {
		Up   uint64 `json:"up"`   // upload rate, bytes/second (delta vs previous sample)
		Down uint64 `json:"down"` // download rate, bytes/second
	} `json:"netIO"`
	NetTraffic struct {
		Sent uint64 `json:"sent"` // cumulative bytes sent
		Recv uint64 `json:"recv"` // cumulative bytes received
	} `json:"netTraffic"`
	PublicIP struct {
		IPv4 string `json:"ipv4"` // detected public IPv4, or "N/A"
		IPv6 string `json:"ipv6"` // detected public IPv6, or "N/A"
	} `json:"publicIP"`
	AppStats struct {
		Threads uint32 `json:"threads"` // goroutine count of the panel process
		Mem     uint64 `json:"mem"`     // runtime.MemStats.Sys of the panel process
		Uptime  uint64 `json:"uptime"`  // panel process uptime in seconds
	} `json:"appStats"`
}
2025-09-20 07:35:50 +00:00
// Release represents information about a software release from GitHub.
// Only the tag name is decoded from the GitHub releases API response.
type Release struct {
	TagName string `json:"tag_name"` // The tag name of the release, e.g. "v1.8.24"
}
2025-09-20 07:35:50 +00:00
// ServerService provides business logic for server monitoring and management.
// It handles system status collection, IP detection, and application statistics.
// The zero value is usable; caches fill in lazily on first use.
type ServerService struct {
	xrayService    XrayService    // controls and queries the Xray process
	inboundService InboundService // used to migrate inbound data after a DB import

	cachedIPv4 string // last detected public IPv4 ("N/A" when every service failed)
	cachedIPv6 string // last detected public IPv6 ("N/A" when every service failed)
	noIPv6     bool   // set once IPv6 detection returned "N/A"; suppresses further probes

	mu                 sync.Mutex    // guards lastCPUTimes, hasLastCPUSample, hasNativeCPUSample, emaCPU, cpuHistory
	lastCPUTimes       cpu.TimesStat // previous cumulative CPU times for delta computation
	hasLastCPUSample   bool          // whether lastCPUTimes holds a valid baseline
	hasNativeCPUSample bool          // whether the native CPU counter baseline is initialized
	emaCPU             float64       // exponential moving average of CPU utilization (percent)
	cpuHistory         []CPUSample   // recent samples, ascending by time, capacity-capped
	cachedCpuSpeedMhz  float64       // cached CPU frequency; 0 until a probe succeeds
	lastCpuInfoAttempt time.Time     // throttles repeated cpu.Info probes
}
2025-09-16 23:08:59 +00:00
// AggregateCpuHistory returns up to maxPoints averaged buckets of size bucketSeconds over recent data.
// Each returned element is {"t": bucketStartUnix, "cpu": averagePercent}.
func (s *ServerService) AggregateCpuHistory(bucketSeconds int, maxPoints int) []map[string]any {
	if bucketSeconds <= 0 || maxPoints <= 0 {
		return nil
	}
	cutoff := time.Now().Add(-time.Duration(bucketSeconds*maxPoints) * time.Second).Unix()

	// Copy the relevant tail of the history under the lock, then work unlocked.
	s.mu.Lock()
	hist := s.cpuHistory
	// History is sorted ascending; walk backwards to find the first in-window sample.
	first := 0
	for i := len(hist) - 1; i >= 0; i-- {
		if hist[i].T < cutoff {
			first = i + 1
			break
		}
	}
	if first >= len(hist) {
		s.mu.Unlock()
		return []map[string]any{}
	}
	samples := append([]CPUSample(nil), hist[first:]...)
	s.mu.Unlock()

	if len(samples) == 0 {
		return []map[string]any{}
	}

	width := int64(bucketSeconds)
	var result []map[string]any
	var bucketVals []float64
	bucketStart := (samples[0].T / width) * width

	// emit averages the accumulated values into one output point.
	emit := func(ts int64) {
		if len(bucketVals) == 0 {
			return
		}
		total := 0.0
		for _, v := range bucketVals {
			total += v
		}
		result = append(result, map[string]any{"t": ts, "cpu": total / float64(len(bucketVals))})
		bucketVals = bucketVals[:0]
	}

	for _, sample := range samples {
		if start := (sample.T / width) * width; start != bucketStart {
			emit(bucketStart)
			bucketStart = start
		}
		bucketVals = append(bucketVals, sample.Cpu)
	}
	emit(bucketStart)

	if len(result) > maxPoints {
		result = result[len(result)-maxPoints:]
	}
	return result
}
// CPUSample is a single CPU utilization sample kept in the in-memory history.
type CPUSample struct {
	T   int64   `json:"t"`   // unix seconds
	Cpu float64 `json:"cpu"` // percent 0..100
}
// LogEntry is a single parsed line from the Xray access log.
type LogEntry struct {
	DateTime    time.Time // timestamp of the log line, converted to UTC
	FromAddress string    // source address (leading "/" stripped)
	ToAddress   string    // destination address accepted by Xray
	Inbound     string    // inbound tag (token after "[")
	Outbound    string    // outbound tag (token before "]")
	Email       string    // client email, when present
	Event       int       // routing classification: direct / blocked / proxied
}
// getPublicIP queries the given HTTP service for this host's public IP
// address. It returns the trimmed response body, or "N/A" on any transport
// error, non-200 status, or empty response.
func getPublicIP(url string) string {
	client := &http.Client{
		Timeout: 3 * time.Second,
	}
	resp, err := client.Get(url)
	if err != nil {
		return "N/A"
	}
	defer resp.Body.Close()

	// Don't retry if access is blocked or region-restricted
	if resp.StatusCode == http.StatusForbidden || resp.StatusCode == http.StatusUnavailableForLegalReasons {
		return "N/A"
	}
	if resp.StatusCode != http.StatusOK {
		return "N/A"
	}

	// An IP address is at most 45 characters (IPv6); cap the read so a
	// misbehaving or malicious service cannot make us buffer an
	// arbitrarily large body.
	ip, err := io.ReadAll(io.LimitReader(resp.Body, 128))
	if err != nil {
		return "N/A"
	}

	ipString := strings.TrimSpace(string(ip))
	if ipString == "" {
		return "N/A"
	}
	return ipString
}
2023-02-09 19:18:06 +00:00
func (s *ServerService) GetStatus(lastStatus *Status) *Status {
now := time.Now()
status := &Status{
T: now,
}
2025-03-12 18:20:13 +00:00
// CPU stats
2025-09-16 12:15:18 +00:00
util, err := s.sampleCPUUtilization()
2023-02-09 19:18:06 +00:00
if err != nil {
logger.Warning("get cpu percent failed:", err)
} else {
2025-09-16 12:15:18 +00:00
status.Cpu = util
2023-02-09 19:18:06 +00:00
}
status.CpuCores, err = cpu.Counts(false)
if err != nil {
logger.Warning("get cpu cores count failed:", err)
}
2024-05-28 13:11:46 +00:00
status.LogicalPro = runtime.NumCPU()
2025-09-16 23:08:59 +00:00
if status.CpuSpeedMhz = s.cachedCpuSpeedMhz; s.cachedCpuSpeedMhz == 0 && time.Since(s.lastCpuInfoAttempt) > 5*time.Minute {
s.lastCpuInfoAttempt = time.Now()
done := make(chan struct{})
go func() {
defer close(done)
cpuInfos, err := cpu.Info()
if err != nil {
logger.Warning("get cpu info failed:", err)
return
}
if len(cpuInfos) > 0 {
s.cachedCpuSpeedMhz = cpuInfos[0].Mhz
status.CpuSpeedMhz = s.cachedCpuSpeedMhz
} else {
logger.Warning("could not find cpu info")
}
}()
select {
case <-done:
case <-time.After(1500 * time.Millisecond):
logger.Warning("cpu info query timed out; will retry later")
}
} else if s.cachedCpuSpeedMhz != 0 {
status.CpuSpeedMhz = s.cachedCpuSpeedMhz
}
2025-03-12 18:20:13 +00:00
// Uptime
2023-02-09 19:18:06 +00:00
upTime, err := host.Uptime()
if err != nil {
logger.Warning("get uptime failed:", err)
} else {
status.Uptime = upTime
}
2025-03-12 18:20:13 +00:00
// Memory stats
2023-02-09 19:18:06 +00:00
memInfo, err := mem.VirtualMemory()
if err != nil {
logger.Warning("get virtual memory failed:", err)
} else {
status.Mem.Current = memInfo.Used
status.Mem.Total = memInfo.Total
}
swapInfo, err := mem.SwapMemory()
if err != nil {
logger.Warning("get swap memory failed:", err)
} else {
status.Swap.Current = swapInfo.Used
status.Swap.Total = swapInfo.Total
}
2025-03-12 18:20:13 +00:00
// Disk stats
diskInfo, err := disk.Usage("/")
2023-02-09 19:18:06 +00:00
if err != nil {
2025-03-12 18:20:13 +00:00
logger.Warning("get disk usage failed:", err)
2023-02-09 19:18:06 +00:00
} else {
2025-03-12 18:20:13 +00:00
status.Disk.Current = diskInfo.Used
status.Disk.Total = diskInfo.Total
2023-02-09 19:18:06 +00:00
}
2025-03-12 18:20:13 +00:00
// Load averages
2023-02-09 19:18:06 +00:00
avgState, err := load.Avg()
if err != nil {
logger.Warning("get load avg failed:", err)
} else {
status.Loads = []float64{avgState.Load1, avgState.Load5, avgState.Load15}
}
2025-03-12 18:20:13 +00:00
// Network stats
2023-02-09 19:18:06 +00:00
ioStats, err := net.IOCounters(false)
if err != nil {
logger.Warning("get io counters failed:", err)
} else if len(ioStats) > 0 {
ioStat := ioStats[0]
status.NetTraffic.Sent = ioStat.BytesSent
status.NetTraffic.Recv = ioStat.BytesRecv
if lastStatus != nil {
duration := now.Sub(lastStatus.T)
seconds := float64(duration) / float64(time.Second)
up := uint64(float64(status.NetTraffic.Sent-lastStatus.NetTraffic.Sent) / seconds)
down := uint64(float64(status.NetTraffic.Recv-lastStatus.NetTraffic.Recv) / seconds)
status.NetIO.Up = up
status.NetIO.Down = down
}
} else {
logger.Warning("can not find io counters")
}
2023-03-17 16:07:49 +00:00
2025-03-12 18:20:13 +00:00
// TCP/UDP connections
2023-02-09 19:18:06 +00:00
status.TcpCount, err = sys.GetTCPCount()
if err != nil {
logger.Warning("get tcp connections failed:", err)
}
status.UdpCount, err = sys.GetUDPCount()
if err != nil {
logger.Warning("get udp connections failed:", err)
}
2023-03-17 16:07:49 +00:00
2025-03-12 18:20:13 +00:00
// IP fetching with caching
2025-08-21 12:24:25 +00:00
showIp4ServiceLists := []string{
"https://api4.ipify.org",
"https://ipv4.icanhazip.com",
"https://v4.api.ipinfo.io/ip",
"https://ipv4.myexternalip.com/raw",
"https://4.ident.me",
"https://check-host.net/ip",
}
showIp6ServiceLists := []string{
"https://api6.ipify.org",
"https://ipv6.icanhazip.com",
"https://v6.api.ipinfo.io/ip",
"https://ipv6.myexternalip.com/raw",
"https://6.ident.me",
}
if s.cachedIPv4 == "" {
for _, ip4Service := range showIp4ServiceLists {
s.cachedIPv4 = getPublicIP(ip4Service)
if s.cachedIPv4 != "N/A" {
break
}
}
}
if s.cachedIPv6 == "" && !s.noIPv6 {
for _, ip6Service := range showIp6ServiceLists {
s.cachedIPv6 = getPublicIP(ip6Service)
if s.cachedIPv6 != "N/A" {
break
}
}
2025-03-12 18:20:13 +00:00
}
if s.cachedIPv6 == "N/A" {
s.noIPv6 = true
}
2025-03-12 18:20:13 +00:00
status.PublicIP.IPv4 = s.cachedIPv4
status.PublicIP.IPv6 = s.cachedIPv6
2025-03-12 18:20:13 +00:00
// Xray status
2023-02-09 19:18:06 +00:00
if s.xrayService.IsXrayRunning() {
status.Xray.State = Running
status.Xray.ErrorMsg = ""
} else {
err := s.xrayService.GetXrayErr()
if err != nil {
status.Xray.State = Error
} else {
status.Xray.State = Stop
}
status.Xray.ErrorMsg = s.xrayService.GetXrayResult()
}
status.Xray.Version = s.xrayService.GetXrayVersion()
2025-03-12 18:20:13 +00:00
// Application stats
2023-08-08 21:07:05 +00:00
var rtm runtime.MemStats
runtime.ReadMemStats(&rtm)
status.AppStats.Mem = rtm.Sys
status.AppStats.Threads = uint32(runtime.NumGoroutine())
2023-12-04 18:13:21 +00:00
if p != nil && p.IsRunning() {
2023-08-08 21:07:05 +00:00
status.AppStats.Uptime = p.GetUptime()
} else {
status.AppStats.Uptime = 0
}
2023-02-09 19:18:06 +00:00
return status
}
2025-09-16 12:15:18 +00:00
func (s *ServerService) AppendCpuSample(t time.Time, v float64) {
2025-09-16 23:08:59 +00:00
const capacity = 9000 // ~5 hours @ 2s interval
2025-09-16 12:15:18 +00:00
s.mu.Lock()
defer s.mu.Unlock()
p := CPUSample{T: t.Unix(), Cpu: v}
2025-09-16 23:08:59 +00:00
if n := len(s.cpuHistory); n > 0 && s.cpuHistory[n-1].T == p.T {
s.cpuHistory[n-1] = p
} else {
s.cpuHistory = append(s.cpuHistory, p)
2025-09-16 12:15:18 +00:00
}
2025-09-16 23:08:59 +00:00
if len(s.cpuHistory) > capacity {
s.cpuHistory = s.cpuHistory[len(s.cpuHistory)-capacity:]
2025-09-16 12:15:18 +00:00
}
}
// sampleCPUUtilization returns the current smoothed CPU utilization in
// percent (0..100). It prefers a native platform-specific counter
// (sys.CPUPercentRaw) and falls back to gopsutil cumulative CPU times.
// Both paths smooth the raw reading with an exponential moving average;
// all sampling state is guarded by s.mu.
func (s *ServerService) sampleCPUUtilization() (float64, error) {
	// Try native platform-specific CPU implementation first (Windows, Linux, macOS)
	if pct, err := sys.CPUPercentRaw(); err == nil {
		s.mu.Lock()
		// First call to native method returns 0 (initializes baseline)
		if !s.hasNativeCPUSample {
			s.hasNativeCPUSample = true
			s.mu.Unlock()
			return 0, nil
		}
		// Smooth with EMA
		const alpha = 0.3
		if s.emaCPU == 0 {
			s.emaCPU = pct
		} else {
			s.emaCPU = alpha*pct + (1-alpha)*s.emaCPU
		}
		val := s.emaCPU
		s.mu.Unlock()
		return val, nil
	}

	// If native call fails, fall back to gopsutil times
	// Read aggregate CPU times (all CPUs combined)
	times, err := cpu.Times(false)
	if err != nil {
		return 0, err
	}
	if len(times) == 0 {
		return 0, fmt.Errorf("no cpu times available")
	}
	cur := times[0]
	s.mu.Lock()
	defer s.mu.Unlock()
	// If this is the first sample, initialize and return current EMA (0 by default)
	if !s.hasLastCPUSample {
		s.lastCPUTimes = cur
		s.hasLastCPUSample = true
		return s.emaCPU, nil
	}
	// Compute busy and total deltas
	// Note: Guest and GuestNice times are already included in User and Nice respectively,
	// so we exclude them to avoid double-counting (Linux kernel accounting)
	idleDelta := cur.Idle - s.lastCPUTimes.Idle
	busyDelta := (cur.User - s.lastCPUTimes.User) +
		(cur.System - s.lastCPUTimes.System) +
		(cur.Nice - s.lastCPUTimes.Nice) +
		(cur.Iowait - s.lastCPUTimes.Iowait) +
		(cur.Irq - s.lastCPUTimes.Irq) +
		(cur.Softirq - s.lastCPUTimes.Softirq) +
		(cur.Steal - s.lastCPUTimes.Steal)
	totalDelta := busyDelta + idleDelta
	// Update last sample for next time
	s.lastCPUTimes = cur
	// Guard against division by zero or negative deltas (e.g., counter resets)
	if totalDelta <= 0 {
		return s.emaCPU, nil
	}
	raw := 100.0 * (busyDelta / totalDelta)
	// Clamp to the valid percentage range before smoothing.
	if raw < 0 {
		raw = 0
	}
	if raw > 100 {
		raw = 100
	}
	// Exponential moving average to smooth spikes
	const alpha = 0.3 // smoothing factor (0<alpha<=1). Higher = more responsive, lower = smoother
	if s.emaCPU == 0 {
		// Initialize EMA with the first real reading to avoid long warm-up from zero
		s.emaCPU = raw
	} else {
		s.emaCPU = alpha*raw + (1-alpha)*s.emaCPU
	}
	return s.emaCPU, nil
}
2023-02-09 19:18:06 +00:00
func (s *ServerService) GetXrayVersions() ([]string, error) {
const (
XrayURL = "https://api.github.com/repos/XTLS/Xray-core/releases"
bufferSize = 8192
)
resp, err := http.Get(XrayURL)
2023-02-09 19:18:06 +00:00
if err != nil {
return nil, err
}
defer resp.Body.Close()
// Check HTTP status code - GitHub API returns object instead of array on error
if resp.StatusCode != http.StatusOK {
bodyBytes, _ := io.ReadAll(resp.Body)
var errorResponse struct {
Message string `json:"message"`
}
if json.Unmarshal(bodyBytes, &errorResponse) == nil && errorResponse.Message != "" {
return nil, fmt.Errorf("GitHub API error: %s", errorResponse.Message)
}
return nil, fmt.Errorf("GitHub API returned status %d: %s", resp.StatusCode, resp.Status)
}
buffer := bytes.NewBuffer(make([]byte, bufferSize))
2023-02-09 19:18:06 +00:00
buffer.Reset()
if _, err := buffer.ReadFrom(resp.Body); err != nil {
2023-02-09 19:18:06 +00:00
return nil, err
}
var releases []Release
if err := json.Unmarshal(buffer.Bytes(), &releases); err != nil {
2023-02-09 19:18:06 +00:00
return nil, err
}
var versions []string
2023-02-09 19:18:06 +00:00
for _, release := range releases {
tagVersion := strings.TrimPrefix(release.TagName, "v")
tagParts := strings.Split(tagVersion, ".")
if len(tagParts) != 3 {
continue
}
major, err1 := strconv.Atoi(tagParts[0])
minor, err2 := strconv.Atoi(tagParts[1])
patch, err3 := strconv.Atoi(tagParts[2])
if err1 != nil || err2 != nil || err3 != nil {
continue
}
# Pull Request: Connection Reporting System & Improvements for Restricted Networks ## Description This PR introduces a comprehensive **Connection Reporting System** designed to improve the reliability and monitoring of connections, specifically tailored for environments with restricted internet access (e.g., active censorship, GFW). ### Key Changes 1. **New Reporting API (`/report`)**: * Added `ReportController` and `ReportService` to handle incoming connection reports. * Endpoint receives data such as `Latency`, `Success` status, `Protocol`, and Client Interface details. * Data is persisted to the database via the new `ConnectionReport` model. 2. **Subscription Link Updates**: * Modified `subService` to append a `reportUrl` parameter to generated subscription links (VLESS, VMess, etc.). * This allows compatible clients to automatically discover the reporting endpoint and send feedback. 3. **Database Integration**: * Added `ConnectionReport` schema to `database/model` and registered it in `database/db.go` for auto-migration. ## Why is this helpful for Restricted Internet Locations? In regions with heavy internet censorship, connection stability is volatile. * **Dynamic Reporting Endpoint**: The `reportUrl` parameter embedded in the subscription link explicitly tells the client *where* to send connection data. * **Bypassing Blocking**: By decoupling the reporting URL from the node address, clients can ensure diagnostic data reaches the panel even if specific node IPs are being interfered with (assuming the panel itself is reachable). * **Real-time Network Intelligence**: This mechanism enables the panel to aggregate "ground truth" data from clients inside the restricted network (e.g., latency, accessibility of specific protocols), allowing admins to react faster to blocking events. * **Protocol Performance Tracking**: Allows comparison of different protocols (Reality vs. VLESS+TLS vs. Trojan) based on real-world latency and success rates from actual users. 
* **Rapid Troubleshooting**: Administrators can see connection quality trends and rotate IPs/domains proactively when success rates drop, minimizing downtime for users. ## Technical Details * **API Endpoint**: `POST /report` * **Payload Format**: JSON containing `SystemInfo` (Interface), `ConnectionQuality` (Latency, Success), and `ProtocolInfo`. * **Security**: Reports are tied to valid client request contexts (implementation detail: ensure endpoint is rate-limited or authenticated if necessary, though currently designed for open reporting from valid sub links). ## How to Test 1. Update the panel. 2. Generate a subscription link. 3. Observe the `reportUrl` parameter in the link. 4. Simulate a client POST to the report URL and verify the entry in the `ConnectionReports` table.
2026-02-04 10:00:00 +00:00
if major > 26 || (major == 26 && minor > 1) || (major == 26 && minor == 1 && patch >= 18) {
versions = append(versions, release.TagName)
}
2023-02-09 19:18:06 +00:00
}
return versions, nil
}
func (s *ServerService) StopXrayService() error {
err := s.xrayService.StopXray()
if err != nil {
logger.Error("stop xray failed:", err)
return err
}
2023-03-16 22:01:14 +00:00
return nil
}
func (s *ServerService) RestartXrayService() error {
err := s.xrayService.RestartXray(true)
if err != nil {
logger.Error("start xray failed:", err)
return err
}
2023-03-16 22:01:14 +00:00
return nil
}
2023-02-09 19:18:06 +00:00
func (s *ServerService) downloadXRay(version string) (string, error) {
osName := runtime.GOOS
arch := runtime.GOARCH
switch osName {
case "darwin":
osName = "macos"
2025-09-10 19:12:37 +00:00
case "windows":
osName = "windows"
2023-02-09 19:18:06 +00:00
}
switch arch {
case "amd64":
arch = "64"
case "arm64":
arch = "arm64-v8a"
2024-07-08 16:19:40 +00:00
case "armv7":
arch = "arm32-v7a"
case "armv6":
arch = "arm32-v6"
case "armv5":
arch = "arm32-v5"
case "386":
arch = "32"
case "s390x":
arch = "s390x"
2023-02-09 19:18:06 +00:00
}
fileName := fmt.Sprintf("Xray-%s-%s.zip", osName, arch)
url := fmt.Sprintf("https://github.com/XTLS/Xray-core/releases/download/%s/%s", version, fileName)
2023-02-09 19:18:06 +00:00
resp, err := http.Get(url)
if err != nil {
return "", err
}
defer resp.Body.Close()
os.Remove(fileName)
file, err := os.Create(fileName)
if err != nil {
return "", err
}
defer file.Close()
_, err = io.Copy(file, resp.Body)
if err != nil {
return "", err
}
return fileName, nil
}
func (s *ServerService) UpdateXray(version string) error {
2025-09-10 19:12:37 +00:00
// 1. Stop xray before doing anything
if err := s.StopXrayService(); err != nil {
logger.Warning("failed to stop xray before update:", err)
}
// 2. Download the zip
2023-02-09 19:18:06 +00:00
zipFileName, err := s.downloadXRay(version)
if err != nil {
return err
}
2025-09-10 19:12:37 +00:00
defer os.Remove(zipFileName)
2023-02-09 19:18:06 +00:00
zipFile, err := os.Open(zipFileName)
if err != nil {
return err
}
2025-09-10 19:12:37 +00:00
defer zipFile.Close()
2023-02-09 19:18:06 +00:00
stat, err := zipFile.Stat()
if err != nil {
return err
}
reader, err := zip.NewReader(zipFile, stat.Size())
if err != nil {
return err
}
2025-09-10 19:12:37 +00:00
// 3. Helper to extract files
2023-02-09 19:18:06 +00:00
copyZipFile := func(zipName string, fileName string) error {
zipFile, err := reader.Open(zipName)
if err != nil {
return err
}
2025-09-10 19:12:37 +00:00
defer zipFile.Close()
os.MkdirAll(filepath.Dir(fileName), 0755)
2023-02-09 19:18:06 +00:00
os.Remove(fileName)
file, err := os.OpenFile(fileName, os.O_CREATE|os.O_RDWR|os.O_TRUNC, fs.ModePerm)
if err != nil {
return err
}
defer file.Close()
_, err = io.Copy(file, zipFile)
return err
}
2025-09-10 19:12:37 +00:00
// 4. Extract correct binary
if runtime.GOOS == "windows" {
targetBinary := filepath.Join("bin", "xray-windows-amd64.exe")
err = copyZipFile("xray.exe", targetBinary)
} else {
err = copyZipFile("xray", xray.GetBinaryPath())
}
if err != nil {
2023-02-09 19:18:06 +00:00
return err
}
2025-09-10 19:12:37 +00:00
// 5. Restart xray
if err := s.xrayService.RestartXray(true); err != nil {
logger.Error("start xray failed:", err)
return err
}
2023-02-09 19:18:06 +00:00
return nil
}
func (s *ServerService) GetLogs(count string, level string, syslog string) []string {
c, _ := strconv.Atoi(count)
var lines []string
if syslog == "true" {
// Check if running on Windows - journalctl is not available
if runtime.GOOS == "windows" {
return []string{"Syslog is not supported on Windows. Please use application logs instead by unchecking the 'Syslog' option."}
}
// Validate and sanitize count parameter
countInt, err := strconv.Atoi(count)
if err != nil || countInt < 1 || countInt > 10000 {
return []string{"Invalid count parameter - must be a number between 1 and 10000"}
}
// Validate level parameter - only allow valid syslog levels
validLevels := map[string]bool{
"0": true, "emerg": true,
"1": true, "alert": true,
"2": true, "crit": true,
"3": true, "err": true,
"4": true, "warning": true,
"5": true, "notice": true,
"6": true, "info": true,
"7": true, "debug": true,
}
if !validLevels[level] {
return []string{"Invalid level parameter - must be a valid syslog level"}
}
// Use hardcoded command with validated parameters
cmd := exec.Command("journalctl", "-u", "x-ui", "--no-pager", "-n", strconv.Itoa(countInt), "-p", level)
var out bytes.Buffer
cmd.Stdout = &out
err = cmd.Run()
if err != nil {
return []string{"Failed to run journalctl command! Make sure systemd is available and x-ui service is registered."}
2023-06-16 14:55:33 +00:00
}
lines = strings.Split(out.String(), "\n")
} else {
lines = logger.GetLogs(c, level)
}
return lines
}
// GetXrayLogs parses the Xray access log and returns up to count entries,
// optionally filtered by a substring and by event class. Lines routed via
// one of the freedoms outbound tags are classified as direct, via
// blackholes as blocked, and everything else as proxied; each class can be
// suppressed with the corresponding show* flag set to "false".
// Returns nil when the access log cannot be located or opened.
func (s *ServerService) GetXrayLogs(
	count string,
	filter string,
	showDirect string,
	showBlocked string,
	showProxy string,
	freedoms []string,
	blackholes []string) []LogEntry {
	const (
		Direct = iota
		Blocked
		Proxied
	)
	countInt, _ := strconv.Atoi(count)
	var entries []LogEntry

	pathToAccessLog, err := xray.GetAccessLogPath()
	if err != nil {
		return nil
	}
	file, err := os.Open(pathToAccessLog)
	if err != nil {
		return nil
	}
	defer file.Close()

	scanner := bufio.NewScanner(file)
	for scanner.Scan() {
		line := strings.TrimSpace(scanner.Text())
		if line == "" || strings.Contains(line, "api -> api") {
			// skipping empty lines and api calls
			continue
		}
		if filter != "" && !strings.Contains(line, filter) {
			// applying filter if it's not empty
			continue
		}

		var entry LogEntry
		parts := strings.Fields(line)
		for i, part := range parts {
			// The first two tokens form the timestamp. Guard len(parts) so a
			// one-token line cannot panic (previously parts[1] was indexed
			// unconditionally).
			if i == 0 && len(parts) > 1 {
				dateTime, err := time.ParseInLocation("2006/01/02 15:04:05.999999", parts[0]+" "+parts[1], time.Local)
				if err != nil {
					continue
				}
				entry.DateTime = dateTime.UTC()
			}
			// Keyword tokens take their value from the following token; all
			// i+1 accesses are bounds-checked (previously they could panic on
			// truncated lines).
			switch {
			case part == "from" && i+1 < len(parts):
				entry.FromAddress = strings.TrimLeft(parts[i+1], "/")
			case part == "accepted" && i+1 < len(parts):
				entry.ToAddress = strings.TrimLeft(parts[i+1], "/")
			case strings.HasPrefix(part, "["):
				entry.Inbound = part[1:]
			case strings.HasSuffix(part, "]"):
				entry.Outbound = part[:len(part)-1]
			case part == "email:" && i+1 < len(parts):
				entry.Email = parts[i+1]
			}
		}

		// Classify the line and honor the per-class visibility flags.
		if logEntryContains(line, freedoms) {
			if showDirect == "false" {
				continue
			}
			entry.Event = Direct
		} else if logEntryContains(line, blackholes) {
			if showBlocked == "false" {
				continue
			}
			entry.Event = Blocked
		} else {
			if showProxy == "false" {
				continue
			}
			entry.Event = Proxied
		}
		entries = append(entries, entry)
	}

	// Keep only the newest countInt entries.
	if len(entries) > countInt {
		entries = entries[len(entries)-countInt:]
	}
	return entries
}
// logEntryContains reports whether line mentions any of the given outbound
// tags in bracket-closed form, i.e. the tag immediately followed by "]".
func logEntryContains(line string, suffixes []string) bool {
	for _, tag := range suffixes {
		needle := tag + "]"
		if strings.Contains(line, needle) {
			return true
		}
	}
	return false
}
// GetConfigJson returns the active Xray configuration as generic JSON data
// (maps and slices), suitable for serving to the web UI.
func (s *ServerService) GetConfigJson() (any, error) {
	config, err := s.xrayService.GetXrayConfig()
	if err != nil {
		return nil, err
	}
	// Round-trip through JSON to turn the typed config into generic values.
	contents, err := json.MarshalIndent(config, "", " ")
	if err != nil {
		return nil, err
	}
	var jsonData any
	if err := json.Unmarshal(contents, &jsonData); err != nil {
		return nil, err
	}
	return jsonData, nil
}
func (s *ServerService) GetDb() ([]byte, error) {
2023-12-08 19:35:10 +00:00
// Update by manually trigger a checkpoint operation
err := database.Checkpoint()
if err != nil {
return nil, err
}
// Open the file for reading
file, err := os.Open(config.GetDBPath())
if err != nil {
return nil, err
}
defer file.Close()
// Read the file contents
fileContents, err := io.ReadAll(file)
if err != nil {
return nil, err
}
return fileContents, nil
}
2023-05-05 18:19:42 +00:00
// ImportDB replaces the panel database with the uploaded SQLite file.
// The upload is format-checked and integrity-validated first; the current
// database is kept as a ".backup" fallback and restored if the swap or
// migration fails. Xray is stopped for the replacement and restarted at
// the end. Temporary and fallback files are cleaned up via defers.
func (s *ServerService) ImportDB(file multipart.File) error {
	// Check if the file is a SQLite database
	isValidDb, err := database.IsSQLiteDB(file)
	if err != nil {
		return common.NewErrorf("Error checking db file format: %v", err)
	}
	if !isValidDb {
		return common.NewError("Invalid db file format")
	}

	// Reset the file reader to the beginning (IsSQLiteDB consumed the header)
	_, err = file.Seek(0, 0)
	if err != nil {
		return common.NewErrorf("Error resetting file reader: %v", err)
	}

	// Save the file as a temporary file
	tempPath := fmt.Sprintf("%s.temp", config.GetDBPath())
	// Remove the existing temporary file (if any)
	if _, err := os.Stat(tempPath); err == nil {
		if errRemove := os.Remove(tempPath); errRemove != nil {
			return common.NewErrorf("Error removing existing temporary db file: %v", errRemove)
		}
	}

	// Create the temporary file
	tempFile, err := os.Create(tempPath)
	if err != nil {
		return common.NewErrorf("Error creating temporary db file: %v", err)
	}
	// Robust deferred cleanup for the temporary file
	defer func() {
		if tempFile != nil {
			if cerr := tempFile.Close(); cerr != nil {
				logger.Warningf("Warning: failed to close temp file: %v", cerr)
			}
		}
		if _, err := os.Stat(tempPath); err == nil {
			if rerr := os.Remove(tempPath); rerr != nil {
				logger.Warningf("Warning: failed to remove temp file: %v", rerr)
			}
		}
	}()

	// Save uploaded file to temporary file
	if _, err = io.Copy(tempFile, file); err != nil {
		return common.NewErrorf("Error saving db: %v", err)
	}

	// Close temp file before opening via sqlite
	if err = tempFile.Close(); err != nil {
		return common.NewErrorf("Error closing temporary db file: %v", err)
	}
	// nil signals the deferred cleanup above that the handle is already closed
	tempFile = nil

	// Validate integrity (no migrations / side effects)
	if err = database.ValidateSQLiteDB(tempPath); err != nil {
		return common.NewErrorf("Invalid or corrupt db file: %v", err)
	}

	// Stop Xray (ignore error but log)
	if errStop := s.StopXrayService(); errStop != nil {
		logger.Warningf("Failed to stop Xray before DB import: %v", errStop)
	}

	// Close existing DB to release file locks (especially on Windows)
	if errClose := database.CloseDB(); errClose != nil {
		logger.Warningf("Failed to close existing DB before replacement: %v", errClose)
	}

	// Backup the current database for fallback
	fallbackPath := fmt.Sprintf("%s.backup", config.GetDBPath())
	// Remove the existing fallback file (if any)
	if _, err := os.Stat(fallbackPath); err == nil {
		if errRemove := os.Remove(fallbackPath); errRemove != nil {
			return common.NewErrorf("Error removing existing fallback db file: %v", errRemove)
		}
	}

	// Move the current database to the fallback location
	if err = os.Rename(config.GetDBPath(), fallbackPath); err != nil {
		return common.NewErrorf("Error backing up current db file: %v", err)
	}

	// Defer fallback cleanup ONLY if everything goes well
	defer func() {
		if _, err := os.Stat(fallbackPath); err == nil {
			if rerr := os.Remove(fallbackPath); rerr != nil {
				logger.Warningf("Warning: failed to remove fallback file: %v", rerr)
			}
		}
	}()

	// Move temp to DB path
	if err = os.Rename(tempPath, config.GetDBPath()); err != nil {
		// Restore from fallback
		if errRename := os.Rename(fallbackPath, config.GetDBPath()); errRename != nil {
			return common.NewErrorf("Error moving db file and restoring fallback: %v", errRename)
		}
		return common.NewErrorf("Error moving db file: %v", err)
	}

	// Open & migrate new DB; on failure, restore the previous database
	if err = database.InitDB(config.GetDBPath()); err != nil {
		if errRename := os.Rename(fallbackPath, config.GetDBPath()); errRename != nil {
			return common.NewErrorf("Error migrating db and restoring fallback: %v", errRename)
		}
		return common.NewErrorf("Error migrating db: %v", err)
	}
	s.inboundService.MigrateDB()

	// Start Xray
	if err = s.RestartXrayService(); err != nil {
		return common.NewErrorf("Imported DB but failed to start Xray: %v", err)
	}
	return nil
}
// geofileNameRegexp matches safe geofile names: alphanumeric characters,
// dots, underscores, and hyphens, ending in ".dat". Compiled once at
// package init instead of on every call.
var geofileNameRegexp = regexp.MustCompile(`^[a-zA-Z0-9._-]+\.dat$`)

// IsValidGeofileName validates that the filename is safe for geofile operations.
// It checks for path traversal attempts and ensures the filename contains only safe characters.
func (s *ServerService) IsValidGeofileName(filename string) bool {
	if filename == "" {
		return false
	}
	// Check for path traversal attempts
	if strings.Contains(filename, "..") {
		return false
	}
	// Check for path separators (both forward and backward slash)
	if strings.ContainsAny(filename, `/\`) {
		return false
	}
	// Check for absolute path indicators
	if filepath.IsAbs(filename) {
		return false
	}
	// Additional security: only allow alphanumeric, dots, underscores, and hyphens.
	// This is stricter than the general filename regex.
	return geofileNameRegexp.MatchString(filename)
}
func (s *ServerService) UpdateGeofile(fileName string) error {
files := []struct {
URL string
FileName string
}{
{"https://github.com/Loyalsoldier/v2ray-rules-dat/releases/latest/download/geoip.dat", "geoip.dat"},
{"https://github.com/Loyalsoldier/v2ray-rules-dat/releases/latest/download/geosite.dat", "geosite.dat"},
{"https://github.com/chocolate4u/Iran-v2ray-rules/releases/latest/download/geoip.dat", "geoip_IR.dat"},
{"https://github.com/chocolate4u/Iran-v2ray-rules/releases/latest/download/geosite.dat", "geosite_IR.dat"},
{"https://github.com/runetfreedom/russia-v2ray-rules-dat/releases/latest/download/geoip.dat", "geoip_RU.dat"},
{"https://github.com/runetfreedom/russia-v2ray-rules-dat/releases/latest/download/geosite.dat", "geosite_RU.dat"},
}
// Strict allowlist check to avoid writing uncontrolled files
if fileName != "" {
// Use the centralized validation function
if !s.IsValidGeofileName(fileName) {
return common.NewErrorf("Invalid geofile name: contains unsafe path characters: %s", fileName)
}
// Ensure the filename matches exactly one from our allowlist
isAllowed := false
for _, file := range files {
if fileName == file.FileName {
isAllowed = true
break
}
}
if !isAllowed {
return common.NewErrorf("Invalid geofile name: %s not in allowlist", fileName)
}
}
downloadFile := func(url, destPath string) error {
# Pull Request: Connection Reporting System & Improvements for Restricted Networks ## Description This PR introduces a comprehensive **Connection Reporting System** designed to improve the reliability and monitoring of connections, specifically tailored for environments with restricted internet access (e.g., active censorship, GFW). ### Key Changes 1. **New Reporting API (`/report`)**: * Added `ReportController` and `ReportService` to handle incoming connection reports. * Endpoint receives data such as `Latency`, `Success` status, `Protocol`, and Client Interface details. * Data is persisted to the database via the new `ConnectionReport` model. 2. **Subscription Link Updates**: * Modified `subService` to append a `reportUrl` parameter to generated subscription links (VLESS, VMess, etc.). * This allows compatible clients to automatically discover the reporting endpoint and send feedback. 3. **Database Integration**: * Added `ConnectionReport` schema to `database/model` and registered it in `database/db.go` for auto-migration. ## Why is this helpful for Restricted Internet Locations? In regions with heavy internet censorship, connection stability is volatile. * **Dynamic Reporting Endpoint**: The `reportUrl` parameter embedded in the subscription link explicitly tells the client *where* to send connection data. * **Bypassing Blocking**: By decoupling the reporting URL from the node address, clients can ensure diagnostic data reaches the panel even if specific node IPs are being interfered with (assuming the panel itself is reachable). * **Real-time Network Intelligence**: This mechanism enables the panel to aggregate "ground truth" data from clients inside the restricted network (e.g., latency, accessibility of specific protocols), allowing admins to react faster to blocking events. * **Protocol Performance Tracking**: Allows comparison of different protocols (Reality vs. VLESS+TLS vs. Trojan) based on real-world latency and success rates from actual users. 
* **Rapid Troubleshooting**: Administrators can see connection quality trends and rotate IPs/domains proactively when success rates drop, minimizing downtime for users. ## Technical Details * **API Endpoint**: `POST /report` * **Payload Format**: JSON containing `SystemInfo` (Interface), `ConnectionQuality` (Latency, Success), and `ProtocolInfo`. * **Security**: Reports are tied to valid client request contexts (implementation detail: ensure endpoint is rate-limited or authenticated if necessary, though currently designed for open reporting from valid sub links). ## How to Test 1. Update the panel. 2. Generate a subscription link. 3. Observe the `reportUrl` parameter in the link. 4. Simulate a client POST to the report URL and verify the entry in the `ConnectionReports` table.
2026-02-04 10:00:00 +00:00
resp, err := http.Get(url)
if err != nil {
return common.NewErrorf("Failed to download Geofile from %s: %v", url, err)
}
defer resp.Body.Close()
file, err := os.Create(destPath)
if err != nil {
return common.NewErrorf("Failed to create Geofile %s: %v", destPath, err)
}
defer file.Close()
_, err = io.Copy(file, resp.Body)
if err != nil {
return common.NewErrorf("Failed to save Geofile %s: %v", destPath, err)
}
return nil
}
var errorMessages []string
if fileName == "" {
for _, file := range files {
// Sanitize the filename from our allowlist as an extra precaution
destPath := filepath.Join(config.GetBinFolderPath(), filepath.Base(file.FileName))
# Pull Request: Connection Reporting System & Improvements for Restricted Networks ## Description This PR introduces a comprehensive **Connection Reporting System** designed to improve the reliability and monitoring of connections, specifically tailored for environments with restricted internet access (e.g., active censorship, GFW). ### Key Changes 1. **New Reporting API (`/report`)**: * Added `ReportController` and `ReportService` to handle incoming connection reports. * Endpoint receives data such as `Latency`, `Success` status, `Protocol`, and Client Interface details. * Data is persisted to the database via the new `ConnectionReport` model. 2. **Subscription Link Updates**: * Modified `subService` to append a `reportUrl` parameter to generated subscription links (VLESS, VMess, etc.). * This allows compatible clients to automatically discover the reporting endpoint and send feedback. 3. **Database Integration**: * Added `ConnectionReport` schema to `database/model` and registered it in `database/db.go` for auto-migration. ## Why is this helpful for Restricted Internet Locations? In regions with heavy internet censorship, connection stability is volatile. * **Dynamic Reporting Endpoint**: The `reportUrl` parameter embedded in the subscription link explicitly tells the client *where* to send connection data. * **Bypassing Blocking**: By decoupling the reporting URL from the node address, clients can ensure diagnostic data reaches the panel even if specific node IPs are being interfered with (assuming the panel itself is reachable). * **Real-time Network Intelligence**: This mechanism enables the panel to aggregate "ground truth" data from clients inside the restricted network (e.g., latency, accessibility of specific protocols), allowing admins to react faster to blocking events. * **Protocol Performance Tracking**: Allows comparison of different protocols (Reality vs. VLESS+TLS vs. Trojan) based on real-world latency and success rates from actual users. 
* **Rapid Troubleshooting**: Administrators can see connection quality trends and rotate IPs/domains proactively when success rates drop, minimizing downtime for users. ## Technical Details * **API Endpoint**: `POST /report` * **Payload Format**: JSON containing `SystemInfo` (Interface), `ConnectionQuality` (Latency, Success), and `ProtocolInfo`. * **Security**: Reports are tied to valid client request contexts (implementation detail: ensure endpoint is rate-limited or authenticated if necessary, though currently designed for open reporting from valid sub links). ## How to Test 1. Update the panel. 2. Generate a subscription link. 3. Observe the `reportUrl` parameter in the link. 4. Simulate a client POST to the report URL and verify the entry in the `ConnectionReports` table.
2026-02-04 10:00:00 +00:00
if err := downloadFile(file.URL, destPath); err != nil {
errorMessages = append(errorMessages, fmt.Sprintf("Error downloading Geofile '%s': %v", file.FileName, err))
}
}
} else {
// Use filepath.Base to ensure we only get the filename component, no path traversal
safeName := filepath.Base(fileName)
destPath := filepath.Join(config.GetBinFolderPath(), safeName)
var fileURL string
for _, file := range files {
if file.FileName == fileName {
fileURL = file.URL
break
}
}
if fileURL == "" {
errorMessages = append(errorMessages, fmt.Sprintf("File '%s' not found in the list of Geofiles", fileName))
} else {
if err := downloadFile(fileURL, destPath); err != nil {
errorMessages = append(errorMessages, fmt.Sprintf("Error downloading Geofile '%s': %v", fileName, err))
}
}
}
err := s.RestartXrayService()
if err != nil {
errorMessages = append(errorMessages, fmt.Sprintf("Updated Geofile '%s' but Failed to start Xray: %v", fileName, err))
}
if len(errorMessages) > 0 {
return common.NewErrorf("%s", strings.Join(errorMessages, "\r\n"))
}
return nil
}
// GetNewX25519Cert runs the Xray "x25519" command to generate a fresh
// X25519 key pair and returns {"privateKey": ..., "publicKey": ...}.
// It validates the command output before indexing into it, so malformed
// output yields an error instead of a panic.
func (s *ServerService) GetNewX25519Cert() (any, error) {
	// Run the command
	cmd := exec.Command(xray.GetBinaryPath(), "x25519")
	var out bytes.Buffer
	cmd.Stdout = &out
	err := cmd.Run()
	if err != nil {
		return nil, err
	}

	// Expected output: two "label: value" lines (private key, public key).
	lines := strings.Split(out.String(), "\n")
	if len(lines) < 2 {
		return nil, common.NewError("unexpected x25519 output")
	}
	privateKeyLine := strings.SplitN(lines[0], ":", 2)
	publicKeyLine := strings.SplitN(lines[1], ":", 2)
	if len(privateKeyLine) < 2 || len(publicKeyLine) < 2 {
		return nil, common.NewError("unexpected x25519 output")
	}

	privateKey := strings.TrimSpace(privateKeyLine[1])
	publicKey := strings.TrimSpace(publicKeyLine[1])

	keyPair := map[string]any{
		"privateKey": privateKey,
		"publicKey":  publicKey,
	}

	return keyPair, nil
}
2025-07-24 23:22:01 +00:00
func (s *ServerService) GetNewmldsa65() (any, error) {
// Run the command
cmd := exec.Command(xray.GetBinaryPath(), "mldsa65")
var out bytes.Buffer
cmd.Stdout = &out
err := cmd.Run()
if err != nil {
return nil, err
}
lines := strings.Split(out.String(), "\n")
SeedLine := strings.Split(lines[0], ":")
VerifyLine := strings.Split(lines[1], ":")
seed := strings.TrimSpace(SeedLine[1])
verify := strings.TrimSpace(VerifyLine[1])
keyPair := map[string]any{
"seed": seed,
"verify": verify,
}
return keyPair, nil
}
// GetNewEchCert runs the Xray "tls ech" command for the given SNI and
// returns the generated server keys and config list as
// {"echServerKeys": ..., "echConfigList": ...}.
func (s *ServerService) GetNewEchCert(sni string) (any, error) {
	// Run the command
	var output bytes.Buffer
	cmd := exec.Command(xray.GetBinaryPath(), "tls", "ech", "--serverName", sni)
	cmd.Stdout = &output

	if err := cmd.Run(); err != nil {
		return nil, err
	}

	// The config list is on line 2 and the server keys on line 4 of the output.
	outLines := strings.Split(output.String(), "\n")
	if len(outLines) < 4 {
		return nil, common.NewError("invalid ech cert")
	}

	result := map[string]any{
		"echServerKeys": outLines[3],
		"echConfigList": outLines[1],
	}
	return result, nil
}
2025-09-07 20:35:38 +00:00
func (s *ServerService) GetNewVlessEnc() (any, error) {
cmd := exec.Command(xray.GetBinaryPath(), "vlessenc")
var out bytes.Buffer
cmd.Stdout = &out
if err := cmd.Run(); err != nil {
return nil, err
}
lines := strings.Split(out.String(), "\n")
2025-09-08 23:22:43 +00:00
var auths []map[string]string
var current map[string]string
2025-09-07 20:35:38 +00:00
for _, line := range lines {
line = strings.TrimSpace(line)
if strings.HasPrefix(line, "Authentication:") {
if current != nil {
2025-09-08 23:22:43 +00:00
auths = append(auths, current)
}
current = map[string]string{
"label": strings.TrimSpace(strings.TrimPrefix(line, "Authentication:")),
2025-09-07 20:35:38 +00:00
}
} else if strings.HasPrefix(line, `"decryption"`) || strings.HasPrefix(line, `"encryption"`) {
parts := strings.SplitN(line, ":", 2)
if len(parts) == 2 && current != nil {
key := strings.Trim(parts[0], `" `)
val := strings.Trim(parts[1], `" `)
2025-09-08 23:22:43 +00:00
current[key] = val
2025-09-07 20:35:38 +00:00
}
}
}
if current != nil {
2025-09-08 23:22:43 +00:00
auths = append(auths, current)
2025-09-07 20:35:38 +00:00
}
return map[string]any{
2025-09-08 23:22:43 +00:00
"auths": auths,
}, nil
}
func (s *ServerService) GetNewUUID() (map[string]string, error) {
newUUID, err := uuid.NewRandom()
if err != nil {
return nil, fmt.Errorf("failed to generate UUID: %w", err)
}
return map[string]string{
"uuid": newUUID.String(),
2025-09-07 20:35:38 +00:00
}, nil
}
2025-09-08 23:22:43 +00:00
func (s *ServerService) GetNewmlkem768() (any, error) {
// Run the command
cmd := exec.Command(xray.GetBinaryPath(), "mlkem768")
var out bytes.Buffer
cmd.Stdout = &out
err := cmd.Run()
if err != nil {
return nil, err
}
lines := strings.Split(out.String(), "\n")
SeedLine := strings.Split(lines[0], ":")
ClientLine := strings.Split(lines[1], ":")
seed := strings.TrimSpace(SeedLine[1])
client := strings.TrimSpace(ClientLine[1])
keyPair := map[string]any{
"seed": seed,
"client": client,
}
return keyPair, nil
}