Mirror of https://github.com/MHSanaei/3x-ui.git (synced 2025-10-13 11:39:13 +00:00)
fix: outbound address for vless
Some checks are pending
Release 3X-UI / build (386) (push) Waiting to run
Release 3X-UI / build (amd64) (push) Waiting to run
Release 3X-UI / build (arm64) (push) Waiting to run
Release 3X-UI / build (armv5) (push) Waiting to run
Release 3X-UI / build (armv6) (push) Waiting to run
Release 3X-UI / build (armv7) (push) Waiting to run
Release 3X-UI / build (s390x) (push) Waiting to run
Release 3X-UI / Build for Windows (push) Waiting to run
parent 020bc9d77c
commit 1016f3b4f9

2 changed files with 24 additions and 21 deletions
@@ -12,13 +12,14 @@
 <a-layout-content>
     <a-spin :spinning="loadingStates.spinning" :delay="500" tip='{{ i18n "loading"}}'>
         <transition name="list" appear>
-            <a-alert type="error" v-if="showAlert && loadingStates.fetched" :style="{ marginBottom: '10px' }" message='{{ i18n "secAlertTitle" }}'
-                color="red" description='{{ i18n "secAlertSsl" }}' show-icon closable>
+            <a-alert type="error" v-if="showAlert && loadingStates.fetched" :style="{ marginBottom: '10px' }"
+                message='{{ i18n "secAlertTitle" }}' color="red" description='{{ i18n "secAlertSsl" }}' show-icon closable>
             </a-alert>
         </transition>
         <transition name="list" appear>
             <a-row v-if="!loadingStates.fetched">
-                <a-card :style="{ textAlign: 'center', padding: '30px 0', marginTop: '10px', background: 'transparent', border: 'none' }">
+                <a-card
+                    :style="{ textAlign: 'center', padding: '30px 0', marginTop: '10px', background: 'transparent', border: 'none' }">
                     <a-spin tip='{{ i18n "loading" }}'></a-spin>
                 </a-card>
             </a-row>
@@ -37,7 +38,8 @@
 <a-popover v-if="restartResult" :overlay-class-name="themeSwitcher.currentTheme">
     <span slot="title">{{ i18n "pages.index.xrayErrorPopoverTitle" }}</span>
     <template slot="content">
-        <span :style="{ maxWidth: '400px' }" v-for="line in restartResult.split('\n')">[[ line ]]</span>
+        <span :style="{ maxWidth: '400px' }" v-for="line in restartResult.split('\n')">[[ line
+            ]]</span>
     </template>
     <a-icon type="question-circle"></a-icon>
 </a-popover>
@@ -537,6 +539,7 @@
 serverObj = o.settings.vnext;
 break;
 case Protocols.VLESS:
+    return [o.settings?.address + ':' + o.settings?.port];
 case Protocols.HTTP:
 case Protocols.Socks:
 case Protocols.Shadowsocks:
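The one-line addition above is the fix named in the commit title: the VLESS case now returns the outbound address built from `o.settings?.address` and `o.settings?.port` directly, instead of falling through to the handling shared with the HTTP, SOCKS, and Shadowsocks cases. The optional chaining keeps the lookup from throwing when `settings` is missing on the outbound object.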
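The remaining hunks come from the commit's second file, a Go source file. The side-by-side viewer showed identical text in both columns for every hunk, so the lines are reproduced below as plain context; given the matching line counts in each hunk header, the underlying changes were most likely whitespace or alignment only.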
@@ -46,22 +46,22 @@ var (
 	hashStorage *global.HashStorage

 	// Performance improvements
 	messageWorkerPool   chan struct{} // Semaphore for limiting concurrent message processing
 	optimizedHTTPClient *http.Client  // HTTP client with connection pooling and timeouts

 	// Simple cache for frequently accessed data
 	statusCache struct {
 		data      *Status
 		timestamp time.Time
 		mutex     sync.RWMutex
 	}

 	serverStatsCache struct {
 		data      string
 		timestamp time.Time
 		mutex     sync.RWMutex
 	}

 	// clients data to adding new client
 	receiver_inbound_ID int
 	client_Id           string
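The `messageWorkerPool` declaration uses a common Go idiom: a buffered channel of empty structs as a counting semaphore, where a send acquires a slot and a receive releases it. A minimal runnable sketch of the pattern, assuming nothing from the source beyond the idiom itself (the limit of 3 and the names here are illustrative):

```go
package main

import (
	"fmt"
	"sync"
)

func main() {
	// A buffered channel acts as a counting semaphore: its capacity
	// is the maximum number of goroutines allowed to run at once.
	sem := make(chan struct{}, 3)

	var wg sync.WaitGroup
	for i := 0; i < 10; i++ {
		wg.Add(1)
		go func(id int) {
			defer wg.Done()
			sem <- struct{}{}        // acquire a slot (blocks when all 3 are taken)
			defer func() { <-sem }() // release the slot when done
			fmt.Println("processing message", id)
		}(i)
	}
	wg.Wait()
}
```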
@@ -122,7 +122,7 @@ func (t *Tgbot) GetHashStorage() *global.HashStorage {
 func (t *Tgbot) getCachedStatus() (*Status, bool) {
 	statusCache.mutex.RLock()
 	defer statusCache.mutex.RUnlock()

 	if statusCache.data != nil && time.Since(statusCache.timestamp) < 5*time.Second {
 		return statusCache.data, true
 	}
@@ -133,7 +133,7 @@ func (t *Tgbot) getCachedStatus() (*Status, bool) {
 func (t *Tgbot) setCachedStatus(status *Status) {
 	statusCache.mutex.Lock()
 	defer statusCache.mutex.Unlock()

 	statusCache.data = status
 	statusCache.timestamp = time.Now()
 }
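Both caches in the diff pair a value and a timestamp with a `sync.RWMutex`: readers take the cheap read lock and check the entry's age, writers take the write lock and refresh the timestamp. A self-contained sketch of the same time-to-live pattern, generic over the cached type (the `ttlCache` name and helper API are assumptions, not from the source; the 5-second TTL mirrors the status cache above):

```go
package main

import (
	"fmt"
	"sync"
	"time"
)

type ttlCache[T any] struct {
	mu        sync.RWMutex
	data      T
	set       bool
	timestamp time.Time
	ttl       time.Duration
}

// get returns the cached value only if it is present and younger than the TTL.
func (c *ttlCache[T]) get() (T, bool) {
	c.mu.RLock()
	defer c.mu.RUnlock()
	if c.set && time.Since(c.timestamp) < c.ttl {
		return c.data, true
	}
	var zero T
	return zero, false
}

// put stores a fresh value and resets the timestamp.
func (c *ttlCache[T]) put(v T) {
	c.mu.Lock()
	defer c.mu.Unlock()
	c.data, c.set, c.timestamp = v, true, time.Now()
}

func main() {
	c := &ttlCache[string]{ttl: 5 * time.Second}
	c.put("server status")
	if v, ok := c.get(); ok {
		fmt.Println("cache hit:", v)
	}
}
```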
@@ -142,7 +142,7 @@ func (t *Tgbot) setCachedStatus(status *Status) {
 func (t *Tgbot) getCachedServerStats() (string, bool) {
 	serverStatsCache.mutex.RLock()
 	defer serverStatsCache.mutex.RUnlock()

 	if serverStatsCache.data != "" && time.Since(serverStatsCache.timestamp) < 10*time.Second {
 		return serverStatsCache.data, true
 	}
@@ -153,7 +153,7 @@ func (t *Tgbot) getCachedServerStats() (string, bool) {
 func (t *Tgbot) setCachedServerStats(stats string) {
 	serverStatsCache.mutex.Lock()
 	defer serverStatsCache.mutex.Unlock()

 	serverStatsCache.data = stats
 	serverStatsCache.timestamp = time.Now()
 }
@@ -171,7 +171,7 @@ func (t *Tgbot) Start(i18nFS embed.FS) error {

 	// Initialize worker pool for concurrent message processing (max 10 concurrent handlers)
 	messageWorkerPool = make(chan struct{}, 10)

 	// Initialize optimized HTTP client with connection pooling
 	optimizedHTTPClient = &http.Client{
 		Timeout: 15 * time.Second,
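The `optimizedHTTPClient` initialization combines an overall request timeout with a transport that pools connections. Only the 15-second timeout is visible in the diff; a plausible full configuration in that spirit might look like the following (the pool sizes are illustrative assumptions, not values from the source):

```go
package main

import (
	"net/http"
	"time"
)

// newPooledClient returns an http.Client that reuses TCP connections
// across requests and bounds each request with an overall timeout.
func newPooledClient() *http.Client {
	transport := &http.Transport{
		MaxIdleConns:        100,              // total idle connections kept for reuse
		MaxIdleConnsPerHost: 10,               // idle connections kept per host
		IdleConnTimeout:     90 * time.Second, // how long an idle connection survives
	}
	return &http.Client{
		Timeout:   15 * time.Second, // covers dial, request, and body read
		Transport: transport,
	}
}

func main() {
	client := newPooledClient()
	_ = client // e.g. client.Get("https://api.telegram.org/...")
}
```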
@@ -359,9 +359,9 @@ func (t *Tgbot) OnReceive() {
 	botHandler.HandleMessage(func(ctx *th.Context, message telego.Message) error {
 		// Use goroutine with worker pool for concurrent command processing
 		go func() {
 			messageWorkerPool <- struct{}{}        // Acquire worker
 			defer func() { <-messageWorkerPool }() // Release worker

 			delete(userStates, message.Chat.ID)
 			t.answerCommand(&message, message.Chat.ID, checkAdmin(message.From.ID))
 		}()
@@ -371,9 +371,9 @@ func (t *Tgbot) OnReceive() {
 	botHandler.HandleCallbackQuery(func(ctx *th.Context, query telego.CallbackQuery) error {
 		// Use goroutine with worker pool for concurrent callback processing
 		go func() {
 			messageWorkerPool <- struct{}{}        // Acquire worker
 			defer func() { <-messageWorkerPool }() // Release worker

 			delete(userStates, query.Message.GetChat().ID)
 			t.answerCallback(&query, checkAdmin(query.From.ID))
 		}()
@@ -2537,7 +2537,7 @@ func (t *Tgbot) prepareServerUsageInfo() string {
 	if cachedStats, found := t.getCachedServerStats(); found {
 		return cachedStats
 	}

 	info, ipv4, ipv6 := "", "", ""

 	// get latest status of server with caching
@@ -2588,10 +2588,10 @@ func (t *Tgbot) prepareServerUsageInfo() string {
 	info += t.I18nBot("tgbot.messages.udpCount", "Count=="+strconv.Itoa(t.lastStatus.UdpCount))
 	info += t.I18nBot("tgbot.messages.traffic", "Total=="+common.FormatTraffic(int64(t.lastStatus.NetTraffic.Sent+t.lastStatus.NetTraffic.Recv)), "Upload=="+common.FormatTraffic(int64(t.lastStatus.NetTraffic.Sent)), "Download=="+common.FormatTraffic(int64(t.lastStatus.NetTraffic.Recv)))
 	info += t.I18nBot("tgbot.messages.xrayStatus", "State=="+fmt.Sprint(t.lastStatus.Xray.State))

 	// Cache the complete server stats
 	t.setCachedServerStats(info)

 	return info
 }
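The two `prepareServerUsageInfo` hunks bracket a read-through caching pattern: serve the cached report while it is fresh, otherwise rebuild it and store it before returning. A condensed, runnable sketch of that flow (the `bot` type and the report string are hypothetical stand-ins for the real `Tgbot` machinery):

```go
package main

import "fmt"

type bot struct {
	cached string
	fresh  bool
}

func (b *bot) getCachedServerStats() (string, bool) { return b.cached, b.fresh }
func (b *bot) setCachedServerStats(s string)        { b.cached, b.fresh = s, true }

// prepareServerUsageInfo reduced to its caching skeleton: serve the cached
// report if fresh, otherwise rebuild it and store it for later callers.
func (b *bot) prepareServerUsageInfo() string {
	if cached, found := b.getCachedServerStats(); found {
		return cached
	}
	info := "cpu/mem/traffic report" // stand-in for the real formatting code
	b.setCachedServerStats(info)
	return info
}

func main() {
	b := &bot{}
	fmt.Println(b.prepareServerUsageInfo()) // miss: builds and caches the report
	fmt.Println(b.prepareServerUsageInfo()) // hit: served from the cache
}
```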