mirror of
https://github.com/Buriburizaem0n/nezha_domains.git
synced 2026-02-04 20:50:06 +00:00
refactor: rename monitor -> service
This commit is contained in:
@@ -1,271 +0,0 @@
|
||||
package singleton
|
||||
|
||||
import (
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/naiba/nezha/model"
|
||||
"github.com/naiba/nezha/pkg/utils"
|
||||
)
|
||||
|
||||
var (
	// ApiTokenList maps token string -> token record. Guarded by ApiLock.
	ApiTokenList = make(map[string]*model.ApiToken)
	// UserIDToApiTokenList maps user ID -> that user's token strings. Guarded by ApiLock.
	UserIDToApiTokenList = make(map[uint64][]string)
	// ApiLock protects ApiTokenList and UserIDToApiTokenList.
	ApiLock sync.RWMutex

	// ServerAPI exposes the server status/info query service.
	ServerAPI = &ServerAPIService{}
	// MonitorAPI exposes the monitor-history query service.
	MonitorAPI = &MonitorAPIService{}
)
|
||||
|
||||
// ServerAPIService provides read-only API queries over the in-memory server list.
type ServerAPIService struct{}

// CommonResponse 常规返回结构 包含状态码 和 状态信息
// CommonResponse is the common response envelope: status code and message.
type CommonResponse struct {
	Code    int    `json:"code"`
	Message string `json:"message"`
}

// CommonServerInfo is the basic per-server info shared by status and info responses.
type CommonServerInfo struct {
	ID           uint64 `json:"id"`
	Name         string `json:"name"`
	Tag          string `json:"tag"`
	LastActive   int64  `json:"last_active"` // unix seconds
	IPV4         string `json:"ipv4"`
	IPV6         string `json:"ipv6"`
	ValidIP      string `json:"valid_ip"`
	DisplayIndex int    `json:"display_index"`
	HideForGuest bool   `json:"hide_for_guest"`
}

// StatusResponse 服务器状态子结构 包含服务器信息与状态信息
// StatusResponse pairs a server's basic info with its host metadata and live state.
type StatusResponse struct {
	CommonServerInfo
	Host   *model.Host      `json:"host"`
	Status *model.HostState `json:"status"`
}

// ServerStatusResponse 服务器状态返回结构 包含常规返回结构 和 服务器状态子结构
// ServerStatusResponse is the envelope plus a list of server status entries.
type ServerStatusResponse struct {
	CommonResponse
	Result []*StatusResponse `json:"result"`
}

// ServerInfoResponse 服务器信息返回结构 包含常规返回结构 和 服务器信息子结构
// ServerInfoResponse is the envelope plus a list of basic server info entries.
type ServerInfoResponse struct {
	CommonResponse
	Result []*CommonServerInfo `json:"result"`
}
|
||||
|
||||
// MonitorAPIService provides read-only API queries over monitor history records.
type MonitorAPIService struct {
}

// MonitorInfoResponse is the envelope plus one entry per monitor.
type MonitorInfoResponse struct {
	CommonResponse
	Result []*MonitorInfo `json:"result"`
}

// MonitorInfo aggregates one monitor's history: parallel slices of
// sample timestamps (milliseconds) and average delays.
type MonitorInfo struct {
	MonitorID   uint64    `json:"monitor_id"`
	ServerID    uint64    `json:"server_id"`
	MonitorName string    `json:"monitor_name"`
	ServerName  string    `json:"server_name"`
	CreatedAt   []int64   `json:"created_at"` // unix milliseconds, minute-truncated
	AvgDelay    []float32 `json:"avg_delay"`
}
|
||||
|
||||
func InitAPI() {
|
||||
ApiTokenList = make(map[string]*model.ApiToken)
|
||||
UserIDToApiTokenList = make(map[uint64][]string)
|
||||
}
|
||||
|
||||
func loadAPI() {
|
||||
InitAPI()
|
||||
var tokenList []*model.ApiToken
|
||||
DB.Find(&tokenList)
|
||||
for _, token := range tokenList {
|
||||
ApiTokenList[token.Token] = token
|
||||
UserIDToApiTokenList[token.UserID] = append(UserIDToApiTokenList[token.UserID], token.Token)
|
||||
}
|
||||
}
|
||||
|
||||
// GetStatusByIDList 获取传入IDList的服务器状态信息
|
||||
func (s *ServerAPIService) GetStatusByIDList(idList []uint64) *ServerStatusResponse {
|
||||
res := &ServerStatusResponse{}
|
||||
res.Result = make([]*StatusResponse, 0)
|
||||
|
||||
ServerLock.RLock()
|
||||
defer ServerLock.RUnlock()
|
||||
|
||||
for _, v := range idList {
|
||||
server := ServerList[v]
|
||||
if server == nil {
|
||||
continue
|
||||
}
|
||||
ipv4, ipv6, validIP := utils.SplitIPAddr(server.Host.IP)
|
||||
info := CommonServerInfo{
|
||||
ID: server.ID,
|
||||
Name: server.Name,
|
||||
// Tag: server.Tag,
|
||||
LastActive: server.LastActive.Unix(),
|
||||
IPV4: ipv4,
|
||||
IPV6: ipv6,
|
||||
ValidIP: validIP,
|
||||
}
|
||||
res.Result = append(res.Result, &StatusResponse{
|
||||
CommonServerInfo: info,
|
||||
Host: server.Host,
|
||||
Status: server.State,
|
||||
})
|
||||
}
|
||||
res.CommonResponse = CommonResponse{
|
||||
Code: 0,
|
||||
Message: "success",
|
||||
}
|
||||
return res
|
||||
}
|
||||
|
||||
// GetStatusByTag 获取传入分组的所有服务器状态信息
|
||||
// func (s *ServerAPIService) GetStatusByTag(tag string) *ServerStatusResponse {
|
||||
// return s.GetStatusByIDList(ServerTagToIDList[tag])
|
||||
// }
|
||||
|
||||
// GetAllStatus 获取所有服务器状态信息
|
||||
func (s *ServerAPIService) GetAllStatus() *ServerStatusResponse {
|
||||
res := &ServerStatusResponse{}
|
||||
res.Result = make([]*StatusResponse, 0)
|
||||
ServerLock.RLock()
|
||||
defer ServerLock.RUnlock()
|
||||
for _, v := range ServerList {
|
||||
host := v.Host
|
||||
state := v.State
|
||||
if host == nil || state == nil {
|
||||
continue
|
||||
}
|
||||
ipv4, ipv6, validIP := utils.SplitIPAddr(host.IP)
|
||||
info := CommonServerInfo{
|
||||
ID: v.ID,
|
||||
Name: v.Name,
|
||||
// Tag: v.Tag,
|
||||
LastActive: v.LastActive.Unix(),
|
||||
IPV4: ipv4,
|
||||
IPV6: ipv6,
|
||||
ValidIP: validIP,
|
||||
DisplayIndex: v.DisplayIndex,
|
||||
HideForGuest: v.HideForGuest,
|
||||
}
|
||||
res.Result = append(res.Result, &StatusResponse{
|
||||
CommonServerInfo: info,
|
||||
Host: v.Host,
|
||||
Status: v.State,
|
||||
})
|
||||
}
|
||||
res.CommonResponse = CommonResponse{
|
||||
Code: 0,
|
||||
Message: "success",
|
||||
}
|
||||
return res
|
||||
}
|
||||
|
||||
// GetListByTag 获取传入分组的所有服务器信息
|
||||
// GetListByTag 获取传入分组的所有服务器信息
// NOTE(review): tag-based grouping is disabled mid-refactor — the lookup body
// below is commented out, so the tag parameter is currently unused and the
// method always returns an empty Result with code 0 / "success".
func (s *ServerAPIService) GetListByTag(tag string) *ServerInfoResponse {
	res := &ServerInfoResponse{}
	res.Result = make([]*CommonServerInfo, 0)

	ServerLock.RLock()
	defer ServerLock.RUnlock()
	// for _, v := range ServerTagToIDList[tag] {
	// 	host := ServerList[v].Host
	// 	if host == nil {
	// 		continue
	// 	}
	// 	ipv4, ipv6, validIP := utils.SplitIPAddr(host.IP)
	// 	info := &CommonServerInfo{
	// 		ID:         v,
	// 		Name:       ServerList[v].Name,
	// 		Tag:        ServerList[v].Tag,
	// 		LastActive: ServerList[v].LastActive.Unix(),
	// 		IPV4:       ipv4,
	// 		IPV6:       ipv6,
	// 		ValidIP:    validIP,
	// 	}
	// 	res.Result = append(res.Result, info)
	// }
	res.CommonResponse = CommonResponse{
		Code:    0,
		Message: "success",
	}
	return res
}
|
||||
|
||||
// GetAllList 获取所有服务器信息
|
||||
func (s *ServerAPIService) GetAllList() *ServerInfoResponse {
|
||||
res := &ServerInfoResponse{}
|
||||
res.Result = make([]*CommonServerInfo, 0)
|
||||
|
||||
ServerLock.RLock()
|
||||
defer ServerLock.RUnlock()
|
||||
for _, v := range ServerList {
|
||||
host := v.Host
|
||||
if host == nil {
|
||||
continue
|
||||
}
|
||||
ipv4, ipv6, validIP := utils.SplitIPAddr(host.IP)
|
||||
info := &CommonServerInfo{
|
||||
ID: v.ID,
|
||||
Name: v.Name,
|
||||
// Tag: v.Tag,
|
||||
LastActive: v.LastActive.Unix(),
|
||||
IPV4: ipv4,
|
||||
IPV6: ipv6,
|
||||
ValidIP: validIP,
|
||||
}
|
||||
res.Result = append(res.Result, info)
|
||||
}
|
||||
res.CommonResponse = CommonResponse{
|
||||
Code: 0,
|
||||
Message: "success",
|
||||
}
|
||||
return res
|
||||
}
|
||||
|
||||
func (m *MonitorAPIService) GetMonitorHistories(query map[string]any) *MonitorInfoResponse {
|
||||
var (
|
||||
resultMap = make(map[uint64]*MonitorInfo)
|
||||
monitorHistories []*model.MonitorHistory
|
||||
sortedMonitorIDs []uint64
|
||||
)
|
||||
res := &MonitorInfoResponse{
|
||||
CommonResponse: CommonResponse{
|
||||
Code: 0,
|
||||
Message: "success",
|
||||
},
|
||||
}
|
||||
if err := DB.Model(&model.MonitorHistory{}).Select("monitor_id, created_at, server_id, avg_delay").
|
||||
Where(query).Where("created_at >= ?", time.Now().Add(-24*time.Hour)).Order("monitor_id, created_at").
|
||||
Scan(&monitorHistories).Error; err != nil {
|
||||
res.CommonResponse = CommonResponse{
|
||||
Code: 500,
|
||||
Message: err.Error(),
|
||||
}
|
||||
} else {
|
||||
for _, history := range monitorHistories {
|
||||
infos, ok := resultMap[history.MonitorID]
|
||||
if !ok {
|
||||
infos = &MonitorInfo{
|
||||
MonitorID: history.MonitorID,
|
||||
ServerID: history.ServerID,
|
||||
MonitorName: ServiceSentinelShared.monitors[history.MonitorID].Name,
|
||||
ServerName: ServerList[history.ServerID].Name,
|
||||
}
|
||||
resultMap[history.MonitorID] = infos
|
||||
sortedMonitorIDs = append(sortedMonitorIDs, history.MonitorID)
|
||||
}
|
||||
infos.CreatedAt = append(infos.CreatedAt, history.CreatedAt.Truncate(time.Minute).Unix()*1000)
|
||||
infos.AvgDelay = append(infos.AvgDelay, history.AvgDelay)
|
||||
}
|
||||
for _, monitorID := range sortedMonitorIDs {
|
||||
res.Result = append(res.Result, resultMap[monitorID])
|
||||
}
|
||||
}
|
||||
return res
|
||||
}
|
||||
@@ -23,18 +23,18 @@ type ReportData struct {
|
||||
Reporter uint64
|
||||
}
|
||||
|
||||
// _TodayStatsOfMonitor 今日监控记录
|
||||
type _TodayStatsOfMonitor struct {
|
||||
// _TodayStatsOfService 今日监控记录
|
||||
type _TodayStatsOfService struct {
|
||||
Up int // 今日在线计数
|
||||
Down int // 今日离线计数
|
||||
Delay float32 // 今日平均延迟
|
||||
}
|
||||
|
||||
// NewServiceSentinel 创建服务监控器
|
||||
func NewServiceSentinel(serviceSentinelDispatchBus chan<- model.Monitor) {
|
||||
func NewServiceSentinel(serviceSentinelDispatchBus chan<- model.Service) {
|
||||
ServiceSentinelShared = &ServiceSentinel{
|
||||
serviceReportChannel: make(chan ReportData, 200),
|
||||
serviceStatusToday: make(map[uint64]*_TodayStatsOfMonitor),
|
||||
serviceStatusToday: make(map[uint64]*_TodayStatsOfService),
|
||||
serviceCurrentStatusIndex: make(map[uint64]*indexStore),
|
||||
serviceCurrentStatusData: make(map[uint64][]*pb.TaskResult),
|
||||
lastStatus: make(map[uint64]int),
|
||||
@@ -42,30 +42,30 @@ func NewServiceSentinel(serviceSentinelDispatchBus chan<- model.Monitor) {
|
||||
serviceResponseDataStoreCurrentDown: make(map[uint64]uint64),
|
||||
serviceResponseDataStoreCurrentAvgDelay: make(map[uint64]float32),
|
||||
serviceResponsePing: make(map[uint64]map[uint64]*pingStore),
|
||||
monitors: make(map[uint64]*model.Monitor),
|
||||
services: make(map[uint64]*model.Service),
|
||||
sslCertCache: make(map[uint64]string),
|
||||
// 30天数据缓存
|
||||
monthlyStatus: make(map[uint64]*model.ServiceItemResponse),
|
||||
monthlyStatus: make(map[uint64]*model.ServiceResponseItem),
|
||||
dispatchBus: serviceSentinelDispatchBus,
|
||||
}
|
||||
// 加载历史记录
|
||||
ServiceSentinelShared.loadMonitorHistory()
|
||||
ServiceSentinelShared.loadServiceHistory()
|
||||
|
||||
year, month, day := time.Now().Date()
|
||||
today := time.Date(year, month, day, 0, 0, 0, 0, Loc)
|
||||
|
||||
var mhs []model.MonitorHistory
|
||||
var mhs []model.ServiceHistory
|
||||
// 加载当日记录
|
||||
DB.Where("created_at >= ?", today).Find(&mhs)
|
||||
totalDelay := make(map[uint64]float32)
|
||||
totalDelayCount := make(map[uint64]float32)
|
||||
for i := 0; i < len(mhs); i++ {
|
||||
totalDelay[mhs[i].MonitorID] += mhs[i].AvgDelay
|
||||
totalDelayCount[mhs[i].MonitorID]++
|
||||
ServiceSentinelShared.serviceStatusToday[mhs[i].MonitorID].Up += int(mhs[i].Up)
|
||||
ServiceSentinelShared.monthlyStatus[mhs[i].MonitorID].TotalUp += mhs[i].Up
|
||||
ServiceSentinelShared.serviceStatusToday[mhs[i].MonitorID].Down += int(mhs[i].Down)
|
||||
ServiceSentinelShared.monthlyStatus[mhs[i].MonitorID].TotalDown += mhs[i].Down
|
||||
totalDelay[mhs[i].ServiceID] += mhs[i].AvgDelay
|
||||
totalDelayCount[mhs[i].ServiceID]++
|
||||
ServiceSentinelShared.serviceStatusToday[mhs[i].ServiceID].Up += int(mhs[i].Up)
|
||||
ServiceSentinelShared.monthlyStatus[mhs[i].ServiceID].TotalUp += mhs[i].Up
|
||||
ServiceSentinelShared.serviceStatusToday[mhs[i].ServiceID].Down += int(mhs[i].Down)
|
||||
ServiceSentinelShared.monthlyStatus[mhs[i].ServiceID].TotalDown += mhs[i].Down
|
||||
}
|
||||
for id, delay := range totalDelay {
|
||||
ServiceSentinelShared.serviceStatusToday[id].Delay = delay / float32(totalDelayCount[id])
|
||||
@@ -85,31 +85,31 @@ func NewServiceSentinel(serviceSentinelDispatchBus chan<- model.Monitor) {
|
||||
使用缓存 channel,处理上报的 Service 请求结果,然后判断是否需要报警
|
||||
需要记录上一次的状态信息
|
||||
|
||||
加锁顺序:serviceResponseDataStoreLock > monthlyStatusLock > monitorsLock
|
||||
加锁顺序:serviceResponseDataStoreLock > monthlyStatusLock > servicesLock
|
||||
*/
|
||||
type ServiceSentinel struct {
|
||||
// 服务监控任务上报通道
|
||||
serviceReportChannel chan ReportData // 服务状态汇报管道
|
||||
// 服务监控任务调度通道
|
||||
dispatchBus chan<- model.Monitor
|
||||
dispatchBus chan<- model.Service
|
||||
|
||||
serviceResponseDataStoreLock sync.RWMutex
|
||||
serviceStatusToday map[uint64]*_TodayStatsOfMonitor // [monitor_id] -> _TodayStatsOfMonitor
|
||||
serviceCurrentStatusIndex map[uint64]*indexStore // [monitor_id] -> 该监控ID对应的 serviceCurrentStatusData 的最新索引下标
|
||||
serviceCurrentStatusData map[uint64][]*pb.TaskResult // [monitor_id] -> []model.MonitorHistory
|
||||
serviceResponseDataStoreCurrentUp map[uint64]uint64 // [monitor_id] -> 当前服务在线计数
|
||||
serviceResponseDataStoreCurrentDown map[uint64]uint64 // [monitor_id] -> 当前服务离线计数
|
||||
serviceResponseDataStoreCurrentAvgDelay map[uint64]float32 // [monitor_id] -> 当前服务离线计数
|
||||
serviceResponsePing map[uint64]map[uint64]*pingStore // [monitor_id] -> ClientID -> delay
|
||||
serviceStatusToday map[uint64]*_TodayStatsOfService // [service_id] -> _TodayStatsOfService
|
||||
serviceCurrentStatusIndex map[uint64]*indexStore // [service_id] -> 该监控ID对应的 serviceCurrentStatusData 的最新索引下标
|
||||
serviceCurrentStatusData map[uint64][]*pb.TaskResult // [service_id] -> []model.ServiceHistory
|
||||
serviceResponseDataStoreCurrentUp map[uint64]uint64 // [service_id] -> 当前服务在线计数
|
||||
serviceResponseDataStoreCurrentDown map[uint64]uint64 // [service_id] -> 当前服务离线计数
|
||||
serviceResponseDataStoreCurrentAvgDelay map[uint64]float32 // [service_id] -> 当前服务离线计数
|
||||
serviceResponsePing map[uint64]map[uint64]*pingStore // [service_id] -> ClientID -> delay
|
||||
lastStatus map[uint64]int
|
||||
sslCertCache map[uint64]string
|
||||
|
||||
monitorsLock sync.RWMutex
|
||||
monitors map[uint64]*model.Monitor // [monitor_id] -> model.Monitor
|
||||
servicesLock sync.RWMutex
|
||||
services map[uint64]*model.Service
|
||||
|
||||
// 30天数据缓存
|
||||
monthlyStatusLock sync.Mutex
|
||||
monthlyStatus map[uint64]*model.ServiceItemResponse // [monitor_id] -> model.ServiceItemResponse
|
||||
monthlyStatus map[uint64]*model.ServiceResponseItem
|
||||
}
|
||||
|
||||
type indexStore struct {
|
||||
@@ -157,23 +157,23 @@ func (ss *ServiceSentinel) Dispatch(r ReportData) {
|
||||
ss.serviceReportChannel <- r
|
||||
}
|
||||
|
||||
func (ss *ServiceSentinel) Monitors() []*model.Monitor {
|
||||
ss.monitorsLock.RLock()
|
||||
defer ss.monitorsLock.RUnlock()
|
||||
var monitors []*model.Monitor
|
||||
for _, v := range ss.monitors {
|
||||
monitors = append(monitors, v)
|
||||
func (ss *ServiceSentinel) Services() []*model.Service {
|
||||
ss.servicesLock.RLock()
|
||||
defer ss.servicesLock.RUnlock()
|
||||
var services []*model.Service
|
||||
for _, v := range ss.services {
|
||||
services = append(services, v)
|
||||
}
|
||||
sort.SliceStable(monitors, func(i, j int) bool {
|
||||
return monitors[i].ID < monitors[j].ID
|
||||
sort.SliceStable(services, func(i, j int) bool {
|
||||
return services[i].ID < services[j].ID
|
||||
})
|
||||
return monitors
|
||||
return services
|
||||
}
|
||||
|
||||
// loadMonitorHistory 加载服务监控器的历史状态信息
|
||||
func (ss *ServiceSentinel) loadMonitorHistory() {
|
||||
var monitors []*model.Monitor
|
||||
err := DB.Find(&monitors).Error
|
||||
// loadServiceHistory 加载服务监控器的历史状态信息
|
||||
func (ss *ServiceSentinel) loadServiceHistory() {
|
||||
var services []*model.Service
|
||||
err := DB.Find(&services).Error
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
@@ -182,29 +182,29 @@ func (ss *ServiceSentinel) loadMonitorHistory() {
|
||||
defer ss.serviceResponseDataStoreLock.Unlock()
|
||||
ss.monthlyStatusLock.Lock()
|
||||
defer ss.monthlyStatusLock.Unlock()
|
||||
ss.monitorsLock.Lock()
|
||||
defer ss.monitorsLock.Unlock()
|
||||
ss.servicesLock.Lock()
|
||||
defer ss.servicesLock.Unlock()
|
||||
|
||||
for i := 0; i < len(monitors); i++ {
|
||||
task := *monitors[i]
|
||||
for i := 0; i < len(services); i++ {
|
||||
task := *services[i]
|
||||
// 通过cron定时将服务监控任务传递给任务调度管道
|
||||
monitors[i].CronJobID, err = Cron.AddFunc(task.CronSpec(), func() {
|
||||
services[i].CronJobID, err = Cron.AddFunc(task.CronSpec(), func() {
|
||||
ss.dispatchBus <- task
|
||||
})
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
ss.monitors[monitors[i].ID] = monitors[i]
|
||||
ss.serviceCurrentStatusData[monitors[i].ID] = make([]*pb.TaskResult, _CurrentStatusSize)
|
||||
ss.serviceStatusToday[monitors[i].ID] = &_TodayStatsOfMonitor{}
|
||||
ss.services[services[i].ID] = services[i]
|
||||
ss.serviceCurrentStatusData[services[i].ID] = make([]*pb.TaskResult, _CurrentStatusSize)
|
||||
ss.serviceStatusToday[services[i].ID] = &_TodayStatsOfService{}
|
||||
}
|
||||
|
||||
year, month, day := time.Now().Date()
|
||||
today := time.Date(year, month, day, 0, 0, 0, 0, Loc)
|
||||
|
||||
for i := 0; i < len(monitors); i++ {
|
||||
ServiceSentinelShared.monthlyStatus[monitors[i].ID] = &model.ServiceItemResponse{
|
||||
Monitor: monitors[i],
|
||||
for i := 0; i < len(services); i++ {
|
||||
ServiceSentinelShared.monthlyStatus[services[i].ID] = &model.ServiceResponseItem{
|
||||
Service: services[i],
|
||||
Delay: &[30]float32{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
|
||||
Up: &[30]int{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
|
||||
Down: &[30]int{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
|
||||
@@ -212,7 +212,7 @@ func (ss *ServiceSentinel) loadMonitorHistory() {
|
||||
}
|
||||
|
||||
// 加载服务监控历史记录
|
||||
var mhs []model.MonitorHistory
|
||||
var mhs []model.ServiceHistory
|
||||
DB.Where("created_at > ? AND created_at < ?", today.AddDate(0, 0, -29), today).Find(&mhs)
|
||||
var delayCount = make(map[int]int)
|
||||
for i := 0; i < len(mhs); i++ {
|
||||
@@ -220,22 +220,22 @@ func (ss *ServiceSentinel) loadMonitorHistory() {
|
||||
if dayIndex < 0 {
|
||||
continue
|
||||
}
|
||||
ServiceSentinelShared.monthlyStatus[mhs[i].MonitorID].Delay[dayIndex] = (ServiceSentinelShared.monthlyStatus[mhs[i].MonitorID].Delay[dayIndex]*float32(delayCount[dayIndex]) + mhs[i].AvgDelay) / float32(delayCount[dayIndex]+1)
|
||||
ServiceSentinelShared.monthlyStatus[mhs[i].ServiceID].Delay[dayIndex] = (ServiceSentinelShared.monthlyStatus[mhs[i].ServiceID].Delay[dayIndex]*float32(delayCount[dayIndex]) + mhs[i].AvgDelay) / float32(delayCount[dayIndex]+1)
|
||||
delayCount[dayIndex]++
|
||||
ServiceSentinelShared.monthlyStatus[mhs[i].MonitorID].Up[dayIndex] += int(mhs[i].Up)
|
||||
ServiceSentinelShared.monthlyStatus[mhs[i].MonitorID].TotalUp += mhs[i].Up
|
||||
ServiceSentinelShared.monthlyStatus[mhs[i].MonitorID].Down[dayIndex] += int(mhs[i].Down)
|
||||
ServiceSentinelShared.monthlyStatus[mhs[i].MonitorID].TotalDown += mhs[i].Down
|
||||
ServiceSentinelShared.monthlyStatus[mhs[i].ServiceID].Up[dayIndex] += int(mhs[i].Up)
|
||||
ServiceSentinelShared.monthlyStatus[mhs[i].ServiceID].TotalUp += mhs[i].Up
|
||||
ServiceSentinelShared.monthlyStatus[mhs[i].ServiceID].Down[dayIndex] += int(mhs[i].Down)
|
||||
ServiceSentinelShared.monthlyStatus[mhs[i].ServiceID].TotalDown += mhs[i].Down
|
||||
}
|
||||
}
|
||||
|
||||
func (ss *ServiceSentinel) OnMonitorUpdate(m model.Monitor) error {
|
||||
func (ss *ServiceSentinel) OnServiceUpdate(m model.Service) error {
|
||||
ss.serviceResponseDataStoreLock.Lock()
|
||||
defer ss.serviceResponseDataStoreLock.Unlock()
|
||||
ss.monthlyStatusLock.Lock()
|
||||
defer ss.monthlyStatusLock.Unlock()
|
||||
ss.monitorsLock.Lock()
|
||||
defer ss.monitorsLock.Unlock()
|
||||
ss.servicesLock.Lock()
|
||||
defer ss.servicesLock.Unlock()
|
||||
|
||||
var err error
|
||||
// 写入新任务
|
||||
@@ -245,32 +245,32 @@ func (ss *ServiceSentinel) OnMonitorUpdate(m model.Monitor) error {
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if ss.monitors[m.ID] != nil {
|
||||
if ss.services[m.ID] != nil {
|
||||
// 停掉旧任务
|
||||
Cron.Remove(ss.monitors[m.ID].CronJobID)
|
||||
Cron.Remove(ss.services[m.ID].CronJobID)
|
||||
} else {
|
||||
// 新任务初始化数据
|
||||
ss.monthlyStatus[m.ID] = &model.ServiceItemResponse{
|
||||
Monitor: &m,
|
||||
ss.monthlyStatus[m.ID] = &model.ServiceResponseItem{
|
||||
Service: &m,
|
||||
Delay: &[30]float32{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
|
||||
Up: &[30]int{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
|
||||
Down: &[30]int{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
|
||||
}
|
||||
ss.serviceCurrentStatusData[m.ID] = make([]*pb.TaskResult, _CurrentStatusSize)
|
||||
ss.serviceStatusToday[m.ID] = &_TodayStatsOfMonitor{}
|
||||
ss.serviceStatusToday[m.ID] = &_TodayStatsOfService{}
|
||||
}
|
||||
// 更新这个任务
|
||||
ss.monitors[m.ID] = &m
|
||||
ss.services[m.ID] = &m
|
||||
return nil
|
||||
}
|
||||
|
||||
func (ss *ServiceSentinel) OnMonitorDelete(ids []uint64) {
|
||||
func (ss *ServiceSentinel) OnServiceDelete(ids []uint64) {
|
||||
ss.serviceResponseDataStoreLock.Lock()
|
||||
defer ss.serviceResponseDataStoreLock.Unlock()
|
||||
ss.monthlyStatusLock.Lock()
|
||||
defer ss.monthlyStatusLock.Unlock()
|
||||
ss.monitorsLock.Lock()
|
||||
defer ss.monitorsLock.Unlock()
|
||||
ss.servicesLock.Lock()
|
||||
defer ss.servicesLock.Unlock()
|
||||
|
||||
for _, id := range ids {
|
||||
delete(ss.serviceCurrentStatusIndex, id)
|
||||
@@ -283,22 +283,22 @@ func (ss *ServiceSentinel) OnMonitorDelete(ids []uint64) {
|
||||
delete(ss.serviceStatusToday, id)
|
||||
|
||||
// 停掉定时任务
|
||||
Cron.Remove(ss.monitors[id].CronJobID)
|
||||
delete(ss.monitors, id)
|
||||
Cron.Remove(ss.services[id].CronJobID)
|
||||
delete(ss.services, id)
|
||||
|
||||
delete(ss.monthlyStatus, id)
|
||||
}
|
||||
}
|
||||
|
||||
func (ss *ServiceSentinel) LoadStats() map[uint64]*model.ServiceItemResponse {
|
||||
func (ss *ServiceSentinel) LoadStats() map[uint64]*model.ServiceResponseItem {
|
||||
ss.serviceResponseDataStoreLock.RLock()
|
||||
defer ss.serviceResponseDataStoreLock.RUnlock()
|
||||
ss.monthlyStatusLock.Lock()
|
||||
defer ss.monthlyStatusLock.Unlock()
|
||||
|
||||
// 刷新最新一天的数据
|
||||
for k := range ss.monitors {
|
||||
ss.monthlyStatus[k].Monitor = ss.monitors[k]
|
||||
for k := range ss.services {
|
||||
ss.monthlyStatus[k].Service = ss.services[k]
|
||||
v := ss.serviceStatusToday[k]
|
||||
|
||||
// 30 天在线率,
|
||||
@@ -314,7 +314,7 @@ func (ss *ServiceSentinel) LoadStats() map[uint64]*model.ServiceItemResponse {
|
||||
ss.monthlyStatus[k].Delay[29] = v.Delay
|
||||
}
|
||||
|
||||
// 最后 5 分钟的状态 与 monitor 对象填充
|
||||
// 最后 5 分钟的状态 与 service 对象填充
|
||||
for k, v := range ss.serviceResponseDataStoreCurrentDown {
|
||||
ss.monthlyStatus[k].CurrentDown = v
|
||||
}
|
||||
@@ -329,18 +329,18 @@ func (ss *ServiceSentinel) LoadStats() map[uint64]*model.ServiceItemResponse {
|
||||
func (ss *ServiceSentinel) worker() {
|
||||
// 从服务状态汇报管道获取汇报的服务数据
|
||||
for r := range ss.serviceReportChannel {
|
||||
if ss.monitors[r.Data.GetId()] == nil || ss.monitors[r.Data.GetId()].ID == 0 {
|
||||
if ss.services[r.Data.GetId()] == nil || ss.services[r.Data.GetId()].ID == 0 {
|
||||
log.Printf("NEZHA>> 错误的服务监控上报 %+v", r)
|
||||
continue
|
||||
}
|
||||
mh := r.Data
|
||||
if mh.Type == model.TaskTypeTCPPing || mh.Type == model.TaskTypeICMPPing {
|
||||
monitorTcpMap, ok := ss.serviceResponsePing[mh.GetId()]
|
||||
serviceTcpMap, ok := ss.serviceResponsePing[mh.GetId()]
|
||||
if !ok {
|
||||
monitorTcpMap = make(map[uint64]*pingStore)
|
||||
ss.serviceResponsePing[mh.GetId()] = monitorTcpMap
|
||||
serviceTcpMap = make(map[uint64]*pingStore)
|
||||
ss.serviceResponsePing[mh.GetId()] = serviceTcpMap
|
||||
}
|
||||
ts, ok := monitorTcpMap[r.Reporter]
|
||||
ts, ok := serviceTcpMap[r.Reporter]
|
||||
if !ok {
|
||||
ts = &pingStore{}
|
||||
}
|
||||
@@ -348,8 +348,8 @@ func (ss *ServiceSentinel) worker() {
|
||||
ts.ping = (ts.ping*float32(ts.count-1) + mh.Delay) / float32(ts.count)
|
||||
if ts.count == Conf.AvgPingCount {
|
||||
ts.count = 0
|
||||
if err := DB.Create(&model.MonitorHistory{
|
||||
MonitorID: mh.GetId(),
|
||||
if err := DB.Create(&model.ServiceHistory{
|
||||
ServiceID: mh.GetId(),
|
||||
AvgDelay: ts.ping,
|
||||
Data: mh.Data,
|
||||
ServerID: r.Reporter,
|
||||
@@ -357,7 +357,7 @@ func (ss *ServiceSentinel) worker() {
|
||||
log.Println("NEZHA>> 服务监控数据持久化失败:", err)
|
||||
}
|
||||
}
|
||||
monitorTcpMap[r.Reporter] = ts
|
||||
serviceTcpMap[r.Reporter] = ts
|
||||
}
|
||||
ss.serviceResponseDataStoreLock.Lock()
|
||||
// 写入当天状态
|
||||
@@ -414,8 +414,8 @@ func (ss *ServiceSentinel) worker() {
|
||||
index: 0,
|
||||
t: currentTime,
|
||||
}
|
||||
if err := DB.Create(&model.MonitorHistory{
|
||||
MonitorID: mh.GetId(),
|
||||
if err := DB.Create(&model.ServiceHistory{
|
||||
ServiceID: mh.GetId(),
|
||||
AvgDelay: ss.serviceResponseDataStoreCurrentAvgDelay[mh.GetId()],
|
||||
Data: mh.Data,
|
||||
Up: ss.serviceResponseDataStoreCurrentUp[mh.GetId()],
|
||||
@@ -427,23 +427,23 @@ func (ss *ServiceSentinel) worker() {
|
||||
|
||||
// 延迟报警
|
||||
if mh.Delay > 0 {
|
||||
ss.monitorsLock.RLock()
|
||||
if ss.monitors[mh.GetId()].LatencyNotify {
|
||||
notificationGroupID := ss.monitors[mh.GetId()].NotificationGroupID
|
||||
ss.servicesLock.RLock()
|
||||
if ss.services[mh.GetId()].LatencyNotify {
|
||||
notificationGroupID := ss.services[mh.GetId()].NotificationGroupID
|
||||
minMuteLabel := NotificationMuteLabel.ServiceLatencyMin(mh.GetId())
|
||||
maxMuteLabel := NotificationMuteLabel.ServiceLatencyMax(mh.GetId())
|
||||
if mh.Delay > ss.monitors[mh.GetId()].MaxLatency {
|
||||
if mh.Delay > ss.services[mh.GetId()].MaxLatency {
|
||||
// 延迟超过最大值
|
||||
ServerLock.RLock()
|
||||
reporterServer := ServerList[r.Reporter]
|
||||
msg := fmt.Sprintf("[Latency] %s %2f > %2f, Reporter: %s", ss.monitors[mh.GetId()].Name, mh.Delay, ss.monitors[mh.GetId()].MaxLatency, reporterServer.Name)
|
||||
msg := fmt.Sprintf("[Latency] %s %2f > %2f, Reporter: %s", ss.services[mh.GetId()].Name, mh.Delay, ss.services[mh.GetId()].MaxLatency, reporterServer.Name)
|
||||
go SendNotification(notificationGroupID, msg, minMuteLabel)
|
||||
ServerLock.RUnlock()
|
||||
} else if mh.Delay < ss.monitors[mh.GetId()].MinLatency {
|
||||
} else if mh.Delay < ss.services[mh.GetId()].MinLatency {
|
||||
// 延迟低于最小值
|
||||
ServerLock.RLock()
|
||||
reporterServer := ServerList[r.Reporter]
|
||||
msg := fmt.Sprintf("[Latency] %s %2f < %2f, Reporter: %s", ss.monitors[mh.GetId()].Name, mh.Delay, ss.monitors[mh.GetId()].MinLatency, reporterServer.Name)
|
||||
msg := fmt.Sprintf("[Latency] %s %2f < %2f, Reporter: %s", ss.services[mh.GetId()].Name, mh.Delay, ss.services[mh.GetId()].MinLatency, reporterServer.Name)
|
||||
go SendNotification(notificationGroupID, msg, maxMuteLabel)
|
||||
ServerLock.RUnlock()
|
||||
} else {
|
||||
@@ -452,24 +452,24 @@ func (ss *ServiceSentinel) worker() {
|
||||
UnMuteNotification(notificationGroupID, maxMuteLabel)
|
||||
}
|
||||
}
|
||||
ss.monitorsLock.RUnlock()
|
||||
ss.servicesLock.RUnlock()
|
||||
}
|
||||
|
||||
// 状态变更报警+触发任务执行
|
||||
if stateCode == StatusDown || stateCode != ss.lastStatus[mh.GetId()] {
|
||||
ss.monitorsLock.Lock()
|
||||
ss.servicesLock.Lock()
|
||||
lastStatus := ss.lastStatus[mh.GetId()]
|
||||
// 存储新的状态值
|
||||
ss.lastStatus[mh.GetId()] = stateCode
|
||||
|
||||
// 判断是否需要发送通知
|
||||
isNeedSendNotification := ss.monitors[mh.GetId()].Notify && (lastStatus != 0 || stateCode == StatusDown)
|
||||
isNeedSendNotification := ss.services[mh.GetId()].Notify && (lastStatus != 0 || stateCode == StatusDown)
|
||||
if isNeedSendNotification {
|
||||
ServerLock.RLock()
|
||||
|
||||
reporterServer := ServerList[r.Reporter]
|
||||
notificationGroupID := ss.monitors[mh.GetId()].NotificationGroupID
|
||||
notificationMsg := fmt.Sprintf("[%s] %s Reporter: %s, Error: %s", StatusCodeToString(stateCode), ss.monitors[mh.GetId()].Name, reporterServer.Name, mh.Data)
|
||||
notificationGroupID := ss.services[mh.GetId()].NotificationGroupID
|
||||
notificationMsg := fmt.Sprintf("[%s] %s Reporter: %s, Error: %s", StatusCodeToString(stateCode), ss.services[mh.GetId()].Name, reporterServer.Name, mh.Data)
|
||||
muteLabel := NotificationMuteLabel.ServiceStateChanged(mh.GetId())
|
||||
|
||||
// 状态变更时,清除静音缓存
|
||||
@@ -482,7 +482,7 @@ func (ss *ServiceSentinel) worker() {
|
||||
}
|
||||
|
||||
// 判断是否需要触发任务
|
||||
isNeedTriggerTask := ss.monitors[mh.GetId()].EnableTriggerTask && lastStatus != 0
|
||||
isNeedTriggerTask := ss.services[mh.GetId()].EnableTriggerTask && lastStatus != 0
|
||||
if isNeedTriggerTask {
|
||||
ServerLock.RLock()
|
||||
reporterServer := ServerList[r.Reporter]
|
||||
@@ -490,14 +490,14 @@ func (ss *ServiceSentinel) worker() {
|
||||
|
||||
if stateCode == StatusGood && lastStatus != stateCode {
|
||||
// 当前状态正常 前序状态非正常时 触发恢复任务
|
||||
go SendTriggerTasks(ss.monitors[mh.GetId()].RecoverTriggerTasks, reporterServer.ID)
|
||||
go SendTriggerTasks(ss.services[mh.GetId()].RecoverTriggerTasks, reporterServer.ID)
|
||||
} else if lastStatus == StatusGood && lastStatus != stateCode {
|
||||
// 前序状态正常 当前状态非正常时 触发失败任务
|
||||
go SendTriggerTasks(ss.monitors[mh.GetId()].FailTriggerTasks, reporterServer.ID)
|
||||
go SendTriggerTasks(ss.services[mh.GetId()].FailTriggerTasks, reporterServer.ID)
|
||||
}
|
||||
}
|
||||
|
||||
ss.monitorsLock.Unlock()
|
||||
ss.servicesLock.Unlock()
|
||||
}
|
||||
ss.serviceResponseDataStoreLock.Unlock()
|
||||
|
||||
@@ -509,22 +509,22 @@ func (ss *ServiceSentinel) worker() {
|
||||
!strings.HasSuffix(mh.Data, "EOF") &&
|
||||
!strings.HasSuffix(mh.Data, "timed out") {
|
||||
errMsg = mh.Data
|
||||
ss.monitorsLock.RLock()
|
||||
if ss.monitors[mh.GetId()].Notify {
|
||||
ss.servicesLock.RLock()
|
||||
if ss.services[mh.GetId()].Notify {
|
||||
muteLabel := NotificationMuteLabel.ServiceSSL(mh.GetId(), "network")
|
||||
go SendNotification(ss.monitors[mh.GetId()].NotificationGroupID, fmt.Sprintf("[SSL] Fetch cert info failed, %s %s", ss.monitors[mh.GetId()].Name, errMsg), muteLabel)
|
||||
go SendNotification(ss.services[mh.GetId()].NotificationGroupID, fmt.Sprintf("[SSL] Fetch cert info failed, %s %s", ss.services[mh.GetId()].Name, errMsg), muteLabel)
|
||||
}
|
||||
ss.monitorsLock.RUnlock()
|
||||
ss.servicesLock.RUnlock()
|
||||
|
||||
}
|
||||
} else {
|
||||
// 清除网络错误静音缓存
|
||||
UnMuteNotification(ss.monitors[mh.GetId()].NotificationGroupID, NotificationMuteLabel.ServiceSSL(mh.GetId(), "network"))
|
||||
UnMuteNotification(ss.services[mh.GetId()].NotificationGroupID, NotificationMuteLabel.ServiceSSL(mh.GetId(), "network"))
|
||||
|
||||
var newCert = strings.Split(mh.Data, "|")
|
||||
if len(newCert) > 1 {
|
||||
ss.monitorsLock.Lock()
|
||||
enableNotify := ss.monitors[mh.GetId()].Notify
|
||||
ss.servicesLock.Lock()
|
||||
enableNotify := ss.services[mh.GetId()].Notify
|
||||
|
||||
// 首次获取证书信息时,缓存证书信息
|
||||
if ss.sslCertCache[mh.GetId()] == "" {
|
||||
@@ -542,9 +542,9 @@ func (ss *ServiceSentinel) worker() {
|
||||
ss.sslCertCache[mh.GetId()] = mh.Data
|
||||
}
|
||||
|
||||
notificationGroupID := ss.monitors[mh.GetId()].NotificationGroupID
|
||||
serviceName := ss.monitors[mh.GetId()].Name
|
||||
ss.monitorsLock.Unlock()
|
||||
notificationGroupID := ss.services[mh.GetId()].NotificationGroupID
|
||||
serviceName := ss.services[mh.GetId()].Name
|
||||
ss.servicesLock.Unlock()
|
||||
|
||||
// 需要发送提醒
|
||||
if enableNotify {
|
||||
|
||||
@@ -36,7 +36,6 @@ func LoadSingleton() {
|
||||
loadNotifications() // 加载通知服务
|
||||
loadServers() // 加载服务器列表
|
||||
loadCronTasks() // 加载定时任务
|
||||
loadAPI()
|
||||
initNAT()
|
||||
initDDNS()
|
||||
}
|
||||
@@ -63,8 +62,8 @@ func InitDBFromPath(path string) {
|
||||
DB = DB.Debug()
|
||||
}
|
||||
err = DB.AutoMigrate(model.Server{}, model.User{},
|
||||
model.Notification{}, model.AlertRule{}, model.Monitor{},
|
||||
model.MonitorHistory{}, model.Cron{}, model.Transfer{},
|
||||
model.Notification{}, model.AlertRule{}, model.Service{},
|
||||
model.ServiceHistory{}, model.Cron{}, model.Transfer{},
|
||||
model.ApiToken{}, model.NAT{}, model.DDNSProfile{}, model.NotificationGroupNotification{})
|
||||
if err != nil {
|
||||
panic(err)
|
||||
@@ -98,14 +97,14 @@ func RecordTransferHourlyUsage() {
|
||||
log.Println("NEZHA>> Cron 流量统计入库", len(txs), DB.Create(txs).Error)
|
||||
}
|
||||
|
||||
// CleanMonitorHistory 清理无效或过时的 监控记录 和 流量记录
|
||||
func CleanMonitorHistory() {
|
||||
// CleanServiceHistory 清理无效或过时的 监控记录 和 流量记录
|
||||
func CleanServiceHistory() {
|
||||
// 清理已被删除的服务器的监控记录与流量记录
|
||||
DB.Unscoped().Delete(&model.MonitorHistory{}, "created_at < ? OR monitor_id NOT IN (SELECT `id` FROM monitors)", time.Now().AddDate(0, 0, -30))
|
||||
DB.Unscoped().Delete(&model.ServiceHistory{}, "created_at < ? OR service_id NOT IN (SELECT `id` FROM services)", time.Now().AddDate(0, 0, -30))
|
||||
// 由于网络监控记录的数据较多,并且前端仅使用了 1 天的数据
|
||||
// 考虑到 sqlite 数据量问题,仅保留一天数据,
|
||||
// server_id = 0 的数据会用于/service页面的可用性展示
|
||||
DB.Unscoped().Delete(&model.MonitorHistory{}, "(created_at < ? AND server_id != 0) OR monitor_id NOT IN (SELECT `id` FROM monitors)", time.Now().AddDate(0, 0, -1))
|
||||
DB.Unscoped().Delete(&model.ServiceHistory{}, "(created_at < ? AND server_id != 0) OR service_id NOT IN (SELECT `id` FROM services)", time.Now().AddDate(0, 0, -1))
|
||||
DB.Unscoped().Delete(&model.Transfer{}, "server_id NOT IN (SELECT `id` FROM servers)")
|
||||
// 计算可清理流量记录的时长
|
||||
var allServerKeep time.Time
|
||||
|
||||
Reference in New Issue
Block a user