Merge branch 'master' of https://github.com/naiba/nezha into naiba-master
158
service/singleton/alertsentinel.go
Normal file
@@ -0,0 +1,158 @@
package singleton

import (
	"fmt"
	"log"
	"sync"
	"time"

	"github.com/naiba/nezha/model"
	"github.com/naiba/nezha/pkg/utils"
)

const (
	_RuleCheckNoData = iota
	_RuleCheckFail
	_RuleCheckPass
)

type NotificationHistory struct {
	Duration time.Duration
	Until    time.Time
}

// Alert rules
var AlertsLock sync.RWMutex
var Alerts []*model.AlertRule
var alertsStore map[uint64]map[uint64][][]interface{}
var alertsPrevState map[uint64]map[uint64]uint
var AlertsCycleTransferStatsStore map[uint64]*model.CycleTransferStats

func addCycleTransferStatsInfo(alert *model.AlertRule) {
	if !alert.Enabled() {
		return
	}
	for j := 0; j < len(alert.Rules); j++ {
		if !alert.Rules[j].IsTransferDurationRule() {
			continue
		}
		if AlertsCycleTransferStatsStore[alert.ID] == nil {
			from := alert.Rules[j].GetTransferDurationStart()
			to := alert.Rules[j].GetTransferDurationEnd()
			AlertsCycleTransferStatsStore[alert.ID] = &model.CycleTransferStats{
				Name:       alert.Name,
				From:       from,
				To:         to,
				Max:        uint64(alert.Rules[j].Max),
				Min:        uint64(alert.Rules[j].Min),
				ServerName: make(map[uint64]string),
				Transfer:   make(map[uint64]uint64),
				NextUpdate: make(map[uint64]time.Time),
			}
		}
	}
}

func AlertSentinelStart() {
	alertsStore = make(map[uint64]map[uint64][][]interface{})
	alertsPrevState = make(map[uint64]map[uint64]uint)
	AlertsCycleTransferStatsStore = make(map[uint64]*model.CycleTransferStats)
	AlertsLock.Lock()
	if err := DB.Find(&Alerts).Error; err != nil {
		panic(err)
	}
	for i := 0; i < len(Alerts); i++ {
		alertsStore[Alerts[i].ID] = make(map[uint64][][]interface{})
		alertsPrevState[Alerts[i].ID] = make(map[uint64]uint)
		addCycleTransferStatsInfo(Alerts[i])
	}
	AlertsLock.Unlock()

	time.Sleep(time.Second * 10)
	var lastPrint time.Time
	var checkCount uint64
	for {
		startedAt := time.Now()
		checkStatus()
		checkCount++
		if lastPrint.Before(startedAt.Add(-1 * time.Hour)) {
			if Conf.Debug {
				log.Println("NEZHA>> 报警规则检测每小时", checkCount, "次", startedAt, time.Now())
			}
			checkCount = 0
			lastPrint = startedAt
		}
		time.Sleep(time.Until(startedAt.Add(time.Second * 3))) // check once every 3 seconds
	}
}

func OnRefreshOrAddAlert(alert model.AlertRule) {
	AlertsLock.Lock()
	defer AlertsLock.Unlock()
	delete(alertsStore, alert.ID)
	delete(alertsPrevState, alert.ID)
	var isEdit bool
	for i := 0; i < len(Alerts); i++ {
		if Alerts[i].ID == alert.ID {
			Alerts[i] = &alert
			isEdit = true
		}
	}
	if !isEdit {
		Alerts = append(Alerts, &alert)
	}
	alertsStore[alert.ID] = make(map[uint64][][]interface{})
	alertsPrevState[alert.ID] = make(map[uint64]uint)
	delete(AlertsCycleTransferStatsStore, alert.ID)
	addCycleTransferStatsInfo(&alert)
}

func OnDeleteAlert(id uint64) {
	AlertsLock.Lock()
	defer AlertsLock.Unlock()
	delete(alertsStore, id)
	delete(alertsPrevState, id)
	for i := 0; i < len(Alerts); i++ {
		if Alerts[i].ID == id {
			Alerts = append(Alerts[:i], Alerts[i+1:]...)
			i--
		}
	}
	delete(AlertsCycleTransferStatsStore, id)
}

func checkStatus() {
	AlertsLock.RLock()
	defer AlertsLock.RUnlock()
	ServerLock.RLock()
	defer ServerLock.RUnlock()

	for _, alert := range Alerts {
		// Skip rules that are not enabled
		if !alert.Enabled() {
			continue
		}
		for _, server := range ServerList {
			// Record a check-point snapshot
			alertsStore[alert.ID][server.ID] = append(alertsStore[alert.ID][server.ID], alert.Snapshot(AlertsCycleTransferStatsStore[alert.ID], server, DB))
			// Send notifications: either a triggered alarm or a recovery notice
			max, passed := alert.Check(alertsStore[alert.ID][server.ID])
			if !passed {
				alertsPrevState[alert.ID][server.ID] = _RuleCheckFail
				message := fmt.Sprintf("[主机故障] %s(%s) 规则:%s", server.Name, utils.IPDesensitize(server.Host.IP), alert.Name)
				go SendNotification(message, true)
			} else {
				if alertsPrevState[alert.ID][server.ID] == _RuleCheckFail {
					message := fmt.Sprintf("[主机恢复] %s(%s) 规则:%s", server.Name, utils.IPDesensitize(server.Host.IP), alert.Name)
					go SendNotification(message, true)
				}
				alertsPrevState[alert.ID][server.ID] = _RuleCheckPass
			}
			// Prune old snapshots
			if max > 0 && max < len(alertsStore[alert.ID][server.ID]) {
				alertsStore[alert.ID][server.ID] = alertsStore[alert.ID][server.ID][len(alertsStore[alert.ID][server.ID])-max:]
			}
		}
	}
}
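
For orientation: the two exported hooks above are what the dashboard layer is expected to call after persisting a rule, so the in-memory snapshots stay in sync with the database. A minimal sketch of that call pattern with hypothetical handler functions (only OnRefreshOrAddAlert, OnDeleteAlert, DB and model.AlertRule come from this file):

// Hypothetical handlers; illustration only.
func saveAlertRule(rule model.AlertRule) error {
	if err := DB.Save(&rule).Error; err != nil { // persist first so rule.ID is set
		return err
	}
	OnRefreshOrAddAlert(rule) // reset snapshots/prev-state and re-register the rule
	return nil
}

func removeAlertRule(id uint64) error {
	if err := DB.Delete(&model.AlertRule{}, "id = ?", id).Error; err != nil {
		return err
	}
	OnDeleteAlert(id) // drop snapshots, prev state and cycle-transfer stats
	return nil
}
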
95
service/singleton/notification.go
Normal file
@@ -0,0 +1,95 @@
package singleton

import (
	"crypto/md5" // #nosec
	"encoding/hex"
	"log"
	"sync"
	"time"

	"github.com/naiba/nezha/model"
)

const firstNotificationDelay = time.Minute * 15

// Notification channels
var notifications []model.Notification
var notificationsLock sync.RWMutex

func LoadNotifications() {
	notificationsLock.Lock()
	if err := DB.Find(&notifications).Error; err != nil {
		panic(err)
	}
	notificationsLock.Unlock()
}

func OnRefreshOrAddNotification(n model.Notification) {
	notificationsLock.Lock()
	defer notificationsLock.Unlock()
	var isEdit bool
	for i := 0; i < len(notifications); i++ {
		if notifications[i].ID == n.ID {
			notifications[i] = n
			isEdit = true
		}
	}
	if !isEdit {
		notifications = append(notifications, n)
	}
}

func OnDeleteNotification(id uint64) {
	notificationsLock.Lock()
	defer notificationsLock.Unlock()
	for i := 0; i < len(notifications); i++ {
		if notifications[i].ID == id {
			notifications = append(notifications[:i], notifications[i+1:]...)
			i--
		}
	}
}

func SendNotification(desc string, muteable bool) {
	if muteable {
		// Anti-spam policy: derive a dedup key from the message text.
		// (Note: md5.New().Sum(b) appends the digest of the empty stream
		// to b, so nID is effectively desc plus a constant suffix.)
		nID := hex.EncodeToString(md5.New().Sum([]byte(desc))) // #nosec
		var flag bool
		if cacheN, has := Cache.Get(nID); has {
			nHistory := cacheN.(NotificationHistory)
			// Double the wait after each reminder, topping out at one reminder per day
			if time.Now().After(nHistory.Until) {
				flag = true
				nHistory.Duration *= 2
				if nHistory.Duration > time.Hour*24 {
					nHistory.Duration = time.Hour * 24
				}
				nHistory.Until = time.Now().Add(nHistory.Duration)
				// Cache TTL is the mute window plus 10 minutes
				Cache.Set(nID, nHistory, nHistory.Duration+time.Minute*10)
			}
		} else {
			// A brand-new alert notifies immediately
			flag = true
			Cache.Set(nID, NotificationHistory{
				Duration: firstNotificationDelay,
				Until:    time.Now().Add(firstNotificationDelay),
			}, firstNotificationDelay+time.Minute*10)
		}

		if !flag {
			if Conf.Debug {
				log.Println("NEZHA>> 静音的重复通知:", desc, muteable)
			}
			return
		}
	}
	// Fan the message out to every channel
	notificationsLock.RLock()
	defer notificationsLock.RUnlock()
	for i := 0; i < len(notifications); i++ {
		if err := notifications[i].Send(desc); err != nil {
			log.Println("NEZHA>> 发送通知失败:", err)
		}
	}
}
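
Taken together, the mute window in SendNotification grows geometrically from the initial 15-minute delay and saturates at one reminder per day. A self-contained sketch of that schedule, assuming nothing beyond the constants above:

package main

import (
	"fmt"
	"time"
)

func main() {
	// Mirrors the backoff in SendNotification: the first window is
	// firstNotificationDelay (15m); each repeat doubles it, capped at 24h.
	d := 15 * time.Minute
	for i := 1; i <= 8; i++ {
		fmt.Printf("after reminder %d: muted for %v\n", i, d)
		d *= 2
		if d > 24*time.Hour {
			d = 24 * time.Hour
		}
	}
	// Prints 15m, 30m, 1h, 2h, 4h, 8h, 16h, then 24h from there on.
}
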
399
service/singleton/servicesentinel.go
Normal file
@@ -0,0 +1,399 @@
package singleton

import (
	"fmt"
	"log"
	"sort"
	"strings"
	"sync"
	"time"

	"github.com/naiba/nezha/model"
	pb "github.com/naiba/nezha/proto"
)

const (
	_CurrentStatusSize = 30 // aggregate the last 15 minutes of data as the current status
	_StatusOk          = "良好"
)
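
// (For scale: 30 samples covering roughly 15 minutes works out to about
// one check result every 30 seconds per monitor.)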

var ServiceSentinelShared *ServiceSentinel

type ReportData struct {
	Data     *pb.TaskResult
	Reporter uint64
}

type _TodayStatsOfMonitor struct {
	Up    int
	Down  int
	Delay float32
}

func NewServiceSentinel(serviceSentinelDispatchBus chan<- model.Monitor) {
	ServiceSentinelShared = &ServiceSentinel{
		serviceReportChannel:                make(chan ReportData, 200),
		serviceStatusToday:                  make(map[uint64]*_TodayStatsOfMonitor),
		serviceCurrentStatusIndex:           make(map[uint64]int),
		serviceCurrentStatusData:            make(map[uint64][]model.MonitorHistory),
		latestDate:                          make(map[uint64]string),
		lastStatus:                          make(map[uint64]string),
		serviceResponseDataStoreCurrentUp:   make(map[uint64]uint64),
		serviceResponseDataStoreCurrentDown: make(map[uint64]uint64),
		monitors:                            make(map[uint64]*model.Monitor),
		sslCertCache:                        make(map[uint64]string),
		// 30-day data cache
		monthlyStatus: make(map[uint64]*model.ServiceItemResponse),
		dispatchBus:   serviceSentinelDispatchBus,
	}
	ServiceSentinelShared.loadMonitorHistory()

	year, month, day := time.Now().Date()
	today := time.Date(year, month, day, 0, 0, 0, 0, time.Local)

	var mhs []model.MonitorHistory
	// Load today's records
	DB.Where("created_at >= ?", today).Find(&mhs)
	totalDelay := make(map[uint64]float32)
	for i := 0; i < len(mhs); i++ {
		if mhs[i].Successful {
			ServiceSentinelShared.serviceStatusToday[mhs[i].MonitorID].Up++
			totalDelay[mhs[i].MonitorID] += mhs[i].Delay
		} else {
			ServiceSentinelShared.serviceStatusToday[mhs[i].MonitorID].Down++
		}
	}
	for id, delay := range totalDelay {
		ServiceSentinelShared.serviceStatusToday[id].Delay = delay / float32(ServiceSentinelShared.serviceStatusToday[id].Up)
	}

	// Initialize the persistence date cursor for each monitor's daily data
	for k := range ServiceSentinelShared.monitors {
		ServiceSentinelShared.latestDate[k] = time.Now().Format("02-Jan-06")
	}

	go ServiceSentinelShared.worker()

	// Push the cursor forward one day, every day at midnight
	_, err := Cron.AddFunc("0 0 0 * * *", ServiceSentinelShared.refreshMonthlyServiceStatus)
	if err != nil {
		panic(err)
	}
}

/*
ServiceSentinel consumes reported service check results from a buffered
channel and decides whether an alert needs to be raised; it has to
remember the previous status of each monitor to do so.
*/
type ServiceSentinel struct {
	serviceResponseDataStoreLock        sync.RWMutex
	monitorsLock                        sync.RWMutex
	serviceReportChannel                chan ReportData
	serviceStatusToday                  map[uint64]*_TodayStatsOfMonitor
	serviceCurrentStatusIndex           map[uint64]int
	serviceCurrentStatusData            map[uint64][]model.MonitorHistory
	latestDate                          map[uint64]string
	lastStatus                          map[uint64]string
	serviceResponseDataStoreCurrentUp   map[uint64]uint64
	serviceResponseDataStoreCurrentDown map[uint64]uint64
	monitors                            map[uint64]*model.Monitor
	sslCertCache                        map[uint64]string
	// 30-day data cache
	monthlyStatusLock sync.Mutex
	monthlyStatus     map[uint64]*model.ServiceItemResponse
	// dispatch bus for scheduled service monitor tasks
	dispatchBus chan<- model.Monitor
}

func (ss *ServiceSentinel) refreshMonthlyServiceStatus() {
	// Refresh the data even when nobody is visiting
	ss.LoadStats()
	// Shift the data window forward by one day
	ss.monthlyStatusLock.Lock()
	defer ss.monthlyStatusLock.Unlock()
	for _, v := range ss.monthlyStatus {
		for i := 0; i < len(v.Up)-1; i++ {
			v.Up[i] = v.Up[i+1]
			v.Down[i] = v.Down[i+1]
			v.Delay[i] = v.Delay[i+1]
		}
	}
}

func (ss *ServiceSentinel) Dispatch(r ReportData) {
	ss.serviceReportChannel <- r
}

func (ss *ServiceSentinel) Monitors() []*model.Monitor {
	ss.monitorsLock.RLock()
	defer ss.monitorsLock.RUnlock()
	var monitors []*model.Monitor
	for _, v := range ss.monitors {
		monitors = append(monitors, v)
	}
	sort.SliceStable(monitors, func(i, j int) bool {
		return monitors[i].ID < monitors[j].ID
	})
	return monitors
}

func (ss *ServiceSentinel) loadMonitorHistory() {
	var monitors []*model.Monitor
	DB.Find(&monitors)
	var err error
	ss.monitorsLock.Lock()
	defer ss.monitorsLock.Unlock()
	ss.monitors = make(map[uint64]*model.Monitor)
	for i := 0; i < len(monitors); i++ {
		task := *monitors[i]
		monitors[i].CronJobID, err = Cron.AddFunc(task.CronSpec(), func() {
			ss.dispatchBus <- task
		})
		if err != nil {
			panic(err)
		}
		ss.monitors[monitors[i].ID] = monitors[i]
		ss.serviceCurrentStatusData[monitors[i].ID] = make([]model.MonitorHistory, _CurrentStatusSize)
		ss.serviceStatusToday[monitors[i].ID] = &_TodayStatsOfMonitor{}
	}

	year, month, day := time.Now().Date()
	today := time.Date(year, month, day, 0, 0, 0, 0, time.Local)

	ss.monthlyStatusLock.Lock()
	defer ss.monthlyStatusLock.Unlock()
	for i := 0; i < len(monitors); i++ {
		ServiceSentinelShared.monthlyStatus[monitors[i].ID] = &model.ServiceItemResponse{
			Monitor: monitors[i],
			Delay:   &[30]float32{},
			Up:      &[30]int{},
			Down:    &[30]int{},
		}
	}

	// Load the history records
	var mhs []model.MonitorHistory
	DB.Where("created_at >= ? AND created_at < ?", today.AddDate(0, 0, -29), today).Find(&mhs)
	for i := 0; i < len(mhs); i++ {
		dayIndex := 28 - (int(today.Sub(mhs[i].CreatedAt).Hours()) / 24)
		if mhs[i].Successful {
			ServiceSentinelShared.monthlyStatus[mhs[i].MonitorID].TotalUp++
			ServiceSentinelShared.monthlyStatus[mhs[i].MonitorID].Delay[dayIndex] = (ServiceSentinelShared.monthlyStatus[mhs[i].MonitorID].Delay[dayIndex]*float32(ss.monthlyStatus[mhs[i].MonitorID].Up[dayIndex]) + mhs[i].Delay) / float32(ss.monthlyStatus[mhs[i].MonitorID].Up[dayIndex]+1)
			ServiceSentinelShared.monthlyStatus[mhs[i].MonitorID].Up[dayIndex]++
		} else {
			ServiceSentinelShared.monthlyStatus[mhs[i].MonitorID].TotalDown++
			ServiceSentinelShared.monthlyStatus[mhs[i].MonitorID].Down[dayIndex]++
		}
	}
}

func (ss *ServiceSentinel) OnMonitorUpdate(m model.Monitor) error {
	ss.monitorsLock.Lock()
	defer ss.monitorsLock.Unlock()
	var err error
	// Register the new cron job
	m.CronJobID, err = Cron.AddFunc(m.CronSpec(), func() {
		ss.dispatchBus <- m
	})
	if err != nil {
		return err
	}
	if ss.monitors[m.ID] != nil {
		// Stop the old job
		Cron.Remove(ss.monitors[m.ID].CronJobID)
	} else {
		// Initialize data for a brand-new monitor
		ss.monthlyStatusLock.Lock()
		defer ss.monthlyStatusLock.Unlock()
		ss.monthlyStatus[m.ID] = &model.ServiceItemResponse{
			Monitor: &m,
			Delay:   &[30]float32{},
			Up:      &[30]int{},
			Down:    &[30]int{},
		}
		ss.serviceResponseDataStoreLock.Lock()
		defer ss.serviceResponseDataStoreLock.Unlock()
		ss.serviceCurrentStatusData[m.ID] = make([]model.MonitorHistory, _CurrentStatusSize)
		ss.serviceStatusToday[m.ID] = &_TodayStatsOfMonitor{}
	}
	// Update the stored monitor
	ss.monitors[m.ID] = &m
	return nil
}

func (ss *ServiceSentinel) OnMonitorDelete(id uint64) {
	ss.serviceResponseDataStoreLock.Lock()
	defer ss.serviceResponseDataStoreLock.Unlock()
	delete(ss.serviceCurrentStatusIndex, id)
	delete(ss.serviceCurrentStatusData, id)
	delete(ss.latestDate, id)
	delete(ss.lastStatus, id)
	delete(ss.serviceResponseDataStoreCurrentUp, id)
	delete(ss.serviceResponseDataStoreCurrentDown, id)
	delete(ss.sslCertCache, id)
	ss.monitorsLock.Lock()
	defer ss.monitorsLock.Unlock()
	// Stop the scheduled job
	Cron.Remove(ss.monitors[id].CronJobID)
	delete(ss.monitors, id)
	ss.monthlyStatusLock.Lock()
	defer ss.monthlyStatusLock.Unlock()
	delete(ss.monthlyStatus, id)
}

func (ss *ServiceSentinel) LoadStats() map[uint64]*model.ServiceItemResponse {
	// Refresh the latest day's data
	ss.serviceResponseDataStoreLock.RLock()
	defer ss.serviceResponseDataStoreLock.RUnlock()
	ss.monthlyStatusLock.Lock()
	defer ss.monthlyStatusLock.Unlock()
	for k := range ss.monitors {
		ss.monthlyStatus[k].Monitor = ss.monitors[k]
		v := ss.serviceStatusToday[k]
		ss.monthlyStatus[k].Up[29] = v.Up
		ss.monthlyStatus[k].Down[29] = v.Down
		ss.monthlyStatus[k].TotalUp += uint64(v.Up)
		ss.monthlyStatus[k].TotalDown += uint64(v.Down)
		ss.monthlyStatus[k].Delay[29] = v.Delay
	}
	// Status over the last 5 minutes, plus the monitor object fill-in
	for k, v := range ss.serviceResponseDataStoreCurrentDown {
		ss.monthlyStatus[k].CurrentDown = v
	}
	for k, v := range ss.serviceResponseDataStoreCurrentUp {
		ss.monthlyStatus[k].CurrentUp = v
	}
	return ss.monthlyStatus
}

func getStateStr(percent uint64) string {
	if percent == 0 {
		return "无数据"
	}
	if percent > 95 {
		return _StatusOk
	}
	if percent > 80 {
		return "低可用"
	}
	return "故障"
}
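
// Example with a full 30-sample window: 29 up -> 29*100/30 = 96 ("良好"),
// 27 up -> 90 ("低可用"), 24 up -> 80 ("故障"); an empty window gives
// upPercent == 0 -> "无数据".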

func (ss *ServiceSentinel) worker() {
	for r := range ss.serviceReportChannel {
		if ss.monitors[r.Data.GetId()] == nil || ss.monitors[r.Data.GetId()].ID == 0 {
			log.Printf("NEZHA>> 错误的服务监控上报 %+v", r)
			continue
		}
		mh := model.PB2MonitorHistory(r.Data)
		ss.serviceResponseDataStoreLock.Lock()
		// First check whether a new day has started
		nowDate := time.Now().Format("02-Jan-06")
		if nowDate != ss.latestDate[mh.MonitorID] {
			// Clear the previous day's data
			ss.latestDate[mh.MonitorID] = nowDate
			ss.serviceResponseDataStoreCurrentUp[mh.MonitorID] = 0
			ss.serviceResponseDataStoreCurrentDown[mh.MonitorID] = 0
			ss.serviceStatusToday[mh.MonitorID].Delay = 0
			ss.serviceStatusToday[mh.MonitorID].Up = 0
			ss.serviceStatusToday[mh.MonitorID].Down = 0
		}
		// Record today's status
		if mh.Successful {
			ss.serviceStatusToday[mh.MonitorID].Delay = (ss.serviceStatusToday[mh.MonitorID].Delay*float32(ss.serviceStatusToday[mh.MonitorID].Up) + mh.Delay) / float32(ss.serviceStatusToday[mh.MonitorID].Up+1)
			ss.serviceStatusToday[mh.MonitorID].Up++
		} else {
			ss.serviceStatusToday[mh.MonitorID].Down++
			ServerLock.RLock()
			log.Println("NEZHA>> 服务故障上报:", ss.monitors[mh.MonitorID].Target, "上报者:", ServerList[r.Reporter].Name, "错误信息:", mh.Data)
			ServerLock.RUnlock()
		}
		// Write into the current-window ring buffer
		ss.serviceCurrentStatusData[mh.MonitorID][ss.serviceCurrentStatusIndex[mh.MonitorID]] = mh
		ss.serviceCurrentStatusIndex[mh.MonitorID]++
		// Recompute the current up/down counters
		ss.serviceResponseDataStoreCurrentUp[mh.MonitorID] = 0
		ss.serviceResponseDataStoreCurrentDown[mh.MonitorID] = 0
		for i := 0; i < len(ss.serviceCurrentStatusData[mh.MonitorID]); i++ {
			if ss.serviceCurrentStatusData[mh.MonitorID][i].MonitorID > 0 {
				if ss.serviceCurrentStatusData[mh.MonitorID][i].Successful {
					ss.serviceResponseDataStoreCurrentUp[mh.MonitorID]++
				} else {
					ss.serviceResponseDataStoreCurrentDown[mh.MonitorID]++
				}
			}
		}
		var upPercent uint64 = 0
		if ss.serviceResponseDataStoreCurrentDown[mh.MonitorID]+ss.serviceResponseDataStoreCurrentUp[mh.MonitorID] > 0 {
			upPercent = ss.serviceResponseDataStoreCurrentUp[mh.MonitorID] * 100 / (ss.serviceResponseDataStoreCurrentDown[mh.MonitorID] + ss.serviceResponseDataStoreCurrentUp[mh.MonitorID])
		}
		stateStr := getStateStr(upPercent)
		// Persist the aggregated window
		if ss.serviceCurrentStatusIndex[mh.MonitorID] == _CurrentStatusSize {
			ss.serviceCurrentStatusIndex[mh.MonitorID] = 0
			if err := DB.Create(&model.MonitorHistory{
				MonitorID:  mh.MonitorID,
				Delay:      ss.serviceStatusToday[mh.MonitorID].Delay,
				Successful: stateStr == _StatusOk,
				Data:       mh.Data,
			}).Error; err != nil {
				log.Println("NEZHA>> 服务监控数据持久化失败:", err)
			}
		}
		if stateStr == "故障" || stateStr != ss.lastStatus[mh.MonitorID] {
			ss.monitorsLock.RLock()
			isNeedSendNotification := (ss.lastStatus[mh.MonitorID] != "" || stateStr == "故障") && ss.monitors[mh.MonitorID].Notify
			ss.lastStatus[mh.MonitorID] = stateStr
			if isNeedSendNotification {
				go SendNotification(fmt.Sprintf("[服务%s] %s", stateStr, ss.monitors[mh.MonitorID].Name), true)
			}
			ss.monitorsLock.RUnlock()
		}
		ss.serviceResponseDataStoreLock.Unlock()
		// SSL certificate alerts
		var errMsg string
		if strings.HasPrefix(mh.Data, "SSL证书错误:") {
			// Ignore i/o timeout, connection timeout and EOF errors
			if !strings.HasSuffix(mh.Data, "timeout") &&
				!strings.HasSuffix(mh.Data, "EOF") &&
				!strings.HasSuffix(mh.Data, "timed out") {
				errMsg = mh.Data
			}
		} else {
			var newCert = strings.Split(mh.Data, "|")
			if len(newCert) > 1 {
				if ss.sslCertCache[mh.MonitorID] == "" {
					ss.sslCertCache[mh.MonitorID] = mh.Data
				}
				expiresNew, _ := time.Parse("2006-01-02 15:04:05 -0700 MST", newCert[1])
				// Certificate expiry reminder
				if expiresNew.Before(time.Now().AddDate(0, 0, 7)) {
					errMsg = fmt.Sprintf(
						"SSL证书将在七天内过期,过期时间:%s。",
						expiresNew.Format("2006-01-02 15:04:05"))
				}
				// Certificate change reminder
				var oldCert = strings.Split(ss.sslCertCache[mh.MonitorID], "|")
				var expiresOld time.Time
				if len(oldCert) > 1 {
					expiresOld, _ = time.Parse("2006-01-02 15:04:05 -0700 MST", oldCert[1])
				}
				if oldCert[0] != newCert[0] && !expiresNew.Equal(expiresOld) {
					ss.sslCertCache[mh.MonitorID] = mh.Data
					errMsg = fmt.Sprintf(
						"SSL证书变更,旧:%s, %s 过期;新:%s, %s 过期。",
						oldCert[0], expiresOld.Format("2006-01-02 15:04:05"), newCert[0], expiresNew.Format("2006-01-02 15:04:05"))
				}
			}
		}
		if errMsg != "" {
			ss.monitorsLock.RLock()
			if ss.monitors[mh.MonitorID].Notify {
				go SendNotification(fmt.Sprintf("[SSL] %s %s", ss.monitors[mh.MonitorID].Name, errMsg), true)
			}
			ss.monitorsLock.RUnlock()
		}
	}
}
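
For orientation: check results enter the sentinel through Dispatch. A minimal sketch of the producer side, assuming a hypothetical receiver function (only ServiceSentinelShared, Dispatch, ReportData and pb.TaskResult come from this file):

// Hypothetical receiver-side usage; illustration only.
func onTaskResult(reporterID uint64, res *pb.TaskResult) {
	ServiceSentinelShared.Dispatch(ReportData{
		Data:     res,        // res.GetId() must match a registered monitor
		Reporter: reporterID, // the server that ran the check, used in failure logs
	})
}
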
86
service/singleton/singleton.go
Normal file
@@ -0,0 +1,86 @@
package singleton

import (
	"fmt"
	"sort"
	"sync"

	"github.com/patrickmn/go-cache"
	"github.com/robfig/cron/v3"
	"gorm.io/gorm"

	"github.com/naiba/nezha/model"
	pb "github.com/naiba/nezha/proto"
)

var Version = "v0.12.6" // !! remember to update the badge version in the README !!

var (
	Conf  *model.Config
	Cache *cache.Cache
	DB    *gorm.DB

	ServerList map[uint64]*model.Server
	SecretToID map[string]uint64
	ServerLock sync.RWMutex

	SortedServerList []*model.Server
	SortedServerLock sync.RWMutex
)

func ReSortServer() {
	ServerLock.RLock()
	defer ServerLock.RUnlock()
	SortedServerLock.Lock()
	defer SortedServerLock.Unlock()

	SortedServerList = []*model.Server{}
	for _, s := range ServerList {
		SortedServerList = append(SortedServerList, s)
	}

	// Sort by DisplayIndex (descending), breaking ties by ascending ID
	sort.SliceStable(SortedServerList, func(i, j int) bool {
		if SortedServerList[i].DisplayIndex == SortedServerList[j].DisplayIndex {
			return SortedServerList[i].ID < SortedServerList[j].ID
		}
		return SortedServerList[i].DisplayIndex > SortedServerList[j].DisplayIndex
	})
}

// =============== Cron Mixin ===============

var CronLock sync.RWMutex
var Crons map[uint64]*model.Cron
var Cron *cron.Cron

func ManualTrigger(c model.Cron) {
	CronTrigger(c)()
}

func CronTrigger(cr model.Cron) func() {
	crIgnoreMap := make(map[uint64]bool)
	for j := 0; j < len(cr.Servers); j++ {
		crIgnoreMap[cr.Servers[j]] = true
	}
	return func() {
		ServerLock.RLock()
		defer ServerLock.RUnlock()
		for _, s := range ServerList {
			if cr.Cover == model.CronCoverAll && crIgnoreMap[s.ID] {
				continue
			}
			if cr.Cover == model.CronCoverIgnoreAll && !crIgnoreMap[s.ID] {
				continue
			}
			if s.TaskStream != nil {
				s.TaskStream.Send(&pb.Task{
					Id:   cr.ID,
					Data: cr.Command,
					Type: model.TaskTypeCommand,
				})
			} else {
				SendNotification(fmt.Sprintf("[任务失败] %s,服务器 %s 离线,无法执行。", cr.Name, s.Name), false)
			}
		}
	}
}
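
The cr.Servers list is thus interpreted relative to cr.Cover: under model.CronCoverAll it is an exclusion list, under model.CronCoverIgnoreAll an inclusion list. A small helper restating that predicate (the function name is made up; the constants and logic come from CronTrigger above):

// shouldRun mirrors the two skip conditions in CronTrigger. Hypothetical
// helper for illustration only.
func shouldRun(cr model.Cron, listed bool) bool {
	if cr.Cover == model.CronCoverAll && listed {
		return false // run everywhere except the listed servers
	}
	if cr.Cover == model.CronCoverIgnoreAll && !listed {
		return false // run only on the listed servers
	}
	return true
}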