// Copyright 2021 The Gitea Authors. All rights reserved.
// SPDX-License-Identifier: MIT

package routing

import (
	"context"
	"net/http"
	"sync"
	"time"

	"code.gitea.io/gitea/modules/graceful"
	"code.gitea.io/gitea/modules/process"
)

// Event indicates when the printer is triggered
type Event int

const (
	// StartEvent at the beginning of a request
	StartEvent Event = iota

	// StillExecutingEvent the request is still executing
	StillExecutingEvent

	// EndEvent the request has ended (either completed or failed)
	EndEvent
)

// Printer is used to output the log for a request
type Printer func(trigger Event, record *requestRecord)
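
// A minimal sketch of what a Printer could look like (illustrative only; the
// use of the standard "log" package and the exact messages are assumptions,
// not the real printer used by this package):
//
//	func examplePrinter(trigger Event, record *requestRecord) {
//		switch trigger {
//		case StartEvent:
//			log.Printf("started: %s %s", record.request.Method, record.request.RequestURI)
//		case StillExecutingEvent:
//			log.Printf("still executing: %s %s", record.request.Method, record.request.RequestURI)
//		case EndEvent:
//			log.Printf("completed: %s %s in %v", record.request.Method, record.request.RequestURI, time.Since(record.startTime))
//		}
//	}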

type requestRecordsManager struct {
	print Printer

	lock sync.Mutex

	requestRecords map[uint64]*requestRecord
	count          uint64
}

func (manager *requestRecordsManager) startSlowQueryDetector(threshold time.Duration) {
	go graceful.GetManager().RunWithShutdownContext(func(ctx context.Context) {
		ctx, _, finished := process.GetManager().AddTypedContext(ctx, "Service: SlowQueryDetector", process.SystemProcessType, true)
		defer finished()

		// This goroutine checks all active requests every second.
		// If a request has been running for a long time (e.g. /user/events), print a "still-executing" log for it.
		// After the "still-executing" log is printed, the record is removed from the map to prevent duplicated logs later.
		// Accurate durations do not matter here; the check is periodic, so 0.5s or 1.5s are both fine.
		t := time.NewTicker(time.Second)
		for {
			select {
			case <-ctx.Done():
				return
			case <-t.C:
				now := time.Now()

				var slowRequests []*requestRecord

				// find all slow requests while holding the lock
				manager.lock.Lock()
				for index, record := range manager.requestRecords {
					if now.Sub(record.startTime) < threshold {
						continue
					}
					slowRequests = append(slowRequests, record)
					delete(manager.requestRecords, index)
				}
				manager.lock.Unlock()

				// print logs for slow requests outside the lock
				for _, record := range slowRequests {
					manager.print(StillExecutingEvent, record)
				}
			}
		}
	})
}

func (manager *requestRecordsManager) handler(next http.Handler) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
		record := &requestRecord{
			startTime:      time.Now(),
			request:        req,
			responseWriter: w,
		}

		// generate a record index and insert it into the map
		manager.lock.Lock()
		record.index = manager.count
		manager.count++
		manager.requestRecords[record.index] = record
		manager.lock.Unlock()

		defer func() {
			// just in case there is a panic; normally all panics are recovered in middleware.go
			localPanicErr := recover()
			if localPanicErr != nil {
				record.lock.Lock()
				record.panicError = localPanicErr
				record.lock.Unlock()
			}

			// remove the record from the map
			manager.lock.Lock()
			delete(manager.requestRecords, record.index)
			manager.lock.Unlock()

			// log the end of the request
			manager.print(EndEvent, record)

			if localPanicErr != nil {
				// the panic wasn't recovered before us, so pass it up and let the framework handle it
				panic(localPanicErr)
			}
		}()

		req = req.WithContext(context.WithValue(req.Context(), contextKey, record))
		manager.print(StartEvent, record)
		next.ServeHTTP(w, req)
	})
}
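
// A rough sketch of how this manager might be wired together (illustrative
// only; the printer, the three-second threshold, and the plain ServeMux below
// are assumptions, not code from this package):
//
//	manager := &requestRecordsManager{
//		print:          examplePrinter, // any Printer implementation
//		requestRecords: map[uint64]*requestRecord{},
//	}
//	manager.startSlowQueryDetector(3 * time.Second)
//
//	mux := http.NewServeMux()
//	wrapped := manager.handler(mux) // every request through wrapped is now recorded and logged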