accesslog: improvements and new features

relative to: #1601 and #1624
Gerasimos (Makis) Maropoulos 2020-09-13 02:56:22 +03:00
parent 7d5789c3de
commit 4845b77177
No known key found for this signature in database
GPG Key ID: 5DBE766BD26A54E7
16 changed files with 612 additions and 261 deletions

3
NOTICE
View File

@ -41,6 +41,9 @@ Revision ID: ab226d925aa394ccecf01e515ea8479367e0961c
crypto 4b2356b1ed79e6b https://go.googlesource.com/crypto
e3deca3737a3db3
d132d2847a
easyjson 8ab5ff9cd8e4e43 https://github.com/mailru/easyjson
2e8b79f6c47d324
a31dd803cf
go-version 2b13044f5cdd383 https://github.com/hashicorp/go-version
3370d41ce57d8bf
3cec5e62b8

View File

@ -19,9 +19,9 @@ func main() {
ac := accesslog.File("./access.log")
defer ac.Close()
ac.AddOutput(os.Stdout)
ac.RequestBody = true
// Set to false to print errors as one line:
// ac.KeepMultiLineError = false
// Set the "depth" of a panic trace:

View File

@ -1,5 +1,7 @@
Timestamp,Latency,Code,Method,Path,IP,Req Values,In,Out,Request,Response
1599900695933,0s,200,GET,/,::1,sleep=35ms,575,81,,Index
1599900696207,0s,404,GET,/notfound,::1,,572,92,,Not Found
1599900696693,0s,200,GET,/,::1,,564,81,,Index
1599900697988,1s,200,GET,/,::1,sleep=1s,573,81,,Index
1599952696740,0s,200,GET,/,::1,,0,5,,Index
1599952696774,0s,404,GET,/favicon.ico,::1,,0,9,,Not Found
1599952697996,0s,200,GET,/,::1,,0,5,,Index
1599952698130,0s,200,GET,/,::1,,0,5,,Index
1599952698261,0s,200,GET,/,::1,,0,5,,Index
1599952741189,1.01113s,200,GET,/,::1,sleep=1s,0,5,,Index

View File

@ -10,11 +10,10 @@ import (
func main() {
app := iris.New()
ac := accesslog.File("access_log.csv")
ac.ResponseBody = true
ac.SetFormatter(&accesslog.CSV{
AutoFlush: true,
Header: true,
Header: true,
// DateScript: "FROM_UNIX",
LatencyRound: time.Second,
})
app.UseRouter(ac.Handler)
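For reference, the latency rounding now lives on the AccessLog itself rather than on the CSV formatter (see the new LatencyRound field in accesslog.go further below); a minimal sketch of the updated setup, assuming the same example imports:

	ac := accesslog.File("access_log.csv")
	ac.ResponseBody = true
	ac.LatencyRound = time.Second // previously CSV{LatencyRound: time.Second}
	ac.SetFormatter(&accesslog.CSV{Header: true})
	app.UseRouter(ac.Handler)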

View File

@ -13,21 +13,32 @@ func makeAccessLog() *accesslog.AccessLog {
// Initialize a new access log middleware.
ac := accesslog.File("./access.log")
// Defaults to true. Change to false for better performance.
ac.RequestBody = false
ac.ResponseBody = false
// The default configuration:
ac.Delim = '|'
ac.TimeFormat = "2006-01-02 15:04:05"
ac.Async = false
ac.IP = true
ac.BytesReceivedBody = true
ac.BytesSentBody = true
ac.BytesReceived = false
ac.BytesSent = false
ac.BodyMinify = true
ac.RequestBody = true
ac.ResponseBody = false
ac.KeepMultiLineError = true
ac.PanicLog = accesslog.LogHandler
// Defaults to false.
ac.Async = false
// Set Custom Formatter:
ac.SetFormatter(&accesslog.JSON{})
// ac.SetFormatter(&accesslog.CSV{})
// ac.SetFormatter(&accesslog.Template{Text: "{{.Code}}"})
return ac
}
func main() {
ac := makeAccessLog()
defer ac.Close()
defer ac.Close() // Close the underlying file.
app := iris.New()
// Register the middleware (UseRouter to catch http errors too).
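The route registration itself falls outside this hunk; for context, it looks roughly like the following sketch (the index handler and the port are illustrative):

	app.UseRouter(ac.Handler) // UseRouter catches HTTP errors too.
	app.Get("/", func(ctx iris.Context) {
		ctx.WriteString("Index")
	})
	app.Listen(":8080")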

View File

@ -1,6 +1,7 @@
package main // See https://github.com/kataras/iris/issues/1601
import (
"bufio"
"time"
"github.com/kataras/iris/v12"
@ -29,7 +30,7 @@ func makeAccessLog() *accesslog.AccessLog {
// Initialize a new access log middleware.
// Accepts an `io.Writer`.
ac := accesslog.New(w)
ac := accesslog.New(bufio.NewWriter(w))
// ac.TimeFormat = "2006-01-02 15:04:05" // default
// Example of adding more than one field to the logger.
@ -73,6 +74,8 @@ func makeAccessLog() *accesslog.AccessLog {
ac.ResponseBody = false
ac.BytesReceived = false
ac.BytesSent = false
ac.BytesReceivedBody = false
ac.BytesSentBody = false
// Add a second output:
ac.AddOutput(app.Logger().Printer)

1
go.mod
View File

@ -26,6 +26,7 @@ require (
github.com/kataras/sitemap v0.0.5
github.com/kataras/tunnel v0.0.1
github.com/klauspost/compress v1.11.0
github.com/mailru/easyjson v0.7.6
github.com/mediocregopher/radix/v3 v3.5.2
github.com/microcosm-cc/bluemonday v1.0.4
github.com/russross/blackfriday/v2 v2.0.1

View File

@ -1,7 +1,9 @@
package accesslog
import (
"bufio"
"bytes"
stdContext "context"
"fmt"
"io"
"net/http/httputil"
@ -9,9 +11,11 @@ import (
"strconv"
"strings"
"sync"
"sync/atomic"
"time"
"github.com/kataras/iris/v12/context"
"github.com/kataras/iris/v12/core/host"
"github.com/kataras/iris/v12/core/memstore"
)
@ -89,11 +93,13 @@ var (
// AccessLog is a middleware which prints information
// about incoming HTTP requests.
//
// Sample access log line:
// 2020-08-22 00:44:20|1ms|POST|/read_body||200|{"id":10,"name":"Tim","age":22}|{"message":"OK"}|
// Default log format:
// Time|Latency|Code|Method|Path|IP|Path Params Query Fields|Bytes Received|Bytes Sent|Request|Response|
//
// See the `New` and `File` package-level functions
// and the `Handler` method to learn more.
// If the given writer is a buffered one,
// its contents are flushed automatically on Close call.
//
// A new AccessLog middleware MUST
// be created after a `New` function call.
@ -103,17 +109,11 @@ type AccessLog struct {
// If multiple output required, then define an `io.MultiWriter`.
// See `SetOutput` and `AddOutput` methods too.
Writer io.Writer
// If enabled, it locks the underlying Writer.
// It should be turned off if the given `Writer` is already protected with a locker.
// It is enabled when writer is os.Stdout/os.Stderr.
// You should manually set this field to true if you are not sure
// whether the underlying Writer is protected.
//
// Defaults to true on *os.File and *bytes.Buffer, otherwise false.
LockWriter bool
// If not empty then each one of them is called on `Close` method.
Closers []io.Closer
// File type destinations are automatically added.
Flushers []Flusher
Closers []io.Closer
// If not empty then overrides the time.Now to this custom clocker's `Now` method,
// useful for testing (see `TClock`) and
@ -133,41 +133,70 @@ type AccessLog struct {
//
// Defaults to false.
Async bool
// The delimiter between fields when logging with the default format.
// See `SetFormatter` to customize the log even further.
//
// Defaults to '|'.
Delim rune
// The time format for current time on log print.
// Defaults to "2006-01-02 15:04:05" on `New` function.
// Set it to empty to inherit the Iris Application's TimeFormat.
//
// Defaults to "2006-01-02 15:04:05"
TimeFormat string
// Round the latency based on the given duration, e.g. time.Second.
//
// Defaults to 0.
LatencyRound time.Duration
// IP displays the remote address.
//
// Defaults to true.
IP bool
// The number of bytes for the request body only.
// Applied when BytesReceived is false.
//
// Defaults to true.
BytesReceivedBody bool
// The number of bytes for the response body only.
// Applied when BytesSent is false.
//
// Defaults to true.
BytesSentBody bool
// The actual number of bytes received and sent on the network (headers + body).
// It is a kind of "slow" operation as it uses httputil to dump the request
// and response to get the total amount of bytes (headers + body).
//
// They override the BytesReceivedBody and BytesSentBody fields.
// These two fields provide a more accurate measurement
// than BytesReceivedBody and BytesSentBody; however,
// they are expensive operations, so expect a slower execution.
//
// They both default to false.
BytesReceived bool
BytesSent bool
// Note: We could calculate only the bodies, which is a fast operation if we already
// have RequestBody and ResponseBody set to true but this is not an accurate measurement.
// Force minify request and response contents.
BodyMinify bool
// Enable request body logging.
// Note that, if this is true, it modifies the underlying request's body type.
//
// Defaults to true.
RequestBody bool
// Enable response body logging.
// Note that, if this is true, it uses a response recorder.
//
// Defaults to false.
ResponseBody bool
// Force minify request and response contents.
//
// Defaults to true.
BodyMinify bool
// KeepMultiLineError displays the Context's error as it is.
// If set to false then it replaces all line characters with spaces.
//
// See `PanicLog` to customize recovered-from-panic errors even further.
//
// Defaults to true.
KeepMultiLineError bool
// What the logger should write to the output destination
// when recovered from a panic.
// Available options:
@ -185,8 +214,14 @@ type AccessLog struct {
formatter Formatter
broker *Broker
// the log instance for custom formatters.
logsPool *sync.Pool
bufPool *sync.Pool
// the builder for the default format.
bufPool *sync.Pool
// remaining logs when Close is called, we wait for timeout (see CloseContext).
remaining uint32
// reports whether the logger is already closed, see `Close` & `CloseContext` methods.
isClosed bool
}
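To make the Bytes* semantics above concrete, a small configuration sketch (assuming an `ac` created by `New`):

	// Fast: count only the request/response body sizes (the new defaults).
	ac.BytesReceivedBody = true
	ac.BytesSentBody = true

	// Accurate but slow: dump headers + body through httputil.
	// These override the *Body fields when enabled.
	// ac.BytesReceived = true
	// ac.BytesSent = true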
// PanicLog holds the type for the available panic log levels.
@ -204,7 +239,8 @@ const (
const defaultTimeFormat = "2006-01-02 15:04:05"
// New returns a new AccessLog value with the default values.
// Writes to the "w". Output be further modified through its `Set/AddOutput` methods.
// Writes to the "w". Output can be further modified through its `Set/AddOutput` methods.
//
// Register by its `Handler` method.
// See `File` package-level function too.
//
@ -217,13 +253,18 @@ func New(w io.Writer) *AccessLog {
Clock: clockFunc(time.Now),
Delim: defaultDelim,
TimeFormat: defaultTimeFormat,
LatencyRound: 0,
Async: false,
IP: true,
BytesReceived: true,
BytesSent: true,
BodyMinify: true,
BytesReceived: false,
BytesSent: false,
BytesReceivedBody: true,
BytesSentBody: true,
RequestBody: true,
ResponseBody: true,
ResponseBody: false,
BodyMinify: true,
KeepMultiLineError: true,
PanicLog: LogHandler,
logsPool: &sync.Pool{New: func() interface{} {
return new(Log)
}},
@ -237,11 +278,27 @@ func New(w io.Writer) *AccessLog {
}
ac.SetOutput(w)
// workers := 20
// listener := ac.Broker().NewListener()
// for i := 0; i < workers; i++ {
// go func() {
// for log := range listener {
// atomic.AddUint32(&ac.remaining, 1)
// ac.handleLog(log)
// atomic.AddUint32(&ac.remaining, ^uint32(0))
// }
// }()
// }
host.RegisterOnInterrupt(func() {
ac.Close()
})
return ac
}
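The commented-out worker block above hints at how the Broker is meant to be consumed; a minimal listener sketch, assuming the channel-close semantics introduced in broker.go further below (the printed fields are illustrative, `fmt` is assumed imported):

	listener := ac.Broker().NewListener() // must be created before serve-time.
	go func() {
		for log := range listener { // the channel is closed by ac.Close().
			fmt.Println(log.Method, log.Path, log.Latency)
		}
	}()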
// File returns a new AccessLog value with the given "path"
// as the log's output file destination.
// The Writer is now a buffered file writer.
// Register by its `Handler` method.
//
// A call of its `Close` method to unlock the underlying
@ -256,7 +313,7 @@ func File(path string) *AccessLog {
panic(err)
}
return New(f)
return New(bufio.NewWriter(f))
}
// Broker creates or returns the broker.
@ -268,62 +325,37 @@ func (ac *AccessLog) Broker() *Broker {
ac.mu.Lock()
if ac.broker == nil {
ac.broker = newBroker()
// atomic.StoreUint32(&ac.brokerActive, 1)
}
ac.mu.Unlock()
return ac.broker
}
// func (ac *AccessLog) isBrokerActive() bool { // see `Print` method.
// return atomic.LoadUint32(&ac.brokerActive) > 0
// }
// ^ No need, we declare that the Broker should be called
// before serve-time. Let's respect our comment
// and don't try to make it safe for write and read concurrent access.
// Write writes to the log destination.
// It completes the io.Writer interface.
// Safe for concurrent use.
func (ac *AccessLog) Write(p []byte) (int, error) {
if ac.LockWriter {
ac.mu.Lock()
}
n, err := ac.Writer.Write(p)
if ac.LockWriter {
ac.mu.Unlock()
}
return n, err
}
// SetOutput sets the log's output destination. Accepts one or more io.Writer values.
// Also, if a writer is a Closer, then it is automatically appended to the Closers.
// Call it before `SetFormatter` and `Handler` methods.
func (ac *AccessLog) SetOutput(writers ...io.Writer) *AccessLog {
if len(writers) == 0 {
switch len(writers) {
case 0:
return ac
}
lockWriter := false
for _, w := range writers {
if closer, ok := w.(io.Closer); ok {
ac.Closers = append(ac.Closers, closer)
}
if !lockWriter {
switch w.(type) {
case *os.File, *bytes.Buffer: // force lock writer.
lockWriter = true
}
}
}
ac.LockWriter = lockWriter
if len(writers) == 1 {
case 1:
ac.Writer = writers[0]
} else {
default:
ac.Writer = io.MultiWriter(writers...)
}
for _, w := range writers {
if flusher, ok := w.(Flusher); ok {
ac.Flushers = append(ac.Flushers, flusher)
}
if closer, ok := w.(io.Closer); ok {
ac.Closers = append(ac.Closers, closer)
}
}
// ac.bufWriter = bufio.NewWriterSize(ac.Writer, 4096)
// No ^, let the caller decide that; we're just helping with automatic flushing
// through the `Flushers` field called when necessary.
return ac
}
@ -337,23 +369,64 @@ func (ac *AccessLog) AddOutput(writers ...io.Writer) *AccessLog {
return ac.SetOutput(writers...)
}
// func (ac *AccessLog) isBrokerActive() bool { // see `Print` method.
// return atomic.LoadUint32(&ac.brokerActive) > 0
// }
// ^ No need, we declare that the Broker should be called
// before serve-time. Let's respect our comment
// and don't try to make it safe for write and read concurrent access.
// Write writes to the log destination.
// It completes the io.Writer interface.
// Safe for concurrent use.
func (ac *AccessLog) Write(p []byte) (int, error) {
ac.mu.Lock()
n, err := ac.Writer.Write(p)
ac.mu.Unlock()
return n, err
}
// Flush writes any buffered data to the underlying Flusher Writer.
// Flush is called automatically on Close.
func (ac *AccessLog) Flush() (err error) {
ac.mu.Lock()
for _, f := range ac.Flushers {
fErr := f.Flush()
if fErr != nil {
if err == nil {
err = fErr
} else {
err = fmt.Errorf("%v, %v", err, fErr)
}
}
}
ac.mu.Unlock()
return
}
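Tying SetOutput, the Flushers and the Closers together, a sketch of a buffered file destination plus stdout; whether `File` itself registers the `*os.File` as a Closer is not visible in this hunk, so the sketch opens and closes the file explicitly:

	f, err := os.OpenFile("./access.log", os.O_CREATE|os.O_WRONLY|os.O_APPEND, 0600)
	if err != nil {
		panic(err)
	}
	ac := accesslog.New(bufio.NewWriter(f)) // *bufio.Writer implements Flusher: flushed on Close.
	ac.AddOutput(os.Stdout)
	ac.Closers = append(ac.Closers, f) // *bufio.Writer is not an io.Closer, so close the file ourselves.
	defer ac.Close()                   // Flush runs first, then the Closers.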
// SetFormatter sets a custom formatter to print the logs.
// Any custom output writers should be
// already registered before calling this method.
// Returns this AccessLog instance.
//
// Usage:
// ac.SetFormatter(&accesslog.JSON{Indent: " "})
func (ac *AccessLog) SetFormatter(f Formatter) *AccessLog {
if ac.Writer == nil {
panic("accesslog: SetFormatter called with nil Writer")
}
if f == nil {
return ac
}
if flusher, ok := ac.formatter.(Flusher); ok {
// Prepend the formatter's flusher; it should run before the destinations' flushers.
ac.Flushers = append([]Flusher{flusher}, ac.Flushers...)
}
// Inject the writer (AccessLog) here; this writer
// is protected with a mutex.
f.SetOutput(ac)
ac.formatter = f
@ -368,10 +441,46 @@ func (ac *AccessLog) AddFields(setters ...FieldSetter) *AccessLog {
return ac
}
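Judging by the CSV/JSON/Template implementations in this commit, a Formatter needs a `SetOutput(io.Writer)` and a `Format(*Log) (bool, error)` method; a hypothetical custom formatter could therefore look like this sketch (returning true tells `Print` that the log was handled):

	type KVFormatter struct{ w io.Writer }

	// SetOutput receives the *AccessLog itself, whose Write is mutex-protected.
	func (f *KVFormatter) SetOutput(dest io.Writer) { f.w = dest }

	func (f *KVFormatter) Format(log *Log) (bool, error) {
		_, err := fmt.Fprintf(f.w, "code=%d method=%s path=%s latency=%s\n",
			log.Code, log.Method, log.Path, log.Latency)
		return true, err // handled: the default '|'-separated line is skipped.
	}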
// Close calls each registered Closer's Close method.
// Exits when all close methods have been executed.
// Close terminates any broker listeners,
// waits for any remaining logs up to 10 seconds
// (see `CloseContext` to set custom deadline),
// flushes any formatter and any buffered data to the underlying writer
// and finally closes any registered closers (files are automatically added as Closers).
//
// After Close is called the AccessLog is not accessible.
func (ac *AccessLog) Close() (err error) {
ac.flushFormatter()
if ac.isClosed {
return nil
}
ctx, cancelFunc := stdContext.WithTimeout(stdContext.Background(), 10*time.Second)
defer cancelFunc()
return ac.CloseContext(ctx)
}
// CloseContext same as `Close` but waits until given "ctx" is done.
func (ac *AccessLog) CloseContext(ctx stdContext.Context) (err error) {
if ac.isClosed {
return nil
}
ac.isClosed = true
if ac.broker != nil {
ac.broker.close <- struct{}{}
}
if ac.Async {
ac.waitRemaining(ctx)
}
if fErr := ac.Flush(); fErr != nil {
if err == nil {
err = fErr
} else {
err = fmt.Errorf("%v, %v", err, fErr)
}
}
for _, closer := range ac.Closers {
cErr := closer.Close()
@ -387,21 +496,32 @@ func (ac *AccessLog) Close() (err error) {
return
}
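For graceful shutdowns with a tighter deadline than Close's default 10 seconds, the caller can drive CloseContext directly; a sketch (stdContext is this file's alias for the standard context package):

	ctx, cancel := stdContext.WithTimeout(stdContext.Background(), 3*time.Second)
	defer cancel()
	if err := ac.CloseContext(ctx); err != nil {
		fmt.Println("accesslog close:", err)
	}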
func (ac *AccessLog) flushFormatter() {
if ac.formatter != nil {
if flusher, ok := ac.formatter.(Flusher); ok {
flusher.Flush()
func (ac *AccessLog) waitRemaining(ctx stdContext.Context) {
if n := atomic.LoadUint32(&ac.remaining); n == 0 {
return
}
t := time.NewTicker(2 * time.Second)
defer t.Stop()
for {
select {
case <-ctx.Done():
return
case <-t.C:
if atomic.LoadUint32(&ac.remaining) == 0 {
return
}
}
}
}
func (ac *AccessLog) shouldReadRequestBody() bool {
return ac.RequestBody || ac.BytesReceived
return ac.RequestBody || ac.BytesReceived || ac.BytesReceivedBody
}
func (ac *AccessLog) shouldReadResponseBody() bool {
return ac.ResponseBody || ac.BytesSent
return ac.ResponseBody || ac.BytesSent /* || ac.BytesSentBody this can be measured by the default writer's Written() */
}
// Handler prints request information to the output destination.
@ -469,7 +589,8 @@ func (ac *AccessLog) Handler(ctx *context.Context) {
return
}
latency := time.Since(startTime)
latency := time.Since(startTime).Round(ac.LatencyRound)
if ac.Async {
ctxCopy := ctx.Clone()
go ac.after(ctxCopy, latency, method, path)
@ -484,7 +605,7 @@ func (ac *AccessLog) after(ctx *context.Context, lat time.Duration, method, path
// request and response data or error reading them.
requestBody string
responseBody string
bytesReceived int
bytesReceived int // total or body, depends on the configuration.
bytesSent int
)
@ -498,6 +619,9 @@ func (ac *AccessLog) after(ctx *context.Context, lat time.Duration, method, path
} else {
requestData, err := ctx.GetBody()
requestBodyLength := len(requestData)
if ac.BytesReceivedBody {
bytesReceived = requestBodyLength // store it; if the total is enabled then this will be overridden.
}
if err != nil && ac.RequestBody {
requestBody = ac.getErrorText(err)
} else if requestBodyLength > 0 {
@ -530,6 +654,9 @@ func (ac *AccessLog) after(ctx *context.Context, lat time.Duration, method, path
if ac.shouldReadResponseBody() {
responseData := ctx.Recorder().Body()
responseBodyLength := len(responseData)
if ac.BytesSentBody {
bytesSent = responseBodyLength
}
if ac.ResponseBody && responseBodyLength > 0 {
if ac.BodyMinify {
if minified, err := ctx.Application().Minifier().Bytes(ctx.GetContentType(), responseData); err == nil {
@ -551,6 +678,8 @@ func (ac *AccessLog) after(ctx *context.Context, lat time.Duration, method, path
}
bytesSent = len(b) + responseBodyLength + dateLengthProx
}
} else if ac.BytesSentBody {
bytesSent = ctx.ResponseWriter().Written()
}
// Grab any custom fields.
@ -589,10 +718,33 @@ func (ac *AccessLog) after(ctx *context.Context, lat time.Duration, method, path
}
const defaultDelim = '|'
const newLine = '\n'
// Print writes a log manually.
// The `Handler` method calls it.
func (ac *AccessLog) Print(ctx *context.Context, latency time.Duration, timeFormat string, code int, method, path, ip, reqBody, respBody string, bytesReceived, bytesSent int, params memstore.Store, query []memstore.StringEntry, fields []memstore.Entry) (err error) {
func (ac *AccessLog) Print(ctx *context.Context,
latency time.Duration,
timeFormat string,
code int,
method, path, ip, reqBody, respBody string,
bytesReceived, bytesSent int,
params memstore.Store, query []memstore.StringEntry, fields []memstore.Entry) (err error) {
if ac.Async {
// atomic.AddUint32(&ac.remaining, 1)
// This could work ^
// but, to make sure we have the correct number of increments,
// we use a CAS loop:
for {
cur := atomic.LoadUint32(&ac.remaining)
if atomic.CompareAndSwapUint32(&ac.remaining, cur, cur+1) {
break
}
}
defer atomic.AddUint32(&ac.remaining, ^uint32(0))
}
now := ac.Clock.Now()
if hasFormatter, hasBroker := ac.formatter != nil, ac.broker != nil; hasFormatter || hasBroker {
@ -609,10 +761,10 @@ func (ac *AccessLog) Print(ctx *context.Context, latency time.Duration, timeForm
log.Query = query
log.PathParams = params
log.Fields = fields
log.BytesReceived = bytesReceived
log.BytesSent = bytesSent
log.Request = reqBody
log.Response = respBody
log.BytesReceived = bytesReceived
log.BytesSent = bytesSent
log.Ctx = ctx
var handled bool
@ -629,7 +781,6 @@ func (ac *AccessLog) Print(ctx *context.Context, latency time.Duration, timeForm
}
ac.logsPool.Put(log) // we don't need it anymore.
if handled {
return // OK, it's handled, exit now.
}
@ -639,7 +790,7 @@ func (ac *AccessLog) Print(ctx *context.Context, latency time.Duration, timeForm
// key=value key2=value2.
requestValues := parseRequestValues(code, params, query, fields)
// the number of separators is the same, in order to be easier
// for 3rd-party programs to read the result log file.
builder := ac.bufPool.Get().(*bytes.Buffer)
@ -658,25 +809,35 @@ func (ac *AccessLog) Print(ctx *context.Context, latency time.Duration, timeForm
builder.WriteString(path)
builder.WriteRune(ac.Delim)
builder.WriteString(ip)
builder.WriteRune(ac.Delim)
if ac.IP {
builder.WriteString(ip)
builder.WriteRune(ac.Delim)
}
builder.WriteString(requestValues)
builder.WriteRune(ac.Delim)
builder.WriteString(formatBytes(bytesReceived))
builder.WriteRune(ac.Delim)
if ac.BytesReceived || ac.BytesReceivedBody {
builder.WriteString(formatBytes(bytesReceived))
builder.WriteRune(ac.Delim)
}
builder.WriteString(formatBytes(bytesSent))
builder.WriteRune(ac.Delim)
if ac.BytesSent || ac.BytesSentBody {
builder.WriteString(formatBytes(bytesSent))
builder.WriteRune(ac.Delim)
}
builder.WriteString(reqBody)
builder.WriteRune(ac.Delim)
if ac.RequestBody {
builder.WriteString(reqBody)
builder.WriteRune(ac.Delim)
}
builder.WriteString(respBody)
builder.WriteRune(ac.Delim)
if ac.ResponseBody {
builder.WriteString(respBody)
builder.WriteRune(ac.Delim)
}
builder.WriteRune('\n')
builder.WriteRune(newLine)
ac.Write(builder.Bytes())
builder.Reset()
@ -689,7 +850,7 @@ var lineBreaksReplacer = strings.NewReplacer("\n\r", " ", "\n", " ")
func (ac *AccessLog) getErrorText(err error) (text string) { // caller checks for nil.
if errPanic, ok := context.IsErrPanicRecovery(err); ok {
ac.flushFormatter() // flush any buffered formatter's contents to be written to the output.
ac.Flush() // flush any buffered contents to be written to the output.
switch ac.PanicLog {
case LogHandler:

View File

@ -7,7 +7,9 @@ import (
"io/ioutil"
"net/http"
"net/http/httptest"
"strings"
"sync"
"sync/atomic"
"testing"
"time"
@ -16,30 +18,20 @@ import (
)
func TestAccessLogPrint_Simple(t *testing.T) {
t.Parallel()
const goroutinesN = 42
const goroutinesN = 420
w := new(bytes.Buffer)
ac := New(w)
ac.TimeFormat = "2006-01-02 15:04:05"
ac.Async = true
ac.ResponseBody = true
ac.Clock = TClock(time.Time{})
if !ac.LockWriter { // should be true because we register a *bytes.Buffer.
t.Fatalf("expected LockRriter to be true")
}
var (
expected string
wg = new(sync.WaitGroup)
)
var expected string
var expectedLines int
var mu sync.Mutex
for i := 0; i < goroutinesN; i++ {
wg.Add(1)
expected += "0001-01-01 00:00:00|1s|200|GET|/path_value?url_query=url_query_value|::1|path_param=path_param_value url_query=url_query_value custom=custom_value|||Incoming|Outcoming|\n"
go func() {
defer wg.Done()
ac.Print(
nil,
1*time.Second,
@ -62,22 +54,35 @@ func TestAccessLogPrint_Simple(t *testing.T) {
{Key: "custom", ValueRaw: "custom_value"},
})
}()
mu.Lock()
expected += "0001-01-01 00:00:00|1s|200|GET|/path_value?url_query=url_query_value|::1|path_param=path_param_value url_query=url_query_value custom=custom_value|0 B|0 B|Incoming|Outcoming|\n"
expectedLines++
mu.Unlock()
}
wg.Wait()
// time.Sleep(1 * time.Second)
// just to fire at least some routines (CI: travis).
ac.Close()
if got := atomic.LoadUint32(&ac.remaining); got > 0 { // test wait.
t.Fatalf("expected remaining: %d but got: %d", 0, got)
}
if got := w.String(); expected != got {
t.Fatalf("expected printed result to be:\n'%s'\n\nbut got:\n'%s'", expected, got)
gotLines := strings.Count(got, "\n")
t.Logf("expected printed result to be[%d]:\n'%s'\n\nbut got[%d]:\n'%s'", expectedLines, expected, gotLines, got)
t.Fatalf("expected[%d]: %d but got: %d lines", goroutinesN, expectedLines, gotLines)
}
}
func TestAccessLogBroker(t *testing.T) {
w := new(bytes.Buffer)
ac := New(w)
ac.Clock = TClock(time.Time{})
broker := ac.Broker()
closed := make(chan struct{})
wg := new(sync.WaitGroup)
n := 4
wg.Add(4)
@ -88,11 +93,11 @@ func TestAccessLogBroker(t *testing.T) {
ln := broker.NewListener()
for {
select {
case <-closed:
broker.CloseListener(ln)
t.Log("Log Listener Closed")
return
case log := <-ln:
case log, ok := <-ln:
if !ok {
t.Log("Log Listener Closed")
return
}
lat := log.Latency
t.Log(lat.String())
wg.Done()
@ -145,7 +150,7 @@ func TestAccessLogBroker(t *testing.T) {
// wait for close messages.
wg.Add(1)
close(closed)
ac.Close()
wg.Wait()
}
@ -171,14 +176,14 @@ func benchmarkAccessLogAfter(b *testing.B, withLogStruct, async bool) {
ac := New(ioutil.Discard)
ac.Clock = TClock(time.Time{})
ac.BytesReceived = false
ac.BytesReceivedBody = false
ac.BytesSent = false
ac.BytesSentBody = false
ac.BodyMinify = false
ac.RequestBody = false
ac.ResponseBody = false
ac.KeepMultiLineError = true
ac.Async = false
ac.IP = false
ac.LockWriter = async
if withLogStruct {
ac.SetFormatter(new(noOpFormatter)) // just to create the log structure, here we test the log creation time only.
}

View File

@ -22,6 +22,9 @@ type Broker struct {
// listeners store.
listeners map[LogChan]bool
// force-terminate all listeners.
close chan struct{}
}
// newBroker returns a new broker factory.
@ -31,6 +34,7 @@ func newBroker() *Broker {
newListeners: make(chan LogChan),
closingListeners: make(chan LogChan),
listeners: make(map[LogChan]bool),
close: make(chan struct{}),
}
// Listens and Broadcasts events.
@ -58,6 +62,12 @@ func (b *Broker) run() {
for clientMessageChan := range b.listeners {
clientMessageChan <- log
}
case <-b.close:
for clientMessageChan := range b.listeners {
delete(b.listeners, clientMessageChan)
close(clientMessageChan)
}
}
}
}

View File

@ -4,13 +4,15 @@ import (
"encoding/csv"
"io"
"strconv"
"time"
"sync"
)
// CSV is a Formatter type for csv encoded logs.
type CSV struct {
type CSV struct { // TODO: change it to use csvutil.
writer *csv.Writer
ac *AccessLog
writerPool *sync.Pool
ac *AccessLog
// Add header fields to the first line if it does not exist.
// Note that the destination should be a compatible io.Reader
@ -23,10 +25,6 @@ type CSV struct {
// return new Date(epoch_in_millis);
// }
DateScript string
// Latency Round base, e.g. time.Second.
LatencyRound time.Duration
// Writes immediately every record.
AutoFlush bool
// TODO: Fields []string // field name, position?
}
@ -36,15 +34,13 @@ type CSV struct {
// write the first csv record which
// contains the names of the future log values.
func (f *CSV) SetOutput(dest io.Writer) {
ac, ok := dest.(*AccessLog)
if !ok {
panic("SetOutput with invalid type. Report it as bug.")
f.ac, _ = dest.(*AccessLog)
f.writerPool = &sync.Pool{
New: func() interface{} {
return csv.NewWriter(dest)
},
}
w := csv.NewWriter(dest)
f.writer = w
f.ac = ac
if !f.Header {
return
}
@ -53,7 +49,7 @@ func (f *CSV) SetOutput(dest io.Writer) {
// If the destination is not a reader
// we can't detect if the header was already inserted,
// so we exit; we don't want to malform the contents.
destReader, ok := ac.Writer.(io.Reader)
destReader, ok := f.ac.Writer.(io.Reader)
if !ok {
return
}
@ -66,35 +62,30 @@ func (f *CSV) SetOutput(dest io.Writer) {
}
// Write the header.
w := csv.NewWriter(dest)
keys := []string{"Timestamp", "Latency", "Code", "Method", "Path"}
if ac.IP {
if f.ac.IP {
keys = append(keys, "IP")
}
// keys = append(keys, []string{"Params", "Query"}...)
keys = append(keys, "Req Values")
/*
if len(ac.FieldSetters) > 0 {
keys = append(keys, "Fields")
} // Make fields their own headers?
*/
if ac.BytesReceived {
if f.ac.BytesReceived || f.ac.BytesReceivedBody {
keys = append(keys, "In")
}
if ac.BytesSent {
if f.ac.BytesSent || f.ac.BytesSentBody {
keys = append(keys, "Out")
}
if ac.RequestBody {
if f.ac.RequestBody {
keys = append(keys, "Request")
}
if ac.ResponseBody {
if f.ac.ResponseBody {
keys = append(keys, "Response")
}
@ -113,16 +104,9 @@ func (f *CSV) Format(log *Log) (bool, error) {
timestamp = "=" + f.DateScript + "(" + timestamp + ")"
}
lat := ""
if f.LatencyRound > 0 {
lat = log.Latency.Round(f.LatencyRound).String()
} else {
lat = log.Latency.String()
}
values := []string{
timestamp,
lat,
log.Latency.String(),
strconv.Itoa(log.Code),
log.Method,
log.Path,
@ -132,36 +116,30 @@ func (f *CSV) Format(log *Log) (bool, error) {
values = append(values, log.IP)
}
parseRequestValues(log.Code, log.PathParams, log.Query, log.Fields)
values = append(values, log.RequestValuesLine())
if s := log.RequestValuesLine(); s != "" || f.Header {
// even if it's empty, if Header was set, then add it.
values = append(values, s)
}
if f.ac.BytesReceived {
if f.ac.BytesReceived || f.ac.BytesReceivedBody {
values = append(values, strconv.Itoa(log.BytesReceived))
}
if f.ac.BytesSent {
if f.ac.BytesSent || f.ac.BytesSentBody {
values = append(values, strconv.Itoa(log.BytesSent))
}
if f.ac.RequestBody {
if f.ac.RequestBody && (log.Request != "" || f.Header) {
values = append(values, log.Request)
}
if f.ac.ResponseBody {
if f.ac.ResponseBody && (log.Response != "" || f.Header) {
values = append(values, log.Response)
}
f.writer.Write(values)
if f.AutoFlush {
return true, f.Flush()
}
return true, nil
}
// Flush implements the Flusher interface.
// Flushes any buffered csv records to the destination.
func (f *CSV) Flush() error {
f.writer.Flush()
return f.writer.Error()
w := f.writerPool.Get().(*csv.Writer)
err := w.Write(values)
w.Flush() // it works as "reset" too.
f.writerPool.Put(w)
return true, err
}
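A usage sketch for the updated CSV formatter; the DateScript name is the one from the commented example earlier in this commit, and the spreadsheet-side script itself is left to the reader:

	ac := accesslog.File("access_log.csv")
	ac.SetFormatter(&accesslog.CSV{
		Header:     true,        // write the column names once, when the destination is readable.
		DateScript: "FROM_UNIX", // emits the timestamp as =FROM_UNIX(<epoch millis>).
	})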

View File

@ -11,12 +11,11 @@ import (
func TestCSV(t *testing.T) {
buf := new(bytes.Buffer)
ac := New(buf)
ac.RequestBody = false
staticNow, _ := time.Parse(defaultTimeFormat, "1993-01-01 05:00:00")
ac.Clock = TClock(staticNow)
ac.SetFormatter(&CSV{
Header: true,
LatencyRound: time.Second,
AutoFlush: true,
Header: true,
})
lat, _ := time.ParseDuration("1s")
@ -43,11 +42,12 @@ func TestCSV(t *testing.T) {
print()
print()
expected := `Timestamp,Latency,Code,Method,Path,IP,Req Values,In,Out,Request,Response
725864400000,1s,200,GET,/,::1,sleep=1s,573,81,,Index
725864400000,1s,200,GET,/,::1,sleep=1s,573,81,,Index
expected := `Timestamp,Latency,Code,Method,Path,IP,Req Values,In,Out
725864400000,1s,200,GET,/,::1,sleep=1s,573,81
725864400000,1s,200,GET,/,::1,sleep=1s,573,81
`
ac.Close()
if got := buf.String(); expected != got {
t.Fatalf("expected:\n%s\n\nbut got:\n%s", expected, got)
}

View File

@ -1,54 +1,50 @@
package accesslog
import (
"encoding/json"
"io"
"sync"
"strings"
jsoniter "github.com/json-iterator/go"
)
// JSON is a Formatter type for JSON logs.
type JSON struct {
Prefix, Indent string
EscapeHTML bool
// Indent in spaces.
// Note that, if it contains one or more spaces, jsoniter is used instead of easyjson.
Indent string
EscapeHTML bool
enc *json.Encoder
mu sync.Mutex
lockEncoder bool
jsoniter jsoniter.API
ac *AccessLog
}
// SetOutput creates the json encoder which writes to the "dest".
// It's called automatically by the middleware when this Formatter is used.
func (f *JSON) SetOutput(dest io.Writer) {
if dest == nil {
return
f.ac, _ = dest.(*AccessLog)
if indentStep := strings.Count(f.Indent, " "); indentStep > 0 {
f.jsoniter = jsoniter.Config{
TagKey: "json",
IndentionStep: indentStep,
EscapeHTML: f.EscapeHTML,
SortMapKeys: true,
}.Froze()
}
// All logs share the same accesslog's writer and it cannot change during serve-time.
enc := json.NewEncoder(dest)
enc.SetEscapeHTML(f.EscapeHTML)
enc.SetIndent(f.Prefix, f.Indent)
f.lockEncoder = f.Prefix != "" || f.Indent != ""
f.enc = enc
}
// Format prints the logs in JSON format.
// Writes to the destination directly,
// locks on each Format call.
func (f *JSON) Format(log *Log) (bool, error) {
// f.mu.Lock()
// ^ This lock is not required as the writer is
// protected with mutex if necessary or configured to do so.
// However, if we navigate through the
// internal encoding's source code we'll see that it
// uses a field for its indent buffer,
// therefore it's only useful when Prefix or Indent was not empty.
if f.lockEncoder {
f.mu.Lock()
}
err := f.enc.Encode(log)
if f.lockEncoder {
f.mu.Unlock()
if f.jsoniter != nil {
b, err := f.jsoniter.Marshal(log)
if err != nil {
return true, err
}
f.ac.Write(append(b, newLine))
return true, nil
}
err := f.writeEasyJSON(log)
return true, err
}
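A usage sketch covering both encoding paths described above:

	// Compact single-line entries through the hand-edited easyjson writer:
	ac.SetFormatter(&accesslog.JSON{})

	// Indented entries; a non-empty (spaces) Indent switches to jsoniter:
	// ac.SetFormatter(&accesslog.JSON{Indent: "  ", EscapeHTML: false})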

View File

@ -0,0 +1,165 @@
// Code generated by easyjson, but heavily edited. Please add fields manually as the Log struct grows.
// In short: all decode methods were removed, the methods were renamed, a trailing newline was added,
// and the easyjson import requirement was removed.
package accesslog
import (
"encoding/json"
"github.com/kataras/iris/v12/core/memstore"
"github.com/mailru/easyjson/jwriter"
)
func (f *JSON) writeEasyJSON(in *Log) error {
out := &jwriter.Writer{NoEscapeHTML: !f.EscapeHTML}
out.RawByte('{')
first := true
_ = first
{
const prefix string = ",\"timestamp\":"
if first {
first = false
out.RawString(prefix[1:])
} else {
out.RawString(prefix)
}
out.Int64(int64(in.Timestamp))
}
{
const prefix string = ",\"latency\":"
out.RawString(prefix)
out.Int64(int64(in.Latency))
}
{
const prefix string = ",\"code\":"
out.RawString(prefix)
out.Int(int(in.Code))
}
{
const prefix string = ",\"method\":"
out.RawString(prefix)
out.String(string(in.Method))
}
{
const prefix string = ",\"path\":"
out.RawString(prefix)
out.String(string(in.Path))
}
if in.IP != "" {
const prefix string = ",\"ip\":"
out.RawString(prefix)
out.String(string(in.IP))
}
if len(in.Query) != 0 {
const prefix string = ",\"query\":"
out.RawString(prefix)
{
out.RawByte('[')
for v4, v5 := range in.Query {
if v4 > 0 {
out.RawByte(',')
}
easyJSONStringEntry(out, v5)
}
out.RawByte(']')
}
}
if len(in.PathParams) != 0 {
const prefix string = ",\"params\":"
out.RawString(prefix)
{
out.RawByte('[')
for v6, v7 := range in.PathParams {
if v6 > 0 {
out.RawByte(',')
}
easyJSONEntry(out, v7)
}
out.RawByte(']')
}
}
if len(in.Fields) != 0 {
const prefix string = ",\"fields\":"
out.RawString(prefix)
{
out.RawByte('[')
for v8, v9 := range in.Fields {
if v8 > 0 {
out.RawByte(',')
}
easyJSONEntry(out, v9)
}
out.RawByte(']')
}
}
if in.Logger.RequestBody {
const prefix string = ",\"request\":"
out.RawString(prefix)
out.String(string(in.Request))
}
if in.Logger.ResponseBody {
const prefix string = ",\"response\":"
out.RawString(prefix)
out.String(string(in.Response))
}
if in.BytesReceived != 0 {
const prefix string = ",\"bytes_received\":"
out.RawString(prefix)
out.Int(int(in.BytesReceived))
}
if in.BytesSent != 0 {
const prefix string = ",\"bytes_sent\":"
out.RawString(prefix)
out.Int(int(in.BytesSent))
}
out.RawByte('}')
out.RawByte(newLine)
if out.Error != nil {
return out.Error
}
f.ac.Write(out.Buffer.BuildBytes())
return nil
}
func easyJSONEntry(out *jwriter.Writer, in memstore.Entry) {
out.RawByte('{')
first := true
_ = first
{
const prefix string = ",\"key\":"
out.RawString(prefix[1:])
out.String(string(in.Key))
}
{
const prefix string = ",\"value\":"
out.RawString(prefix)
if m, ok := in.ValueRaw.(json.Marshaler); ok {
out.Raw(m.MarshalJSON())
} else {
out.Raw(json.Marshal(in.ValueRaw))
}
}
out.RawByte('}')
}
func easyJSONStringEntry(out *jwriter.Writer, in memstore.StringEntry) {
out.RawByte('{')
first := true
_ = first
{
const prefix string = ",\"key\":"
out.RawString(prefix[1:])
out.String(string(in.Key))
}
{
const prefix string = ",\"value\":"
out.RawString(prefix)
out.String(string(in.Value))
}
out.RawByte('}')
}
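For illustration, the writer above produces single-line entries of roughly this shape (values borrowed from the CSV test below; empty or disabled fields are omitted):

	// {"timestamp":725864400000,"latency":1000000000,"code":200,"method":"GET","path":"/","ip":"::1","query":[{"key":"sleep","value":"1s"}],"bytes_received":573,"bytes_sent":81}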

View File

@ -11,6 +11,7 @@ import (
)
// Log represents the log data specifically for the accesslog middleware.
//easyjson:json
type Log struct {
// The AccessLog instance this Log was created of.
Logger *AccessLog `json:"-" yaml:"-" toml:"-"`
@ -21,35 +22,33 @@ type Log struct {
// useful on Template Formatter.
TimeFormat string `json:"-" yaml:"-" toml:"-"`
// Timestamp the Now's unix timestamp (milliseconds).
Timestamp int64 `json:"timestamp"`
Timestamp int64 `json:"timestamp" csv:"timestamp"`
// Request-Response latency.
Latency time.Duration `json:"latency"`
Latency time.Duration `json:"latency" csv:"latency"`
// The response status code.
Code int `json:"code"`
Code int `json:"code" csv:"code"`
// Init request's Method and Path.
Method string `json:"method"`
Path string `json:"path"`
Method string `json:"method" csv:"method"`
Path string `json:"path" csv:"path"`
// The Remote Address.
IP string `json:"ip,omitempty"`
IP string `json:"ip,omitempty" csv:"ip,omitempty"`
// Sorted URL Query arguments.
Query []memstore.StringEntry `json:"query,omitempty"`
Query []memstore.StringEntry `json:"query,omitempty" csv:"query,omitempty"`
// Dynamic path parameters.
PathParams memstore.Store `json:"params,omitempty"`
PathParams memstore.Store `json:"params,omitempty" csv:"params,omitempty"`
// Fields any data information useful to represent this Log.
Fields memstore.Store `json:"fields,omitempty"`
// The actual number of bytes received and sent on the network (headers + body).
BytesReceived int `json:"bytes_received"`
BytesSent int `json:"bytes_sent"`
Fields memstore.Store `json:"fields,omitempty" csv:"fields,omitempty"`
// The Request and Response raw bodies.
// If they are escaped (e.g. JSON),
// Third-party software can read them through:
// data, _ := strconv.Unquote(log.Request)
// err := json.Unmarshal([]byte(data), &customStruct)
Request string `json:"request"`
Response string `json:"response"`
Request string `json:"request,omitempty" csv:"request,omitempty"`
Response string `json:"response,omitempty" csv:"response,omitempty"`
// The actual number of bytes received and sent on the network (headers + body or body only).
BytesReceived int `json:"bytes_received,omitempty" csv:"bytes_received,omitempty"`
BytesSent int `json:"bytes_sent,omitempty" csv:"bytes_sent,omitempty"`
// A copy of the Request's Context when Async is true (safe to use concurrently),
// otherwise it's the current Context (not safe for concurrent access).
@ -69,17 +68,23 @@ func (l *Log) RequestValuesLine() string {
// BytesReceivedLine returns the formatted bytes received length.
func (l *Log) BytesReceivedLine() string {
if !l.Logger.BytesReceived && !l.Logger.BytesReceivedBody {
return ""
}
return formatBytes(l.BytesReceived)
}
// BytesSentLine returns the formatted bytes sent length.
func (l *Log) BytesSentLine() string {
if !l.Logger.BytesSent && !l.Logger.BytesSentBody {
return ""
}
return formatBytes(l.BytesSent)
}
func formatBytes(b int) string {
if b <= 0 {
return ""
return "0 B"
}
const unit = 1024
@ -155,5 +160,4 @@ var (
_ Formatter = (*JSON)(nil)
_ Formatter = (*Template)(nil)
_ Formatter = (*CSV)(nil)
_ Flusher = (*CSV)(nil)
)

View File

@ -1,8 +1,8 @@
package accesslog
import (
"bytes"
"io"
"sync"
"text/template"
)
@ -10,6 +10,11 @@ import (
// It's used to print the Log in a text/template way.
// The caller has full control over the printable result;
// certain fields can be ignored, the display order can be changed, etc.
//
// For faster execution you can create a custom Formatter
// and compile your own quicktemplate: https://github.com/valyala/quicktemplate
//
// This one uses the standard text/template syntax.
type Template struct {
// Custom template source.
// Use this or `Tmpl/TmplName` fields.
@ -22,13 +27,14 @@ type Template struct {
// If not empty then this named template/block renders the log line.
TmplName string
dest io.Writer
mu sync.Mutex
ac *AccessLog
}
// SetOutput creates the default template if missing
// when this formatter is registered.
func (f *Template) SetOutput(dest io.Writer) {
f.ac, _ = dest.(*AccessLog)
if f.Tmpl == nil {
tmpl := template.New("")
@ -41,8 +47,6 @@ func (f *Template) SetOutput(dest io.Writer) {
f.Tmpl = template.Must(tmpl.Parse(text))
}
f.dest = dest
}
const defaultTmplText = "{{.Now.Format .TimeFormat}}|{{.Latency}}|{{.Code}}|{{.Method}}|{{.Path}}|{{.IP}}|{{.RequestValuesLine}}|{{.BytesReceivedLine}}|{{.BytesSentLine}}|{{.Request}}|{{.Response}}|\n"
@ -53,13 +57,22 @@ func (f *Template) Format(log *Log) (bool, error) {
// A template may be executed safely in parallel, although if parallel
// executions share a Writer the output may be interleaved.
f.mu.Lock()
if f.TmplName != "" {
err = f.Tmpl.ExecuteTemplate(f.dest, f.TmplName, log)
} else {
err = f.Tmpl.Execute(f.dest, log)
}
f.mu.Unlock()
// We solve that using a buffer pool; no locks while the template executes (x2 performance boost).
temp := f.ac.bufPool.Get().(*bytes.Buffer)
return true, err
if f.TmplName != "" {
err = f.Tmpl.ExecuteTemplate(temp, f.TmplName, log)
} else {
err = f.Tmpl.Execute(temp, log)
}
if err != nil {
f.ac.bufPool.Put(temp)
return true, err
}
f.ac.Write(temp.Bytes())
temp.Reset()
f.ac.bufPool.Put(temp)
return true, nil
}
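A usage sketch for the Template formatter; the placeholders come from the default template text above, only the layout is custom:

	ac.SetFormatter(&accesslog.Template{
		Text: "{{.Now.Format .TimeFormat}} {{.Code}} {{.Method}} {{.Path}} {{.Latency}}\n",
	})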