accesslog: NEW log broker and many more features

Also: fix context clone, fix response recorder concurrent access, fix reloading of views when only ParseTemplate is used, and more.
This commit is contained in:
Gerasimos (Makis) Maropoulos 2020-09-09 14:43:26 +03:00
parent fb7627256a
commit b77227a0f9
No known key found for this signature in database
GPG Key ID: 5DBE766BD26A54E7
15 changed files with 359 additions and 49 deletions

View File

@ -364,6 +364,10 @@ Response:
Other Improvements:
- Add a `Response() *http.Response` to the Response Recorder.
- Fix Response Recorder `Flush` when transfer-encoding is `chunked`.
- Fix Response Recorder `Clone` concurrent access afterwards.
- Add a `ParseTemplate` method on view engines to manually parse and add a template from text as [requested](https://github.com/kataras/iris/issues/1617). [Examples](https://github.com/kataras/iris/tree/master/_examples/view/parse-template) (see the sketch below).
- Full `http.FileSystem` interface support for all **view** engines as [requested](https://github.com/kataras/iris/issues/1575). The first argument of the functions (`HTML`, `Blocks`, `Pug`, `Amber`, `Ace`, `Jet`, `Django`, `Handlebars`) can now be either a directory of `string` type (as before) or a value that implements the `http.FileSystem` interface. The `.Binary` method of all view engines was removed: pass go-bindata's latest exported `AssetFile()` function as the first argument instead of a string.
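Not part of the commit: a minimal usage sketch of the new `ParseTemplate` method, assuming a `nil` filesystem argument is accepted (per the noOpFS handling later in this diff); the template name, contents and function are illustrative.

```go
package main

import (
	"html/template"

	"github.com/kataras/iris/v12"
)

func main() {
	// No template files on disk: pass nil as the filesystem argument
	// (assumed to map to the no-op fs) and add templates manually.
	e := iris.HTML(nil, ".html")
	err := e.ParseTemplate("program.html", []byte(`<h1>{{greet .Name}}</h1>`), template.FuncMap{
		"greet": func(name string) string {
			return "Hello, " + name + "!"
		},
	})
	if err != nil {
		panic(err)
	}
	// e.Reload(true) // optional: re-parse on each request during development.

	app := iris.New()
	app.RegisterView(e)

	app.Get("/", func(ctx iris.Context) {
		ctx.View("program.html", iris.Map{"Name": "Gerasimos"})
	})

	app.Listen(":8080")
}
```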
@ -614,7 +618,7 @@ New Context Methods:
- `Context.ViewEngine(ViewEngine)` to set a view engine on the fly for the current chain of handlers, responsible for rendering templates through `ctx.View`. [Example](_examples/view/context-view-engine).
- `Context.SetErr(error)` and `Context.GetErr() error` helpers.
- `Context.CompressWriter(bool) error` and `Context.CompressReader(bool) error`.
- `Context.Clone() Context` returns a copy of the Context.
- `Context.Clone() Context` returns a copy of the Context safe for concurrent access.
- `Context.IsCanceled() bool` reports whether the request has been canceled by the client.
- `Context.IsSSL() bool` reports whether the request is under HTTPS SSL (New `Configuration.SSLProxyHeaders` and `HostProxyHeaders` fields too).
- `Context.CompressReader(enable bool)` method and `iris.CompressReader` middleware to enable future request read body calls to decompress data, [example](_examples/compression/main.go).
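Not part of the commit: a minimal sketch of the new `Context.Clone`, handing the copy to a goroutine while the original context returns to the pool; the route and logged message are illustrative.

```go
package main

import "github.com/kataras/iris/v12"

func main() {
	app := iris.New()

	app.Get("/async", func(ctx iris.Context) {
		// Clone returns a copy of the Context that is safe to use
		// after the handler returns, e.g. inside a goroutine.
		ctxCopy := ctx.Clone()

		go func() {
			// Use only the copy inside the goroutine;
			// the original ctx is recycled by the framework.
			ctxCopy.Application().Logger().Infof("background work for %s", ctxCopy.Path())
		}()

		ctx.WriteString("accepted")
	})

	app.Listen(":8080")
}
```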

View File

@ -72,7 +72,8 @@
* [Sitemap](routing/sitemap/main.go)
* Logging
* [Request Logger](logging/request-logger/main.go)
* [Log requests and responses to access.log](logging/request-logger/accesslog)
* [AccessLog: log requests & responses and more](logging/request-logger/accesslog)
* [AccessLog: listen to logs and render them](logging/request-logger/accesslog-broker/main.go)
* [Log Requests to a JSON File](logging/request-logger/request-logger-file-json/main.go)
* [Application File Logger](logging/file-logger/main.go)
* [Application JSON Logger](logging/json-logger/main.go)

View File

@ -0,0 +1,75 @@
package main
import (
"github.com/kataras/iris/v12"
"github.com/kataras/iris/v12/middleware/accesslog"
)
func main() {
/*
In this example we make use of the logs broker.
A handler listens for any incoming logs and renders
them as chunks of JSON to the client (e.g. a browser) in real time.
Note that this could also be done with Server-Sent Events,
but for the sake of the example we use Transfer-Encoding: chunked.
*/
ac := accesslog.File("./access.log")
ac.TimeFormat = "2006-01-02 15:04:05"
ac.Async = true
broker := ac.Broker() // <- IMPORTANT
app := iris.New()
app.UseRouter(ac.Handler)
app.Get("/", indexHandler)
app.Get("/profile/{username}", profileHandler)
app.Post("/read_body", readBodyHandler)
app.Get("/logs", logsHandler(broker))
// http://localhost:8080/logs to see the logs at real-time.
app.Listen(":8080")
}
func indexHandler(ctx iris.Context) {
ctx.HTML("<h1>Index</h1>")
}
func profileHandler(ctx iris.Context) {
username := ctx.Params().Get("username")
ctx.HTML("Hello, <strong>%s</strong>!", username)
}
func readBodyHandler(ctx iris.Context) {
var request interface{}
if err := ctx.ReadBody(&request); err != nil {
ctx.StopWithPlainError(iris.StatusBadRequest, err)
return
}
ctx.JSON(iris.Map{"message": "OK", "data": request})
}
func logsHandler(b *accesslog.Broker) iris.Handler {
return func(ctx iris.Context) {
accesslog.Skip(ctx) // optionally skip logging for this handler.
logs := b.NewListener() // <- IMPORTANT
ctx.Header("Transfer-Encoding", "chunked")
notifyClose := ctx.Request().Context().Done()
for {
select {
case <-notifyClose:
b.CloseListener(logs) // <- IMPORTANT
err := ctx.Request().Context().Err()
ctx.Application().Logger().Infof("Listener closed [%v], loop end.", err)
return
case log := <-logs: // <- IMPORTANT
ctx.JSON(log, iris.JSON{Indent: " ", UnescapeHTML: true})
ctx.ResponseWriter().Flush()
}
}
}
}
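To try the example above: run it, keep `curl -N http://localhost:8080/logs` (or a browser tab on that URL) open, then issue requests to `/`, `/profile/{username}` and `/read_body` from another terminal; each request is pushed to the listener and rendered as an indented JSON chunk in real time.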

View File

@ -11,6 +11,7 @@ func main() {
if err != nil {
panic(err)
}
e.Reload(true)
app := iris.New()
app.RegisterView(e)

View File

@ -11,6 +11,7 @@ func main() {
if err != nil {
panic(err)
}
e.Reload(true)
app := iris.New()
app.RegisterView(e)

View File

@ -9,6 +9,7 @@ func main() {
return "Hello, " + name + "!"
},
})
e.Reload(true)
app := iris.New()
app.RegisterView(e)

View File

@ -17,6 +17,7 @@ func main() {
if err != nil {
panic(err)
}
e.Reload(true)
app := iris.New()
app.RegisterView(e)

View File

@ -19,6 +19,7 @@ func main() {
return "Hello, " + name + "!"
},
})
e.Reload(true)
app := iris.New()
app.RegisterView(e)

View File

@ -59,7 +59,7 @@ func (w *ResponseRecorder) Naive() http.ResponseWriter {
// prepares itself, the response recorder, to record and send response to the client.
func (w *ResponseRecorder) BeginRecord(underline ResponseWriter) {
w.ResponseWriter = underline
w.headers = underline.Header()
w.headers = underline.Header().Clone()
w.result = nil
w.ResetBody()
}
@ -67,8 +67,8 @@ func (w *ResponseRecorder) BeginRecord(underline ResponseWriter) {
// EndResponse is auto-called when the whole client's request is done,
// releases the response recorder and its underline ResponseWriter.
func (w *ResponseRecorder) EndResponse() {
releaseResponseRecorder(w)
w.ResponseWriter.EndResponse()
releaseResponseRecorder(w)
}
// Write Adds the contents to the body reply, it writes the contents temporarily
@ -99,6 +99,12 @@ func (w *ResponseRecorder) Write(contents []byte) (int, error) {
return len(contents), nil
}
// Header returns the temporary header map that, on flush response,
// will be sent by the underlying ResponseWriter's WriteHeader method.
func (w *ResponseRecorder) Header() http.Header {
return w.headers
}
// SetBody overrides the body and sets it to a slice of bytes value.
func (w *ResponseRecorder) SetBody(b []byte) {
w.chunks = b
@ -122,7 +128,7 @@ func (w *ResponseRecorder) ResetBody() {
// ResetHeaders sets the headers to the underline's response writer's headers, may empty.
func (w *ResponseRecorder) ResetHeaders() {
w.headers = w.ResponseWriter.Header()
w.headers = w.ResponseWriter.Header().Clone()
}
// ClearHeaders clears all headers, both temp and underline's response writer.
@ -130,7 +136,7 @@ func (w *ResponseRecorder) ClearHeaders() {
w.headers = http.Header{}
h := w.ResponseWriter.Header()
for k := range h {
h[k] = nil
delete(h, k)
}
}
@ -151,12 +157,9 @@ func (w *ResponseRecorder) FlushResponse() {
// copy the headers to the underline response writer
if w.headers != nil {
h := w.ResponseWriter.Header()
for k, values := range w.headers {
h[k] = nil
for i := range values {
h.Add(k, values[i])
}
// note: we don't reset the current underline's headers.
for k, v := range w.headers {
h[k] = v
}
}
@ -184,8 +187,15 @@ func (w *ResponseRecorder) FlushResponse() {
// it copies the status code, the headers, the body and the beforeFlush listener and finally returns a new ResponseRecorder
func (w *ResponseRecorder) Clone() ResponseWriter {
wc := &ResponseRecorder{}
wc.headers = w.headers
wc.chunks = w.chunks[0:]
// copy headers.
wc.headers = w.headers.Clone()
// copy body.
chunksCopy := make([]byte, len(w.chunks))
copy(chunksCopy, w.chunks)
wc.chunks = chunksCopy
if resW, ok := w.ResponseWriter.(*responseWriter); ok {
wc.ResponseWriter = &responseWriter{
ResponseWriter: resW.ResponseWriter,
@ -252,6 +262,23 @@ func (w *ResponseRecorder) CopyTo(res ResponseWriter) {
// Flush sends any buffered data to the client.
func (w *ResponseRecorder) Flush() {
// This fixes response recorder when chunked + Flush is used.
if w.headers.Get("Transfer-Encoding") == "chunked" {
if w.Written() == NoWritten {
if len(w.headers) > 0 {
h := w.ResponseWriter.Header()
// note: we don't reset the current underline's headers.
for k, v := range w.headers {
h[k] = v
}
}
}
if len(w.chunks) > 0 {
w.ResponseWriter.Write(w.chunks)
}
}
w.ResponseWriter.Flush()
w.ResetBody()
}
@ -308,9 +335,9 @@ func (w *ResponseRecorder) Result() *http.Response { // a modified copy of net/h
headers := w.headers.Clone()
for k, v := range w.ResponseWriter.Header() {
headers[k] = v
}
// for k, v := range w.ResponseWriter.Header() {
// headers[k] = v
// }
/*
dateFound := false
for k := range headers {

View File

@ -18,22 +18,35 @@ func init() {
context.SetHandlerName("iris/middleware/accesslog.*", "iris.accesslog")
}
const accessLogFieldsContextKey = "iris.accesslog.request.fields"
const (
fieldsContextKey = "iris.accesslog.request.fields"
skipLogContextKey = "iris.accesslog.request.skip"
)
// GetFields returns the accesslog fields for this request.
// Returns a store which the caller can use to
// set/get/remove custom log fields. Use its `Set` method.
func GetFields(ctx iris.Context) (fields *Fields) {
if v := ctx.Values().Get(accessLogFieldsContextKey); v != nil {
if v := ctx.Values().Get(fieldsContextKey); v != nil {
fields = v.(*Fields)
} else {
fields = new(Fields)
ctx.Values().Set(accessLogFieldsContextKey, fields)
ctx.Values().Set(fieldsContextKey, fields)
}
return
}
// Skip should be called when a specific route should be skipped from the logging process.
// It's an easy-to-use alternative to iris.NewConditionalHandler.
func Skip(ctx iris.Context) {
ctx.Values().Set(skipLogContextKey, struct{}{})
}
func shouldSkip(ctx iris.Context) bool {
return ctx.Values().Get(skipLogContextKey) != nil
}
type (
// Fields is a type alias for memstore.Store, used to set
@ -137,6 +150,7 @@ type AccessLog struct {
// order of registration so use a slice and
// take the field key from the extractor itself.
formatter Formatter
broker *Broker
}
// New returns a new AccessLog value with the default values.
@ -179,6 +193,28 @@ func File(path string) *AccessLog {
return New(f)
}
// Broker creates or returns the broker.
// Use its `NewListener` and `CloseListener`
// to start and stop listening for incoming logs.
//
// Should be called before serve-time.
func (ac *AccessLog) Broker() *Broker {
ac.mu.Lock()
if ac.broker == nil {
ac.broker = newBroker()
// atomic.StoreUint32(&ac.brokerActive, 1)
}
ac.mu.Unlock()
return ac.broker
}
// func (ac *AccessLog) isBrokerActive() bool { // see `Print` method.
// return atomic.LoadUint32(&ac.brokerActive) > 0
// }
// ^ No need, we declare that the Broker should be called
// before serve-time. Let's respect our comment
// and don't try to make it safe for write and read concurrent access.
// Write writes to the log destination.
// It completes the io.Writer interface.
// Safe for concurrent use.
@ -294,6 +330,11 @@ func (ac *AccessLog) shouldReadResponseBody() bool {
// defer ac.Close()
// app.UseRouter(ac.Handler)
func (ac *AccessLog) Handler(ctx *context.Context) {
if shouldSkip(ctx) { // usage: a middleware registered before this one disabled the logging.
ctx.Next()
return
}
var (
startTime = time.Now()
// Store some values, as future handler chain
@ -314,13 +355,17 @@ func (ac *AccessLog) Handler(ctx *context.Context) {
// Set the fields context value so they can be modified
// on the following handlers chain. Same as `AddFields` but per-request.
// ctx.Values().Set(accessLogFieldsContextKey, new(Fields))
// ctx.Values().Set(fieldsContextKey, new(Fields))
// No need ^ The GetFields will set it if it's missing.
// So we initialize them whenever, and if, asked.
// Proceed to the handlers chain.
ctx.Next()
if shouldSkip(ctx) { // normal flow: a handler down the chain may have called Skip, so check again after Next.
return
}
latency := time.Since(startTime)
if ac.Async {
ctxCopy := ctx.Clone()
@ -435,7 +480,7 @@ func (ac *AccessLog) after(ctx *context.Context, lat time.Duration, method, path
// Print writes a log manually.
// The `Handler` method calls it.
func (ac *AccessLog) Print(ctx *context.Context, latency time.Duration, timeFormat string, code int, method, path, reqBody, respBody string, bytesReceived, bytesSent int, params *context.RequestParams, query []memstore.StringEntry, fields []memstore.Entry) error {
func (ac *AccessLog) Print(ctx *context.Context, latency time.Duration, timeFormat string, code int, method, path, reqBody, respBody string, bytesReceived, bytesSent int, params *context.RequestParams, query []memstore.StringEntry, fields []memstore.Entry) (err error) {
var now time.Time
if ac.Clock != nil {
@ -444,7 +489,7 @@ func (ac *AccessLog) Print(ctx *context.Context, latency time.Duration, timeForm
now = time.Now()
}
if f := ac.formatter; f != nil {
if hasFormatter, hasBroker := ac.formatter != nil, ac.broker != nil; hasFormatter || hasBroker {
log := &Log{
Logger: ac,
Now: now,
@ -464,12 +509,21 @@ func (ac *AccessLog) Print(ctx *context.Context, latency time.Duration, timeForm
Ctx: ctx, // ctx should only be used here, it may be nil on testing.
}
if err := f.Format(log); err != nil {
return err
var handled bool
if hasFormatter {
handled, err = ac.formatter.Format(log)
if err != nil {
return
}
}
// OK, it's handled, exit now.
return nil
if hasBroker { // after Format, it may want to customize the log's fields.
ac.broker.notify(log)
}
if handled {
return // OK, it's handled, exit now.
}
}
// url parameters, path parameters and custom fields separated by space,
@ -478,7 +532,7 @@ func (ac *AccessLog) Print(ctx *context.Context, latency time.Duration, timeForm
// the number of separators are the same, in order to be easier
// for 3rd-party programs to read the result log file.
_, err := fmt.Fprintf(ac, "%s|%s|%s|%s|%s|%d|%s|%s|%s|%s|\n",
_, err = fmt.Fprintf(ac, "%s|%s|%s|%s|%s|%d|%s|%s|%s|%s|\n",
now.Format(timeFormat),
latency,
method,
@ -491,5 +545,5 @@ func (ac *AccessLog) Print(ctx *context.Context, latency time.Duration, timeForm
respBody,
)
return err
return
}

View File

@ -0,0 +1,90 @@
package accesslog
// LogChan describes the log channel.
// See `Broker` for details.
type LogChan chan *Log
// A Broker holds the active listeners,
// receives incoming logs on its Notifier channel
// and broadcasts them to all registered listeners.
//
// Exports the `NewListener` and `CloseListener` methods.
type Broker struct {
// Logs are pushed to this channel
// by the main events-gathering `run` routine.
Notifier LogChan
// NewListener action.
newListeners chan LogChan
// CloseListener action.
closingListeners chan chan *Log
// listeners store.
listeners map[LogChan]bool
}
// newBroker returns a new broker and starts its run loop.
func newBroker() *Broker {
b := &Broker{
Notifier: make(LogChan, 1),
newListeners: make(chan LogChan),
closingListeners: make(chan chan *Log),
listeners: make(map[LogChan]bool),
}
// Listens and Broadcasts events.
go b.run()
return b
}
// run listens on the different channels and acts accordingly.
func (b *Broker) run() {
for {
select {
case s := <-b.newListeners:
// A new channel has started to listen.
b.listeners[s] = true
case s := <-b.closingListeners:
// A listener has detached.
// Stop sending them the logs.
delete(b.listeners, s)
case log := <-b.Notifier:
// A new log sent by the logger.
// Send it to all active listeners.
for clientMessageChan := range b.listeners {
clientMessageChan <- log
}
}
}
}
// notify sends the "log" to all active listeners.
func (b *Broker) notify(log *Log) {
b.Notifier <- log
}
// NewListener returns a new log channel listener.
// The caller SHALL NOT use this to write logs.
func (b *Broker) NewListener() LogChan {
// Each listener registers its own message channel with the Broker's connections registry.
logs := make(LogChan)
// Signal the broker that we have a new listener.
b.newListeners <- logs
return logs
}
// CloseListener removes the "ln" listener from the active listeners.
func (b *Broker) CloseListener(ln LogChan) {
b.closingListeners <- ln
}
// As we can't export a read-only channel and pass it as the closing listener,
// we return a read-write channel from NewListener and add a note that the user
// should NOT send data back to the channel; its use is read-only.
// func (b *Broker) CloseListener(ln <-chan *Log) {
// b.closingListeners <- ln
// }
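Not part of the commit: a minimal sketch of consuming the broker from a plain goroutine, outside an HTTP handler (complementing the `/logs` example above). The printed field names come from the default template text in this diff; everything else is illustrative.

```go
package main

import (
	"fmt"

	"github.com/kataras/iris/v12"
	"github.com/kataras/iris/v12/middleware/accesslog"
)

func main() {
	ac := accesslog.File("./access.log")
	broker := ac.Broker() // must be called before serve-time.

	logs := broker.NewListener()
	done := make(chan struct{})
	go func() {
		for {
			select {
			case log := <-logs:
				// Forward the log anywhere, e.g. an external collector.
				fmt.Printf("%s %s -> %d\n", log.Method, log.Path, log.Code)
			case <-done:
				return
			}
		}
	}()
	defer func() {
		broker.CloseListener(logs) // detach; note: the channel itself is not closed.
		close(done)
	}()

	app := iris.New()
	app.UseRouter(ac.Handler)
	app.Get("/", func(ctx iris.Context) { ctx.WriteString("index") })
	app.Listen(":8080")
}
```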

View File

@ -34,9 +34,9 @@ type Log struct {
// The response status code.
Code int `json:"code"`
// Sorted URL Query arguments.
Query []memstore.StringEntry `json:"query"`
Query []memstore.StringEntry `json:"query,omitempty"`
// Dynamic path parameters.
PathParams []memstore.Entry `json:"params"`
PathParams []memstore.Entry `json:"params,omitempty"`
// Fields any data information useful to represent this Log.
Fields []memstore.Entry `json:"fields,omitempty"`
@ -127,15 +127,17 @@ func parseRequestValues(code int, pathParams *context.RequestParams, query []mem
// Formatter is responsible to print a Log to the accesslog's writer.
type Formatter interface {
// SetOutput should inject the accesslog's direct output,
// if this "dest" is used then the Formatter
// should manually control its concurrent use.
SetOutput(dest io.Writer)
// Format should print the Log.
// Returns nil error on handle successfully,
// otherwise the log will be printed using the default formatter
// and the error will be printed to the Iris Application's error log level.
Format(log *Log) error
// SetOutput should inject the accesslog's direct output,
// if this "dest" is used then the Formatter
// should manually control its concurrent use.
SetOutput(dest io.Writer)
// Should return true if this handled the logging, otherwise false to
// continue with the default print format.
Format(log *Log) (bool, error)
}
var (
@ -169,12 +171,12 @@ func (f *JSON) SetOutput(dest io.Writer) {
// Format prints the logs in JSON format.
// Writes to the destination directly,
// locks on each Format call.
func (f *JSON) Format(log *Log) error {
func (f *JSON) Format(log *Log) (bool, error) {
f.mu.Lock()
err := f.enc.Encode(log)
f.mu.Unlock()
return err
return true, err
}
// Template is a Formatter.
@ -213,7 +215,7 @@ func (f *Template) SetOutput(dest io.Writer) {
const defaultTmplText = "{{.Now.Format .TimeFormat}}|{{.Latency}}|{{.Method}}|{{.Path}}|{{.RequestValuesLine}}|{{.Code}}|{{.BytesReceivedLine}}|{{.BytesSentLine}}|{{.Request}}|{{.Response}}|\n"
// Format prints the logs in text/template format.
func (f *Template) Format(log *Log) error {
func (f *Template) Format(log *Log) (bool, error) {
var err error
// A template may be executed safely in parallel, although if parallel
@ -226,5 +228,5 @@ func (f *Template) Format(log *Log) error {
}
f.mu.Unlock()
return err
return true, err
}
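Not part of the commit: a hedged sketch of a custom formatter under the new `(bool, error)` contract. The `CSV` type is illustrative, the `SetFormatter` setter name is assumed, and the `Log` field names are taken from the default template text above.

```go
package main

import (
	"fmt"
	"io"
	"sync"

	"github.com/kataras/iris/v12"
	"github.com/kataras/iris/v12/middleware/accesslog"
)

// CSV is an illustrative custom Formatter that writes one
// comma-separated line per log entry.
type CSV struct {
	mu   sync.Mutex
	dest io.Writer
}

// SetOutput injects the accesslog's direct output.
func (f *CSV) SetOutput(dest io.Writer) { f.dest = dest }

// Format returns true to declare the log handled, so the default
// pipe-separated format is skipped; per this diff, the broker (if any)
// is still notified afterwards.
func (f *CSV) Format(log *accesslog.Log) (bool, error) {
	f.mu.Lock()
	_, err := fmt.Fprintf(f.dest, "%s,%s,%s,%d\n",
		log.Now.Format(log.TimeFormat), log.Method, log.Path, log.Code)
	f.mu.Unlock()
	return true, err
}

func main() {
	ac := accesslog.File("./access.csv")
	ac.SetFormatter(new(CSV)) // setter name assumed; adjust to the actual API.

	app := iris.New()
	app.UseRouter(ac.Handler)
	app.Get("/", func(ctx iris.Context) { ctx.WriteString("index") })
	app.Listen(":8080")
}
```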

View File

@ -110,6 +110,11 @@ type noOpFS struct{}
func (fs noOpFS) Open(name string) (http.File, error) { return nil, nil }
func isNoOpFS(fs http.FileSystem) bool {
_, ok := fs.(noOpFS)
return ok
}
// fixes: "invalid character in file path"
// on amber engine (it uses the virtual fs directly
// and it uses filepath instead of the path package...).

View File

@ -34,9 +34,16 @@ type HTMLEngine struct {
//
middleware func(name string, contents []byte) (string, error)
Templates *template.Template
customCache []customTmp // required to load them again if reload is true.
//
}
type customTmp struct {
name string
contents []byte
funcs template.FuncMap
}
var (
_ Engine = (*HTMLEngine)(nil)
_ EngineFuncer = (*HTMLEngine)(nil)
@ -215,6 +222,17 @@ func (s *HTMLEngine) Funcs(funcMap template.FuncMap) *HTMLEngine {
//
// Returns an error if something bad happens, caller is responsible to handle that.
func (s *HTMLEngine) Load() error {
s.rmu.Lock()
defer s.rmu.Unlock()
return s.load()
}
func (s *HTMLEngine) load() error {
if err := s.reloadCustomTemplates(); err != nil {
return err
}
return walk(s.fs, s.rootDir, func(path string, info os.FileInfo, err error) error {
if info == nil || info.IsDir() {
return nil
@ -231,15 +249,35 @@ func (s *HTMLEngine) Load() error {
return fmt.Errorf("%s: %w", path, err)
}
return s.ParseTemplate(path, buf, nil)
return s.parseTemplate(path, buf, nil)
})
}
func (s *HTMLEngine) reloadCustomTemplates() error {
for _, tmpl := range s.customCache {
if err := s.parseTemplate(tmpl.name, tmpl.contents, tmpl.funcs); err != nil {
return err
}
}
return nil
}
// ParseTemplate adds a custom template to the root template.
func (s *HTMLEngine) ParseTemplate(name string, contents []byte, funcs template.FuncMap) (err error) {
s.rmu.Lock()
defer s.rmu.Unlock()
s.customCache = append(s.customCache, customTmp{
name: name,
contents: contents,
funcs: funcs,
})
return s.parseTemplate(name, contents, funcs)
}
func (s *HTMLEngine) parseTemplate(name string, contents []byte, funcs template.FuncMap) (err error) {
s.initRootTmpl()
name = strings.TrimPrefix(name, "/")
@ -270,6 +308,7 @@ func (s *HTMLEngine) initRootTmpl() { // protected by the caller.
// the root template should be the same,
// no matter how many reloads as the
// following unexported fields cannot be modified.
// However, on reload they should be cleared otherwise we get an error.
s.Templates = template.New(s.rootDir)
s.Templates.Delims(s.left, s.right)
}
@ -349,7 +388,14 @@ func (s *HTMLEngine) runtimeFuncsFor(t *template.Template, name string, binding
func (s *HTMLEngine) ExecuteWriter(w io.Writer, name string, layout string, bindingData interface{}) error {
// re-parse the templates if reload is enabled.
if s.reload {
if err := s.Load(); err != nil {
s.rmu.Lock()
defer s.rmu.Unlock()
s.Templates = nil
// we lose the templates parsed manually, so store them when it's called
// in order for load to take care of them too.
if err := s.load(); err != nil {
return err
}
}

View File

@ -178,9 +178,6 @@ func (s *JetEngine) AddVar(key string, value interface{}) {
// not safe for concurrent access across clients; use it only in development.
func (s *JetEngine) Reload(developmentMode bool) *JetEngine {
s.developmentMode = developmentMode
if s.Set != nil {
s.Set.SetDevelopmentMode(developmentMode)
}
return s
}
@ -215,7 +212,6 @@ func (l *jetLoader) Exists(name string) (string, bool) {
// Load should load the templates from a physical system directory or by an embedded one (assets/go-bindata).
func (s *JetEngine) Load() error {
s.initSet()
// Note that, unlike the rest of template engines implementations,
// we don't call the Set.GetTemplate to parse the templates,
// we let it to the jet template parser itself which does that at serve-time and caches each template by itself.
@ -236,7 +232,12 @@ func (s *JetEngine) initSet() {
s.mu.Lock()
if s.Set == nil {
s.Set = jet.NewHTMLSetLoader(s.loader)
s.Set.SetDevelopmentMode(s.developmentMode)
if s.developmentMode && !isNoOpFS(s.fs) {
// This check avoids jet's fs lookups on the noOp fs (a nil fs passed by the developer),
// which can happen when a nil fs is passed
// and only `ParseTemplate` is used.
s.Set.SetDevelopmentMode(true)
}
if s.vars != nil {
for key, value := range s.vars {