re-add the mutex on sessions.LifeTime

This commit is contained in:
Gerasimos (Makis) Maropoulos 2020-08-12 07:48:45 +03:00
parent 5d7198ca7b
commit 9ed566b076
No known key found for this signature in database
GPG Key ID: 5DBE766BD26A54E7
4 changed files with 28 additions and 9 deletions

View File

@ -13,7 +13,6 @@ import (
)
func TestNoCache(t *testing.T) {
t.Parallel()
app := iris.New()
app.Get("/", cache.NoCache, func(ctx iris.Context) {
ctx.WriteString("no_cache")
@ -30,7 +29,6 @@ func TestNoCache(t *testing.T) {
}
func TestStaticCache(t *testing.T) {
t.Parallel()
// test change the time format, which is not recommended but can be done.
app := iris.New().Configure(iris.WithTimeFormat("02 Jan 2006 15:04:05 GMT"))
@ -52,7 +50,7 @@ func TestStaticCache(t *testing.T) {
}
func TestCache304(t *testing.T) {
t.Parallel()
// t.Parallel()
app := iris.New()
expiresEvery := 4 * time.Second
@ -78,7 +76,7 @@ func TestCache304(t *testing.T) {
}
func TestETag(t *testing.T) {
t.Parallel()
// t.Parallel()
app := iris.New()
n := "_"

11
cache/cache_test.go vendored
View File

@ -145,10 +145,13 @@ func TestCache(t *testing.T) {
}
}
func TestCacheHandlerParallel(t *testing.T) {
	t.Parallel()
	// Re-run the full TestCache suite with t.Parallel enabled so the
	// cache handler is exercised under concurrent test execution.
	// NOTE(review): this delegates entirely to TestCache; any failure
	// reported here originates in that test's assertions.
	TestCache(t)
}
// This works but we have issue on golog.SetLevel and get golog.Level on httptest.New
// when tests are running in parallel and the loggers are used.
// // TODO: Fix it on golog repository or here, we'll see.
// func TestCacheHandlerParallel(t *testing.T) {
// t.Parallel()
// TestCache(t)
// }
func TestCacheValidator(t *testing.T) {
app := iris.New()

View File

@ -10,6 +10,7 @@ import (
"net"
"net/http"
"strings"
"sync"
"testing"
"github.com/iris-contrib/httpexpect/v2"
@ -51,6 +52,7 @@ func newTester(t *testing.T, baseURL string, handler http.Handler) *httpexpect.E
func testSupervisor(t *testing.T, creator func(*http.Server, []func(TaskHost)) *Supervisor) {
loggerOutput := &bytes.Buffer{}
logger := log.New(loggerOutput, "", 0)
mu := new(sync.RWMutex)
const (
expectedHelloMessage = "Hello\n"
)
@ -78,7 +80,9 @@ func testSupervisor(t *testing.T, creator func(*http.Server, []func(TaskHost)) *
}
helloMe := func(_ TaskHost) {
mu.Lock()
logger.Print(expectedHelloMessage)
mu.Unlock()
}
host := creator(srv, []func(TaskHost){helloMe})
@ -95,7 +99,10 @@ func testSupervisor(t *testing.T, creator func(*http.Server, []func(TaskHost)) *
// but it's "safe" here.
// testing Task (recorded) message:
if got := loggerOutput.String(); expectedHelloMessage != got {
mu.RLock()
got := loggerOutput.String()
mu.RUnlock()
if expectedHelloMessage != got {
t.Fatalf("expected hello Task's message to be '%s' but got '%s'", expectedHelloMessage, got)
}
}

View File

@ -1,6 +1,7 @@
package sessions
import (
"sync"
"time"
"github.com/kataras/iris/v12/context"
@ -15,6 +16,8 @@ type LifeTime struct {
// (this should be a bug(go1.9-rc1) or not. We don't care atm)
time.Time
timer *time.Timer
mu sync.RWMutex
}
// Begin will begin the life based on the time.Now().Add(d).
@ -24,8 +27,10 @@ func (lt *LifeTime) Begin(d time.Duration, onExpire func()) {
return
}
lt.mu.Lock()
lt.Time = time.Now().Add(d)
lt.timer = time.AfterFunc(d, onExpire)
lt.mu.Unlock()
}
// Revive will continue the life based on the stored Time.
@ -38,24 +43,30 @@ func (lt *LifeTime) Revive(onExpire func()) {
now := time.Now()
if lt.Time.After(now) {
d := lt.Time.Sub(now)
lt.mu.Lock()
lt.timer = time.AfterFunc(d, onExpire)
lt.mu.Unlock()
}
}
// Shift resets the lifetime based on "d".
// Shift resets the lifetime based on "d".
//
// It is a no-op unless "d" is positive and an expiration timer was
// already scheduled (i.e. Begin has been called).
func (lt *LifeTime) Shift(d time.Duration) {
	lt.mu.Lock()
	defer lt.mu.Unlock()

	if d <= 0 || lt.timer == nil {
		return
	}

	lt.Time = time.Now().Add(d)
	lt.timer.Reset(d)
}
// ExpireNow reduces the lifetime completely.
func (lt *LifeTime) ExpireNow() {
	lt.mu.Lock()
	defer lt.mu.Unlock()

	// Mark the stored time with the cookie-delete sentinel and cancel
	// any pending expiration callback.
	lt.Time = context.CookieExpireDelete
	if t := lt.timer; t != nil {
		t.Stop()
	}
}
// HasExpired reports whether "lt" is expired.