
Commit dc2460c

Merge pull request #5 from corhere/version-two
Locker v2
2 parents 281af2d + eee143e commit dc2460c

File tree

6 files changed (+124, -158 lines)

.github/workflows/test.yml

Lines changed: 9 additions & 6 deletions
@@ -2,24 +2,27 @@ name: Run go test
 
 on:
   push:
-    branches: [ $default-branch ]
+    branches: [ 'main' ]
   pull_request:
-    branch: [ $default-branch ]
 
 jobs:
   test:
     name: Test
+    permissions:
+      contents: read
     strategy:
       matrix:
-        go-version: [1.14.x, 1.15.x]
+        go-version:
+          - stable
+          - oldstable
+          - 1.19.x
         os: [ubuntu-latest]
     runs-on: ${{ matrix.os }}
     steps:
+      - uses: actions/checkout@v4
       - name: Install Go
-        uses: actions/setup-go@v2
+        uses: actions/setup-go@v5
         with:
           go-version: ${{ matrix.go-version }}
-      - name: Checkout code
-        uses: actions/checkout@v2
       - name: Test
         run: go test -v ./...

README.md

Lines changed: 3 additions & 4 deletions
@@ -5,8 +5,7 @@ locker provides a mechanism for creating finer-grained locking to help
 free up more global locks to handle other tasks.
 
 The implementation looks close to a sync.Mutex, however, the user must provide a
-reference to use to refer to the underlying lock when locking and unlocking,
-and unlock may generate an error.
+reference to use to refer to the underlying lock when locking and unlocking.
 
 If a lock with a given name does not exist when `Lock` is called, one is
 created.
@@ -23,11 +22,11 @@ import (
 	"sync"
 	"time"
 
-	"github.com/moby/locker"
+	"github.com/moby/locker/v2"
 )
 
 type important struct {
-	locks *locker.Locker
+	locks locker.MutexMap[string]
 	data  map[string]interface{}
 	mu    sync.Mutex
 }
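
To make the upgrade concrete, here is a hedged sketch of how a downstream package might use the v2 API shown in the README excerpt above. The getValue accessor, the example data, and the "answer" key are illustrative assumptions, not part of this commit.

package main

import (
	"fmt"
	"sync"

	"github.com/moby/locker/v2"
)

type important struct {
	locks locker.MutexMap[string] // zero value is ready to use; no constructor needed
	data  map[string]interface{}
	mu    sync.Mutex
}

// getValue is an illustrative accessor: the per-key mutex serializes work on a
// single entry, while the coarse mutex only guards the map itself.
func (i *important) getValue(name string) interface{} {
	i.locks.Lock(name)
	defer i.locks.Unlock(name)

	i.mu.Lock()
	defer i.mu.Unlock()
	return i.data[name]
}

func main() {
	i := &important{data: map[string]interface{}{"answer": 42}}
	fmt.Println(i.getValue("answer"))
}

Note that, unlike v1, Unlock no longer returns an error, so call sites need no error handling.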

go.mod

Lines changed: 2 additions & 2 deletions
@@ -1,3 +1,3 @@
-module github.com/moby/locker
+module github.com/moby/locker/v2
 
-go 1.13
+go 1.19

locker.go

Lines changed: 0 additions & 112 deletions
This file was deleted.

mutexmap.go

Lines changed: 89 additions & 0 deletions
@@ -0,0 +1,89 @@
+/*
+Package locker provides a mechanism for creating finer-grained locking to help
+free up more global locks to handle other tasks.
+*/
+package locker
+
+import (
+	"sync"
+	"sync/atomic"
+)
+
+// MutexMap is a more convenient map[T]sync.Mutex. It automatically makes and
+// deletes mutexes as needed. Unlocked mutexes consume no memory.
+//
+// The zero value is a valid MutexMap.
+type MutexMap[T comparable] struct {
+	mu    sync.Mutex
+	locks map[T]*lockCtr
+}
+
+// lockCtr is used by Locker to represent a lock with a given key.
+type lockCtr struct {
+	sync.Mutex
+	waiters atomic.Int32 // Number of callers waiting to acquire the lock
+}
+
+var lockCtrPool = sync.Pool{New: func() any { return new(lockCtr) }}
+
+// Lock locks the mutex identified by key.
+func (l *MutexMap[T]) Lock(key T) {
+	l.mu.Lock()
+	if l.locks == nil {
+		l.locks = make(map[T]*lockCtr)
+	}
+
+	nameLock, exists := l.locks[key]
+	if !exists {
+		nameLock = lockCtrPool.Get().(*lockCtr)
+		l.locks[key] = nameLock
+	}
+
+	// Increment the nameLock waiters while inside the main mutex.
+	// This makes sure that the lock isn't deleted if `Lock` and `Unlock` are called concurrently.
+	nameLock.waiters.Add(1)
+	l.mu.Unlock()
+
+	// Lock the nameLock outside the main mutex so we don't block other operations.
+	// Once locked then we can decrement the number of waiters for this lock.
+	nameLock.Lock()
+	nameLock.waiters.Add(-1)
+}
+
+// Unlock unlocks the mutex identified by key.
+//
+// It is a run-time error if the mutex is not locked on entry to Unlock.
+func (l *MutexMap[T]) Unlock(key T) {
+	l.mu.Lock()
+	defer l.mu.Unlock()
+	nameLock, exists := l.locks[key]
+	if !exists {
+		// Generate an un-recover()-able error without reaching into runtime internals.
+		(&sync.Mutex{}).Unlock()
+	}
+
+	if nameLock.waiters.Load() <= 0 {
+		delete(l.locks, key)
+		defer lockCtrPool.Put(nameLock)
+	}
+	nameLock.Unlock()
+}
+
+type nameLocker[T comparable] struct {
+	l   *MutexMap[T]
+	key T
+}
+
+// Locker returns a [sync.Locker] interface that implements
+// the [sync.Locker.Lock] and [sync.Locker.Unlock] methods
+// by calling l.Lock(key) and l.Unlock(key).
+func (l *MutexMap[T]) Locker(key T) sync.Locker {
+	return nameLocker[T]{l: l, key: key}
+}
+
+func (n nameLocker[T]) Lock() {
+	n.l.Lock(n.key)
+}
+func (n nameLocker[T]) Unlock() {
+	n.l.Unlock(n.key)
+}
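
As a usage note, the new Locker(key) method adapts a single key to the standard sync.Locker interface. A minimal sketch follows, assuming the module is fetched under the /v2 path introduced in this commit; the "jobs" and "resource" keys are made up for illustration.

package main

import (
	"fmt"
	"sync"

	"github.com/moby/locker/v2"
)

func main() {
	var mm locker.MutexMap[string] // zero value is valid, per the doc comment

	// The adapter lets a per-key mutex be passed anywhere a sync.Locker is
	// expected, for example as the locker backing a sync.Cond.
	cond := sync.NewCond(mm.Locker("jobs"))
	_ = cond

	// It also works for ordinary critical sections without repeating the key.
	l := mm.Locker("resource")
	l.Lock()
	fmt.Println("holding the per-key lock for \"resource\"")
	l.Unlock()
}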

locker_test.go renamed to mutexmap_test.go

Lines changed: 21 additions & 34 deletions
@@ -8,27 +8,13 @@ import (
 	"time"
 )
 
-func TestLockCounter(t *testing.T) {
-	l := &lockCtr{}
-	l.inc()
-
-	if l.waiters != 1 {
-		t.Fatal("counter inc failed")
-	}
-
-	l.dec()
-	if l.waiters != 0 {
-		t.Fatal("counter dec failed")
-	}
-}
-
-func TestLockerLock(t *testing.T) {
-	l := New()
+func TestMutexMap_Lock(t *testing.T) {
+	var l MutexMap[string]
 	l.Lock("test")
 	ctr := l.locks["test"]
 
-	if ctr.count() != 0 {
-		t.Fatalf("expected waiters to be 0, got :%d", ctr.waiters)
+	if w := ctr.waiters.Load(); w != 0 {
+		t.Fatalf("expected waiters to be 0, got %d", w)
 	}
 
 	chDone := make(chan struct{})
@@ -40,7 +26,7 @@ func TestLockerLock(t *testing.T) {
 	chWaiting := make(chan struct{})
 	go func() {
 		for range time.Tick(1 * time.Millisecond) {
-			if ctr.count() == 1 {
+			if ctr.waiters.Load() == 1 {
 				close(chWaiting)
 				break
 			}
@@ -59,23 +45,21 @@ func TestLockerLock(t *testing.T) {
 	default:
 	}
 
-	if err := l.Unlock("test"); err != nil {
-		t.Fatal(err)
-	}
+	l.Unlock("test")
 
 	select {
 	case <-chDone:
 	case <-time.After(3 * time.Second):
 		t.Fatalf("lock should have completed")
 	}
 
-	if ctr.count() != 0 {
-		t.Fatalf("expected waiters to be 0, got: %d", ctr.count())
+	if w := ctr.waiters.Load(); w != 0 {
+		t.Fatalf("expected waiters to be 0, got %d", w)
 	}
 }
 
-func TestLockerUnlock(t *testing.T) {
-	l := New()
+func TestMutexMap_Unlock(t *testing.T) {
+	var l MutexMap[string]
 
 	l.Lock("test")
 	l.Unlock("test")
@@ -93,8 +77,8 @@ func TestLockerUnlock(t *testing.T) {
 	}
 }
 
-func TestLockerConcurrency(t *testing.T) {
-	l := New()
+func TestMutexMap_Concurrency(t *testing.T) {
+	var l MutexMap[string]
 
 	var wg sync.WaitGroup
 	for i := 0; i <= 10000; i++ {
@@ -125,16 +109,19 @@ func TestLockerConcurrency(t *testing.T) {
 	}
 }
 
-func BenchmarkLocker(b *testing.B) {
-	l := New()
+func BenchmarkMutexMap(b *testing.B) {
+	var l MutexMap[string]
+	b.ReportAllocs()
 	for i := 0; i < b.N; i++ {
 		l.Lock("test")
+		l.Lock(strconv.Itoa(i))
+		l.Unlock(strconv.Itoa(i))
 		l.Unlock("test")
 	}
 }
 
-func BenchmarkLockerParallel(b *testing.B) {
-	l := New()
+func BenchmarkMutexMap_Parallel(b *testing.B) {
+	var l MutexMap[string]
 	b.SetParallelism(128)
 	b.RunParallel(func(pb *testing.PB) {
 		for pb.Next() {
@@ -144,8 +131,8 @@ func BenchmarkLockerParallel(b *testing.B) {
 	})
 }
 
-func BenchmarkLockerMoreKeys(b *testing.B) {
-	l := New()
+func BenchmarkMutexMap_MoreKeys(b *testing.B) {
+	var l MutexMap[string]
 	var keys []string
 	for i := 0; i < 64; i++ {
 		keys = append(keys, strconv.Itoa(i))

0 commit comments
