author    Jakob Unterwurzacher    2016-10-25 23:57:30 +0200
committer Jakob Unterwurzacher    2016-10-28 21:17:53 +0200
commit    a08d55f42d5b11e265a8617bee16babceebfd026 (patch)
tree      7dd8b2717e871183b0067ad1f565419f57d34ea9 /internal/fusefrontend/write_lock.go
parent    9b7135224bc3596ee3bceeca83cca3a687b0e9c7 (diff)
fusefrontend: optimize NFS streaming writes by saving one Stat()
Stat() calls are expensive on NFS because they require a full network round-trip. We detect when a write immediately follows the previous one and skip the Stat() in that case, because such a write cannot create a file hole. On my (slow) NAS, this takes the write speed from 24 MB/s to 41 MB/s.
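The write path that consumes the counter is not part of this diff (the diffstat below is limited to write_lock.go). The following is a minimal, self-contained sketch of the idea, assuming a hypothetical file handle with made-up fields lastOpCount and lastWriteEnd and a made-up doWrite; only the opCount field and its increment in lock() mirror the actual patch.

package main

import (
	"fmt"
	"sync/atomic"
)

// Minimal stand-in for wlockMap: only the opCount behavior from the diff.
type wlockMap struct {
	opCount uint64 // first field, so 64-bit atomics are safe on 32-bit platforms
}

func (w *wlockMap) lock(ino uint64)   { atomic.AddUint64(&w.opCount, 1) }
func (w *wlockMap) unlock(ino uint64) {}

// file carries the per-handle state needed to detect back-to-back writes.
// lastOpCount and lastWriteEnd are illustrative names, not gocryptfs API.
type file struct {
	wlock        *wlockMap
	ino          uint64
	lastOpCount  uint64 // wlock.opCount observed at our last write
	lastWriteEnd int64  // offset just past our last write; -1 = no write yet
}

func (f *file) doWrite(data []byte, off int64) {
	f.wlock.lock(f.ino) // increments opCount
	defer f.wlock.unlock(f.ino)

	opCount := atomic.LoadUint64(&f.wlock.opCount)
	// Illustrative heuristic, not the actual gocryptfs logic: if the only
	// lock() since our last write was our own, and this write starts exactly
	// where the last one ended, it cannot create a file hole, so the
	// expensive Stat() round-trip can be skipped.
	if opCount == f.lastOpCount+1 && off == f.lastWriteEnd {
		fmt.Println("consecutive write: skipping Stat()")
	} else {
		fmt.Println("calling Stat() to check for a file hole")
	}
	// ... encrypt and write the data (omitted) ...
	f.lastOpCount = opCount
	f.lastWriteEnd = off + int64(len(data))
}

func main() {
	f := &file{wlock: &wlockMap{}, ino: 1, lastWriteEnd: -1}
	f.doWrite(make([]byte, 4096), 0)    // first write: Stat()
	f.doWrite(make([]byte, 4096), 4096) // streaming write: Stat() skipped
}

If the heuristic misfires (another operation slipped in between), the only cost is one extra Stat(); a hole can never go undetected, which is what makes the shortcut safe.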
Diffstat (limited to 'internal/fusefrontend/write_lock.go')
-rw-r--r--  internal/fusefrontend/write_lock.go  8
1 file changed, 8 insertions(+), 0 deletions(-)
diff --git a/internal/fusefrontend/write_lock.go b/internal/fusefrontend/write_lock.go
index 2f8ea4e..7394994 100644
--- a/internal/fusefrontend/write_lock.go
+++ b/internal/fusefrontend/write_lock.go
@@ -2,6 +2,7 @@ package fusefrontend
import (
"sync"
+ "sync/atomic"
)
func init() {
@@ -20,6 +21,12 @@ var wlock wlockMap
// 2) lock ... unlock ...
// 3) unregister
type wlockMap struct {
+ // Counts lock() calls. As every operation that modifies a file should
+ // call it, this effectively serves as a write-operation counter.
+ // The variable is accessed without holding any locks, so atomic operations
+ // must be used. It must be the first element of the struct to guarantee
+ // 64-bit alignment.
+ opCount uint64
// Protects map access
sync.Mutex
inodeLocks map[uint64]*refCntMutex
@@ -62,6 +69,7 @@ func (w *wlockMap) unregister(ino uint64) {
// lock retrieves the entry for "ino" and locks it.
func (w *wlockMap) lock(ino uint64) {
+ atomic.AddUint64(&w.opCount, 1)
w.Lock()
r := w.inodeLocks[ino]
w.Unlock()
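For reference, the "first element of the struct" remark in the comment above reflects a documented sync/atomic limitation: on 32-bit platforms (386, ARM, 32-bit MIPS), 64-bit atomic operations require 8-byte-aligned addresses, and Go guarantees that alignment only for the first word of an allocated struct. A minimal sketch of the layout pitfall (struct and field names are made up for illustration):

package main

import (
	"fmt"
	"sync/atomic"
)

// bad: on 32-bit platforms opCount may land on a merely 4-byte-aligned
// address, and atomic.AddUint64 on it can panic.
type bad struct {
	enabled bool
	opCount uint64
}

// good: the first word of an allocated struct is guaranteed to be
// 64-bit aligned, so atomic 64-bit operations are always safe.
type good struct {
	opCount uint64
	enabled bool
}

func main() {
	g := &good{}
	atomic.AddUint64(&g.opCount, 1) // safe on all platforms
	fmt.Println(g.opCount)          // 1
	// b := &bad{}; atomic.AddUint64(&b.opCount, 1) // may panic on 386/ARM
}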