author     Jakob Unterwurzacher  2016-10-25 23:57:30 +0200
committer  Jakob Unterwurzacher  2016-10-28 21:17:53 +0200
commit     a08d55f42d5b11e265a8617bee16babceebfd026 (patch)
tree       7dd8b2717e871183b0067ad1f565419f57d34ea9 /internal/fusefrontend
parent     9b7135224bc3596ee3bceeca83cca3a687b0e9c7 (diff)
fusefrontend: optimize NFS streaming writes by saving one Stat()
Stat() calls are expensive on NFS because they require a full network round-trip. We detect when a write directly follows the previous one and skip the Stat() in that case, since such a write cannot create a file hole. On my (slow) NAS, this takes the write speed from 24 MB/s to 41 MB/s.
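
The mechanism is easiest to see in isolation. Below is a minimal sketch of the consecutive-write detection, assuming a simplified writer type (the names writer and writeAt are hypothetical stand-ins, not the gocryptfs API):

	package main

	import "fmt"

	// writer mimics the two bookkeeping fields the patch adds to file.
	type writer struct {
		lastWrittenOffset int64  // offset of the last byte written
		lastOpCount       uint64 // opCount observed after our last write
	}

	// opCount stands in for wlock.opCount: it advances on every write operation.
	var opCount uint64

	// isConsecutive mirrors isConsecutiveWrite: the write must follow the
	// previous one in time (no other operation ran in between) and in space
	// (it starts right after the last byte we wrote).
	func (w *writer) isConsecutive(off int64) bool {
		return opCount == w.lastOpCount+1 && off == w.lastWrittenOffset+1
	}

	func (w *writer) writeAt(data []byte, off int64) {
		opCount++
		if !w.isConsecutive(off) {
			fmt.Printf("off=%d: would Stat() and pad a possible hole\n", off)
		}
		w.lastOpCount = opCount
		w.lastWrittenOffset = off + int64(len(data)) - 1
	}

	func main() {
		w := &writer{}
		buf := make([]byte, 4096)
		w.writeAt(buf, 0)    // first write: hole check runs
		w.writeAt(buf, 4096) // streaming write: hole check skipped
	}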
Diffstat (limited to 'internal/fusefrontend')
-rw-r--r--  internal/fusefrontend/file.go        33
-rw-r--r--  internal/fusefrontend/write_lock.go   8
2 files changed, 37 insertions(+), 4 deletions(-)
diff --git a/internal/fusefrontend/file.go b/internal/fusefrontend/file.go
index ee48930..9991f3e 100644
--- a/internal/fusefrontend/file.go
+++ b/internal/fusefrontend/file.go
@@ -9,6 +9,7 @@ import (
"log"
"os"
"sync"
+ "sync/atomic"
"syscall"
"time"
@@ -43,6 +44,11 @@ type file struct {
header *contentenc.FileHeader
// go-fuse nodefs.loopbackFile
loopbackFile nodefs.File
+ // Store the offset of the last byte that was written
+ lastWrittenOffset int64
+ // The opCount is used to judge whether "lastWrittenOffset" is still
+ // guaranteed to be correct.
+ lastOpCount uint64
}
// NewFile returns a new go-fuse File instance.
@@ -282,6 +288,16 @@ func (f *file) doWrite(data []byte, off int64) (uint32, fuse.Status) {
return written, status
}
+// isConsecutiveWrite returns true if the current write
+// directly (in time and space) follows the last write.
+// This is an optimisation for streaming writes on NFS where a
+// Stat() call is very expensive.
+ // The caller must hold "wlock.lock(f.ino)", otherwise this check would be racy.
+func (f *file) isConsecutiveWrite(off int64) bool {
+ opCount := atomic.LoadUint64(&wlock.opCount)
+ return opCount == f.lastOpCount+1 && off == f.lastWrittenOffset+1
+}
+
// Write - FUSE call
//
// If the write creates a hole, pads the file to the next block boundary.
@@ -299,11 +315,20 @@ func (f *file) Write(data []byte, off int64) (uint32, fuse.Status) {
defer wlock.unlock(f.ino)
tlog.Debug.Printf("ino%d: FUSE Write: offset=%d length=%d", f.ino, off, len(data))
// If the write creates a file hole, we have to zero-pad the last block.
- status := f.writePadHole(off)
- if !status.Ok() {
- return 0, status
+ // But if the write directly follows an earlier write, it cannot create a
+ // hole, and we can save one Stat() call.
+ if !f.isConsecutiveWrite(off) {
+ status := f.writePadHole(off)
+ if !status.Ok() {
+ return 0, status
+ }
+ }
+ n, status := f.doWrite(data, off)
+ if status.Ok() {
+ f.lastOpCount = atomic.LoadUint64(&wlock.opCount)
+ f.lastWrittenOffset = off + int64(len(data)) - 1
}
- return f.doWrite(data, off)
+ return n, status
}
// Release - FUSE call, close file
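
The bookkeeping after a successful write is worth a worked example: a write of length n at offset off covers bytes [off, off+n-1], so lastWrittenOffset records the last byte written and the next contiguous write must start at lastWrittenOffset+1. A standalone check of that arithmetic (illustrative only, not part of the patch; in the patch the opCount comparison additionally rules out the very first write):

	package main

	import "fmt"

	func main() {
		var lastWrittenOffset int64
		seen := false // the patch uses the opCount check instead of this flag
		for _, w := range []struct{ off, n int64 }{
			{0, 4096},     // first write
			{4096, 4096},  // contiguous: 4096 == 4095+1
			{12288, 4096}, // seek forward: would leave a hole
		} {
			consecutive := seen && w.off == lastWrittenOffset+1
			fmt.Printf("off=%5d consecutive=%v\n", w.off, consecutive)
			lastWrittenOffset = w.off + w.n - 1
			seen = true
		}
	}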
diff --git a/internal/fusefrontend/write_lock.go b/internal/fusefrontend/write_lock.go
index 2f8ea4e..7394994 100644
--- a/internal/fusefrontend/write_lock.go
+++ b/internal/fusefrontend/write_lock.go
@@ -2,6 +2,7 @@ package fusefrontend
import (
"sync"
+ "sync/atomic"
)
func init() {
@@ -20,6 +21,12 @@ var wlock wlockMap
// 2) lock ... unlock ...
// 3) unregister
type wlockMap struct {
+ // Counts lock() calls. As every operation that modifies a file should
+ // call it, this effectively serves as a write-operation counter.
+ // The variable is accessed without holding any locks so atomic operations
+ // must be used. It must be the first element of the struct to guarantee
+ // 64-bit alignment.
+ opCount uint64
// Protects map access
sync.Mutex
inodeLocks map[uint64]*refCntMutex
@@ -62,6 +69,7 @@ func (w *wlockMap) unregister(ino uint64) {
// lock retrieves the entry for "ino" and locks it.
func (w *wlockMap) lock(ino uint64) {
+ atomic.AddUint64(&w.opCount, 1)
w.Lock()
r := w.inodeLocks[ino]
w.Unlock()
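
The alignment requirement mentioned in the wlockMap comment is a general sync/atomic rule: on 32-bit platforms, 64-bit atomic operations need 64-bit-aligned operands, and Go only guarantees that alignment for the first word in an allocated struct. A minimal sketch of the safe layout (not gocryptfs code):

	package main

	import (
		"fmt"
		"sync"
		"sync/atomic"
	)

	// counters places the uint64 first, so atomic.AddUint64 is safe even on
	// 32-bit architectures such as 386 or ARM, where a later field could end
	// up only 32-bit aligned.
	type counters struct {
		ops uint64
		mu  sync.Mutex
	}

	func main() {
		var c counters
		c.mu.Lock()
		atomic.AddUint64(&c.ops, 1)
		c.mu.Unlock()
		fmt.Println(atomic.LoadUint64(&c.ops)) // 1
	}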