author    Jakob Unterwurzacher    2016-11-17 20:32:19 +0100
committer Jakob Unterwurzacher    2016-11-17 20:32:19 +0100
commit    e04dc050126edcf7440ea5412bc74c6ad8b42e95 (patch)
tree      56affe516cc83618152790582d0a8a6b2c870646 /internal
parent    081015aa746cc2a37284ad545d56d5ceb78164b6 (diff)
fusefrontend: upgrade wlockMap to use device AND inode number
If there are multiple filesystems backing the gocryptfs filesystem, inode numbers are not guaranteed to be unique.
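
To illustrate why the pair is needed (a minimal standalone sketch, not part of this commit; the devIno type and key helper below are hypothetical): stat(2) reports both a device number and an inode number, and only the combination is unique once more than one backing filesystem is involved.

package main

import (
	"fmt"
	"syscall"
)

// devIno is a hypothetical composite key, analogous to DevInoStruct in the
// diff below: inode numbers may collide across filesystems, the (dev, ino)
// pair may not.
type devIno struct {
	dev uint64
	ino uint64
}

// key stats a path and returns its (device, inode) pair.
func key(path string) (devIno, error) {
	var st syscall.Stat_t
	if err := syscall.Stat(path, &st); err != nil {
		return devIno{}, err
	}
	// Cast explicitly so this also builds where the Stat_t fields are not uint64.
	return devIno{dev: uint64(st.Dev), ino: uint64(st.Ino)}, nil
}

func main() {
	a, _ := key("/")    // root filesystem
	b, _ := key("/tmp") // often a separate tmpfs
	// Even if a.ino happened to equal b.ino, the composite keys differ
	// whenever the device numbers differ.
	fmt.Printf("a=%+v b=%+v equal=%v\n", a, b, a == b)
}
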
Diffstat (limited to 'internal')
-rw-r--r--  internal/fusefrontend/file.go                   | 41
-rw-r--r--  internal/fusefrontend/file_allocate_truncate.go | 16
-rw-r--r--  internal/fusefrontend/write_lock.go             | 46
-rw-r--r--  internal/fusefrontend_reverse/ino_map.go        |  5
-rw-r--r--  internal/fusefrontend_reverse/rfs.go            |  6
5 files changed, 63 insertions, 51 deletions
diff --git a/internal/fusefrontend/file.go b/internal/fusefrontend/file.go
index de36a45..44146d6 100644
--- a/internal/fusefrontend/file.go
+++ b/internal/fusefrontend/file.go
@@ -38,8 +38,8 @@ type file struct {
writeOnly bool
// Content encryption helper
contentEnc *contentenc.ContentEnc
- // Inode number
- ino uint64
+ // Device and inode number uniquely identify the backing file
+ devIno DevInoStruct
// File header
header *contentenc.FileHeader
// go-fuse nodefs.loopbackFile
@@ -59,13 +59,14 @@ func NewFile(fd *os.File, writeOnly bool, contentEnc *contentenc.ContentEnc) (no
tlog.Warn.Printf("NewFile: Fstat on fd %d failed: %v\n", fd.Fd(), err)
return nil, fuse.ToStatus(err)
}
- wlock.register(st.Ino)
+ di := DevInoFromStat(&st)
+ wlock.register(di)
return &file{
fd: fd,
writeOnly: writeOnly,
contentEnc: contentEnc,
- ino: st.Ino,
+ devIno: di,
loopbackFile: nodefs.NewLoopbackFile(fd),
}, fuse.OK
}
@@ -109,7 +110,7 @@ func (f *file) createHeader() error {
// Prevent partially written (=corrupt) header by preallocating the space beforehand
err := syscallcompat.EnospcPrealloc(int(f.fd.Fd()), 0, contentenc.HeaderLen)
if err != nil {
- tlog.Warn.Printf("ino%d: createHeader: prealloc failed: %s\n", f.ino, err.Error())
+ tlog.Warn.Printf("ino%d: createHeader: prealloc failed: %s\n", f.devIno.ino, err.Error())
return err
}
@@ -169,7 +170,7 @@ func (f *file) doRead(off uint64, length uint64) ([]byte, fuse.Status) {
plaintext, err := f.contentEnc.DecryptBlocks(ciphertext, firstBlockNo, f.header.ID)
if err != nil {
curruptBlockNo := firstBlockNo + f.contentEnc.PlainOffToBlockNo(uint64(len(plaintext)))
- tlog.Warn.Printf("ino%d: doRead: corrupt block #%d: %v", f.ino, curruptBlockNo, err)
+ tlog.Warn.Printf("ino%d: doRead: corrupt block #%d: %v", f.devIno.ino, curruptBlockNo, err)
return nil, fuse.EIO
}
@@ -192,23 +193,23 @@ func (f *file) Read(buf []byte, off int64) (resultData fuse.ReadResult, code fus
f.fdLock.RLock()
defer f.fdLock.RUnlock()
- tlog.Debug.Printf("ino%d: FUSE Read: offset=%d length=%d", f.ino, len(buf), off)
+ tlog.Debug.Printf("ino%d: FUSE Read: offset=%d length=%d", f.devIno.ino, len(buf), off)
if f.writeOnly {
- tlog.Warn.Printf("ino%d: Tried to read from write-only file", f.ino)
+ tlog.Warn.Printf("ino%d: Tried to read from write-only file", f.devIno.ino)
return nil, fuse.EBADF
}
out, status := f.doRead(uint64(off), uint64(len(buf)))
if status == fuse.EIO {
- tlog.Warn.Printf("ino%d: Read: returning EIO, offset=%d, length=%d", f.ino, len(buf), off)
+ tlog.Warn.Printf("ino%d: Read: returning EIO, offset=%d, length=%d", f.devIno.ino, len(buf), off)
}
if status != fuse.OK {
return nil, status
}
- tlog.Debug.Printf("ino%d: Read: status %v, returning %d bytes", f.ino, status, len(out))
+ tlog.Debug.Printf("ino%d: Read: status %v, returning %d bytes", f.devIno.ino, status, len(out))
return fuse.ReadResultData(out), status
}
@@ -250,7 +251,7 @@ func (f *file) doWrite(data []byte, off int64) (uint32, fuse.Status) {
var oldData []byte
oldData, status = f.doRead(o, f.contentEnc.PlainBS())
if status != fuse.OK {
- tlog.Warn.Printf("ino%d fh%d: RMW read failed: %s", f.ino, f.intFd(), status.String())
+ tlog.Warn.Printf("ino%d fh%d: RMW read failed: %s", f.devIno.ino, f.intFd(), status.String())
return written, status
}
// Modify
@@ -262,12 +263,12 @@ func (f *file) doWrite(data []byte, off int64) (uint32, fuse.Status) {
blockOffset := b.BlockCipherOff()
blockData = f.contentEnc.EncryptBlock(blockData, b.BlockNo, f.header.ID)
tlog.Debug.Printf("ino%d: Writing %d bytes to block #%d",
- f.ino, uint64(len(blockData))-f.contentEnc.BlockOverhead(), b.BlockNo)
+ f.devIno.ino, uint64(len(blockData))-f.contentEnc.BlockOverhead(), b.BlockNo)
// Prevent partially written (=corrupt) blocks by preallocating the space beforehand
err := syscallcompat.EnospcPrealloc(int(f.fd.Fd()), int64(blockOffset), int64(len(blockData)))
if err != nil {
- tlog.Warn.Printf("ino%d fh%d: doWrite: prealloc failed: %s", f.ino, f.intFd(), err.Error())
+ tlog.Warn.Printf("ino%d fh%d: doWrite: prealloc failed: %s", f.devIno.ino, f.intFd(), err.Error())
status = fuse.ToStatus(err)
break
}
@@ -289,7 +290,7 @@ func (f *file) doWrite(data []byte, off int64) (uint32, fuse.Status) {
// directly (in time and space) follows the last write.
// This is an optimisation for streaming writes on NFS where a
// Stat() call is very expensive.
-// The caller must "wlock.lock(f.ino)" otherwise this check would be racy.
+// The caller must "wlock.lock(f.devIno.ino)" otherwise this check would be racy.
func (f *file) isConsecutiveWrite(off int64) bool {
opCount := atomic.LoadUint64(&wlock.opCount)
return opCount == f.lastOpCount+1 && off == f.lastWrittenOffset+1
@@ -305,12 +306,12 @@ func (f *file) Write(data []byte, off int64) (uint32, fuse.Status) {
// The file descriptor has been closed concurrently, which also means
// the wlock has been freed. Exit here so we don't crash trying to access
// it.
- tlog.Warn.Printf("ino%d fh%d: Write on released file", f.ino, f.intFd())
+ tlog.Warn.Printf("ino%d fh%d: Write on released file", f.devIno.ino, f.intFd())
return 0, fuse.EBADF
}
- wlock.lock(f.ino)
- defer wlock.unlock(f.ino)
- tlog.Debug.Printf("ino%d: FUSE Write: offset=%d length=%d", f.ino, off, len(data))
+ wlock.lock(f.devIno)
+ defer wlock.unlock(f.devIno)
+ tlog.Debug.Printf("ino%d: FUSE Write: offset=%d length=%d", f.devIno.ino, off, len(data))
// If the write creates a file hole, we have to zero-pad the last block.
// But if the write directly follows an earlier write, it cannot create a
// hole, and we can save one Stat() call.
@@ -332,13 +333,13 @@ func (f *file) Write(data []byte, off int64) (uint32, fuse.Status) {
func (f *file) Release() {
f.fdLock.Lock()
if f.released {
- log.Panicf("ino%d fh%d: double release", f.ino, f.intFd())
+ log.Panicf("ino%d fh%d: double release", f.devIno.ino, f.intFd())
}
f.fd.Close()
f.released = true
f.fdLock.Unlock()
- wlock.unregister(f.ino)
+ wlock.unregister(f.devIno)
}
// Flush - FUSE call
diff --git a/internal/fusefrontend/file_allocate_truncate.go b/internal/fusefrontend/file_allocate_truncate.go
index bedbba3..f2fca22 100644
--- a/internal/fusefrontend/file_allocate_truncate.go
+++ b/internal/fusefrontend/file_allocate_truncate.go
@@ -50,8 +50,8 @@ func (f *file) Allocate(off uint64, sz uint64, mode uint32) fuse.Status {
if f.released {
return fuse.EBADF
}
- wlock.lock(f.ino)
- defer wlock.unlock(f.ino)
+ wlock.lock(f.devIno)
+ defer wlock.unlock(f.devIno)
blocks := f.contentEnc.ExplodePlainRange(off, sz)
firstBlock := blocks[0]
@@ -97,17 +97,17 @@ func (f *file) Truncate(newSize uint64) fuse.Status {
defer f.fdLock.RUnlock()
if f.released {
// The file descriptor has been closed concurrently.
- tlog.Warn.Printf("ino%d fh%d: Truncate on released file", f.ino, f.intFd())
+ tlog.Warn.Printf("ino%d fh%d: Truncate on released file", f.devIno.ino, f.intFd())
return fuse.EBADF
}
- wlock.lock(f.ino)
- defer wlock.unlock(f.ino)
+ wlock.lock(f.devIno)
+ defer wlock.unlock(f.devIno)
var err error
// Common case first: Truncate to zero
if newSize == 0 {
err = syscall.Ftruncate(int(f.fd.Fd()), 0)
if err != nil {
- tlog.Warn.Printf("ino%d fh%d: Ftruncate(fd, 0) returned error: %v", f.ino, f.intFd(), err)
+ tlog.Warn.Printf("ino%d fh%d: Ftruncate(fd, 0) returned error: %v", f.devIno.ino, f.intFd(), err)
return fuse.ToStatus(err)
}
// Truncate to zero kills the file header
@@ -123,7 +123,7 @@ func (f *file) Truncate(newSize uint64) fuse.Status {
oldB := float32(oldSize) / float32(f.contentEnc.PlainBS())
newB := float32(newSize) / float32(f.contentEnc.PlainBS())
- tlog.Debug.Printf("ino%d: FUSE Truncate from %.2f to %.2f blocks (%d to %d bytes)", f.ino, oldB, newB, oldSize, newSize)
+ tlog.Debug.Printf("ino%d: FUSE Truncate from %.2f to %.2f blocks (%d to %d bytes)", f.devIno.ino, oldB, newB, oldSize, newSize)
// File size stays the same - nothing to do
if newSize == oldSize {
@@ -166,7 +166,7 @@ func (f *file) Truncate(newSize uint64) fuse.Status {
func (f *file) statPlainSize() (uint64, error) {
fi, err := f.fd.Stat()
if err != nil {
- tlog.Warn.Printf("ino%d fh%d: statPlainSize: %v", f.ino, f.intFd(), err)
+ tlog.Warn.Printf("ino%d fh%d: statPlainSize: %v", f.devIno.ino, f.intFd(), err)
return 0, err
}
cipherSz := uint64(fi.Size())
diff --git a/internal/fusefrontend/write_lock.go b/internal/fusefrontend/write_lock.go
index 3addfd6..71e911e 100644
--- a/internal/fusefrontend/write_lock.go
+++ b/internal/fusefrontend/write_lock.go
@@ -3,10 +3,26 @@ package fusefrontend
import (
"sync"
"sync/atomic"
+ "syscall"
)
+// DevInoStruct uniquely identifies a backing file through device number and
+// inode number.
+type DevInoStruct struct {
+ dev uint64
+ ino uint64
+}
+
+func DevInoFromStat(st *syscall.Stat_t) DevInoStruct {
+ // Explicit cast to uint64 to prevent build problems on 32-bit platforms
+ return DevInoStruct{
+ dev: uint64(st.Dev),
+ ino: uint64(st.Ino),
+ }
+}
+
func init() {
- wlock.inodeLocks = make(map[uint64]*refCntMutex)
+ wlock.inodeLocks = make(map[DevInoStruct]*refCntMutex)
}
// wlock - serializes write accesses to each file (identified by inode number)
@@ -29,7 +45,7 @@ type wlockMap struct {
opCount uint64
// Protects map access
sync.Mutex
- inodeLocks map[uint64]*refCntMutex
+ inodeLocks map[DevInoStruct]*refCntMutex
}
// refCntMutex - mutex with reference count
@@ -42,45 +58,45 @@ type refCntMutex struct {
// register creates an entry for "ino", or incrementes the reference count
// if the entry already exists.
-func (w *wlockMap) register(ino uint64) {
+func (w *wlockMap) register(di DevInoStruct) {
w.Lock()
defer w.Unlock()
- r := w.inodeLocks[ino]
+ r := w.inodeLocks[di]
if r == nil {
r = &refCntMutex{}
- w.inodeLocks[ino] = r
+ w.inodeLocks[di] = r
}
r.refCnt++
}
-// unregister decrements the reference count for "ino" and deletes the entry if
+// unregister decrements the reference count for "di" and deletes the entry if
// the reference count has reached 0.
-func (w *wlockMap) unregister(ino uint64) {
+func (w *wlockMap) unregister(di DevInoStruct) {
w.Lock()
defer w.Unlock()
- r := w.inodeLocks[ino]
+ r := w.inodeLocks[di]
r.refCnt--
if r.refCnt == 0 {
- delete(w.inodeLocks, ino)
+ delete(w.inodeLocks, di)
}
}
-// lock retrieves the entry for "ino" and locks it.
-func (w *wlockMap) lock(ino uint64) {
+// lock retrieves the entry for "di" and locks it.
+func (w *wlockMap) lock(di DevInoStruct) {
atomic.AddUint64(&w.opCount, 1)
w.Lock()
- r := w.inodeLocks[ino]
+ r := w.inodeLocks[di]
w.Unlock()
// this can take a long time - execute outside the wlockMap lock
r.Lock()
}
-// unlock retrieves the entry for "ino" and unlocks it.
-func (w *wlockMap) unlock(ino uint64) {
+// unlock retrieves the entry for "di" and unlocks it.
+func (w *wlockMap) unlock(di DevInoStruct) {
w.Lock()
- r := w.inodeLocks[ino]
+ r := w.inodeLocks[di]
w.Unlock()
r.Unlock()
}
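
Putting the pieces of this diff together, the intended lifecycle is: register the (device, inode) key when a file handle is opened, lock/unlock around each write or truncate, and unregister on Release. The following is a condensed, standalone re-implementation of that pattern for illustration only; it is not the gocryptfs code, and names such as lockMap are hypothetical.

package main

import (
	"fmt"
	"sync"
)

// devIno mirrors the composite key idea from the diff above.
type devIno struct{ dev, ino uint64 }

// refCntMutex is a mutex plus a reference count, so the map entry can be
// dropped once the last file handle for that backing file is closed.
type refCntMutex struct {
	sync.Mutex
	refCnt int
}

// lockMap hands out one refCntMutex per (device, inode) pair.
type lockMap struct {
	sync.Mutex // protects m
	m          map[devIno]*refCntMutex
}

func (l *lockMap) register(k devIno) {
	l.Lock()
	defer l.Unlock()
	r := l.m[k]
	if r == nil {
		r = &refCntMutex{}
		l.m[k] = r
	}
	r.refCnt++
}

func (l *lockMap) unregister(k devIno) {
	l.Lock()
	defer l.Unlock()
	r := l.m[k]
	r.refCnt--
	if r.refCnt == 0 {
		delete(l.m, k)
	}
}

// lock and unlock take the per-file mutex outside the map mutex, because a
// write can take a long time.
func (l *lockMap) lock(k devIno)   { l.Lock(); r := l.m[k]; l.Unlock(); r.Lock() }
func (l *lockMap) unlock(k devIno) { l.Lock(); r := l.m[k]; l.Unlock(); r.Unlock() }

func main() {
	wl := &lockMap{m: make(map[devIno]*refCntMutex)}
	k := devIno{dev: 1, ino: 42}
	wl.register(k) // open: create or refcount the entry
	wl.lock(k)     // write: serialize against other writers of the same backing file
	fmt.Println("writing under the per-file lock")
	wl.unlock(k)
	wl.unregister(k) // release: drop the entry when the last handle closes
}
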
diff --git a/internal/fusefrontend_reverse/ino_map.go b/internal/fusefrontend_reverse/ino_map.go
index 9412343..dae8222 100644
--- a/internal/fusefrontend_reverse/ino_map.go
+++ b/internal/fusefrontend_reverse/ino_map.go
@@ -17,8 +17,3 @@ type inoGenT struct {
func (i *inoGenT) next() uint64 {
return atomic.AddUint64(i.ino, 1)
}
-
-type devIno struct {
- dev uint64
- ino uint64
-}
diff --git a/internal/fusefrontend_reverse/rfs.go b/internal/fusefrontend_reverse/rfs.go
index 7f102cf..aca4b90 100644
--- a/internal/fusefrontend_reverse/rfs.go
+++ b/internal/fusefrontend_reverse/rfs.go
@@ -41,7 +41,7 @@ type ReverseFS struct {
// Inode number generator
inoGen *inoGenT
// Maps backing files device+inode pairs to user-facing unique inode numbers
- inoMap map[devIno]uint64
+ inoMap map[fusefrontend.DevInoStruct]uint64
// Protects map access
inoMapLock sync.Mutex
}
@@ -68,7 +68,7 @@ func NewFS(args fusefrontend.Args) *ReverseFS {
nameTransform: nameTransform,
contentEnc: contentEnc,
inoGen: newInoGen(),
- inoMap: map[devIno]uint64{},
+ inoMap: map[fusefrontend.DevInoStruct]uint64{},
}
}
@@ -167,7 +167,7 @@ func (rfs *ReverseFS) inoAwareStat(relPlainPath string) (*fuse.Attr, fuse.Status
// The file has hard links. We have to give it a stable inode number so
// tar or rsync can find them.
if fi.Mode().IsRegular() && st.Nlink > 1 {
- di := devIno{uint64(st.Dev), st.Ino}
+ di := fusefrontend.DevInoFromStat(st)
rfs.inoMapLock.Lock()
stableIno := rfs.inoMap[di]
if stableIno == 0 {