Diffstat (limited to 'internal')
-rw-r--r--  internal/fusefrontend/file.go                   | 38
-rw-r--r--  internal/fusefrontend/file_allocate_truncate.go |  8
-rw-r--r--  internal/fusefrontend/file_holes.go             |  4
3 files changed, 25 insertions(+), 25 deletions(-)
diff --git a/internal/fusefrontend/file.go b/internal/fusefrontend/file.go
index 2041154..bcf4b83 100644
--- a/internal/fusefrontend/file.go
+++ b/internal/fusefrontend/file.go
@@ -23,10 +23,10 @@ import (
 	"github.com/rfjakob/gocryptfs/internal/tlog"
 )
 
-var _ nodefs.File = &file{} // Verify that interface is implemented.
+var _ nodefs.File = &File{} // Verify that interface is implemented.
 
 // File - based on loopbackFile in go-fuse/fuse/nodefs/files.go
-type file struct {
+type File struct {
 	fd *os.File
 	// Has Release() already been called on this file? This also means that the
 	// wlock entry has been freed, so let's not crash trying to access it.
@@ -60,7 +60,7 @@ type file struct {
 }
 
 // NewFile returns a new go-fuse File instance.
-func NewFile(fd *os.File, fs *FS) (nodefs.File, fuse.Status) {
+func NewFile(fd *os.File, fs *FS) (*File, fuse.Status) {
 	var st syscall.Stat_t
 	err := syscall.Fstat(int(fd.Fd()), &st)
 	if err != nil {
@@ -70,7 +70,7 @@ func NewFile(fd *os.File, fs *FS) (nodefs.File, fuse.Status) {
 	qi := openfiletable.QInoFromStat(&st)
 	e := openfiletable.Register(qi)
 
-	return &file{
+	return &File{
 		fd:             fd,
 		contentEnc:     fs.contentEnc,
 		qIno:           qi,
@@ -83,13 +83,13 @@ func NewFile(fd *os.File, fs *FS) (nodefs.File, fuse.Status) {
 
 // intFd - return the backing file descriptor as an integer. Used for debug
 // messages.
-func (f *file) intFd() int {
+func (f *File) intFd() int {
 	return int(f.fd.Fd())
 }
 
 // readFileID loads the file header from disk and extracts the file ID.
 // Returns io.EOF if the file is empty.
-func (f *file) readFileID() ([]byte, error) {
+func (f *File) readFileID() ([]byte, error) {
 	// We read +1 byte to determine if the file has actual content
 	// and not only the header. A header-only file will be considered empty.
 	// This makes File ID poisoning more difficult.
@@ -115,7 +115,7 @@
 // createHeader creates a new random header and writes it to disk.
 // Returns the new file ID.
 // The caller must hold fileIDLock.Lock().
-func (f *file) createHeader() (fileID []byte, err error) {
+func (f *File) createHeader() (fileID []byte, err error) {
 	h := contentenc.RandomHeader()
 	buf := h.Pack()
 	// Prevent partially written (=corrupt) header by preallocating the space beforehand
@@ -143,7 +143,7 @@
 //
 // Called by Read() for normal reading,
 // by Write() and Truncate() for Read-Modify-Write
-func (f *file) doRead(dst []byte, off uint64, length uint64) ([]byte, fuse.Status) {
+func (f *File) doRead(dst []byte, off uint64, length uint64) ([]byte, fuse.Status) {
 	// Make sure we have the file ID.
 	f.fileTableEntry.HeaderLock.RLock()
 	if f.fileTableEntry.ID == nil {
@@ -230,7 +230,7 @@
 }
 
 // Read - FUSE call
-func (f *file) Read(buf []byte, off int64) (resultData fuse.ReadResult, code fuse.Status) {
+func (f *File) Read(buf []byte, off int64) (resultData fuse.ReadResult, code fuse.Status) {
 	if len(buf) > fuse.MAX_KERNEL_WRITE {
 		// This would crash us due to our fixed-size buffer pool
 		tlog.Warn.Printf("Read: rejecting oversized request with EMSGSIZE, len=%d", len(buf))
@@ -263,7 +263,7 @@
 // and by Truncate() to rewrite the last file block.
 //
 // Empty writes do nothing and are allowed.
-func (f *file) doWrite(data []byte, off int64) (uint32, fuse.Status) {
+func (f *File) doWrite(data []byte, off int64) (uint32, fuse.Status) {
 	// Read header from disk, create a new one if the file is empty
 	f.fileTableEntry.HeaderLock.RLock()
 	if f.fileTableEntry.ID == nil {
@@ -337,7 +337,7 @@ func (f *file) doWrite(data []byte, off int64) (uint32, fuse.Status) {
 // This is an optimisation for streaming writes on NFS where a
 // Stat() call is very expensive.
 // The caller must "wlock.lock(f.devIno.ino)" otherwise this check would be racy.
-func (f *file) isConsecutiveWrite(off int64) bool {
+func (f *File) isConsecutiveWrite(off int64) bool {
 	opCount := openfiletable.WriteOpCount()
 	return opCount == f.lastOpCount+1 && off == f.lastWrittenOffset+1
 }
@@ -345,7 +345,7 @@ func (f *file) isConsecutiveWrite(off int64) bool {
 // Write - FUSE call
 //
 // If the write creates a hole, pads the file to the next block boundary.
-func (f *file) Write(data []byte, off int64) (uint32, fuse.Status) {
+func (f *File) Write(data []byte, off int64) (uint32, fuse.Status) {
 	if len(data) > fuse.MAX_KERNEL_WRITE {
 		// This would crash us due to our fixed-size buffer pool
 		tlog.Warn.Printf("Write: rejecting oversized request with EMSGSIZE, len=%d", len(data))
@@ -381,7 +381,7 @@ func (f *file) Write(data []byte, off int64) (uint32, fuse.Status) {
 }
 
 // Release - FUSE call, close file
-func (f *file) Release() {
+func (f *File) Release() {
 	f.fdLock.Lock()
 	if f.released {
 		log.Panicf("ino%d fh%d: double release", f.qIno.Ino, f.intFd())
@@ -394,7 +394,7 @@ func (f *file) Release() {
 }
 
 // Flush - FUSE call
-func (f *file) Flush() fuse.Status {
+func (f *File) Flush() fuse.Status {
 	f.fdLock.RLock()
 	defer f.fdLock.RUnlock()
 
@@ -410,14 +410,14 @@ func (f *file) Flush() fuse.Status {
 	return fuse.ToStatus(err)
 }
 
-func (f *file) Fsync(flags int) (code fuse.Status) {
+func (f *File) Fsync(flags int) (code fuse.Status) {
 	f.fdLock.RLock()
 	defer f.fdLock.RUnlock()
 
 	return fuse.ToStatus(syscall.Fsync(int(f.fd.Fd())))
 }
 
-func (f *file) Chmod(mode uint32) fuse.Status {
+func (f *File) Chmod(mode uint32) fuse.Status {
 	f.fdLock.RLock()
 	defer f.fdLock.RUnlock()
 
@@ -427,14 +427,14 @@ func (f *file) Chmod(mode uint32) fuse.Status {
 	return fuse.ToStatus(err)
 }
 
-func (f *file) Chown(uid uint32, gid uint32) fuse.Status {
+func (f *File) Chown(uid uint32, gid uint32) fuse.Status {
 	f.fdLock.RLock()
 	defer f.fdLock.RUnlock()
 
 	return fuse.ToStatus(f.fd.Chown(int(uid), int(gid)))
 }
 
-func (f *file) GetAttr(a *fuse.Attr) fuse.Status {
+func (f *File) GetAttr(a *fuse.Attr) fuse.Status {
 	f.fdLock.RLock()
 	defer f.fdLock.RUnlock()
 
@@ -453,7 +453,7 @@ func (f *file) GetAttr(a *fuse.Attr) fuse.Status {
 	return fuse.OK
 }
 
-func (f *file) Utimens(a *time.Time, m *time.Time) fuse.Status {
+func (f *File) Utimens(a *time.Time, m *time.Time) fuse.Status {
 	f.fdLock.RLock()
 	defer f.fdLock.RUnlock()
 	return f.loopbackFile.Utimens(a, m)
diff --git a/internal/fusefrontend/file_allocate_truncate.go b/internal/fusefrontend/file_allocate_truncate.go
index 67254dc..e02fc06 100644
--- a/internal/fusefrontend/file_allocate_truncate.go
+++ b/internal/fusefrontend/file_allocate_truncate.go
@@ -36,7 +36,7 @@ var allocateWarnOnce sync.Once
 // complicated and hard to get right.
 //
 // Other modes (hole punching, zeroing) are not supported.
-func (f *file) Allocate(off uint64, sz uint64, mode uint32) fuse.Status {
+func (f *File) Allocate(off uint64, sz uint64, mode uint32) fuse.Status {
 	if mode != FALLOC_DEFAULT && mode != FALLOC_FL_KEEP_SIZE {
 		f := func() {
 			tlog.Warn.Printf("fallocate: only mode 0 (default) and 1 (keep size) are supported")
@@ -92,7 +92,7 @@ func (f *file) Allocate(off uint64, sz uint64, mode uint32) fuse.Status {
 }
 
 // Truncate - FUSE call
-func (f *file) Truncate(newSize uint64) fuse.Status {
+func (f *File) Truncate(newSize uint64) fuse.Status {
 	f.fdLock.RLock()
 	defer f.fdLock.RUnlock()
 	if f.released {
@@ -165,7 +165,7 @@ func (f *file) Truncate(newSize uint64) fuse.Status {
 }
 
 // statPlainSize stats the file and returns the plaintext size
-func (f *file) statPlainSize() (uint64, error) {
+func (f *File) statPlainSize() (uint64, error) {
 	fi, err := f.fd.Stat()
 	if err != nil {
 		tlog.Warn.Printf("ino%d fh%d: statPlainSize: %v", f.qIno.Ino, f.intFd(), err)
@@ -179,7 +179,7 @@ func (f *file) statPlainSize() (uint64, error) {
 // truncateGrowFile extends a file using seeking or ftruncate performing RMW on
 // the first and last block as necessary. New blocks in the middle become
 // file holes unless they have been fallocate()'d beforehand.
-func (f *file) truncateGrowFile(oldPlainSz uint64, newPlainSz uint64) fuse.Status {
+func (f *File) truncateGrowFile(oldPlainSz uint64, newPlainSz uint64) fuse.Status {
 	if newPlainSz <= oldPlainSz {
 		log.Panicf("BUG: newSize=%d <= oldSize=%d", newPlainSz, oldPlainSz)
 	}
diff --git a/internal/fusefrontend/file_holes.go b/internal/fusefrontend/file_holes.go
index d779191..2b95c5c 100644
--- a/internal/fusefrontend/file_holes.go
+++ b/internal/fusefrontend/file_holes.go
@@ -10,7 +10,7 @@ import (
 
 // Will a write to plaintext offset "targetOff" create a file hole in the
 // ciphertext? If yes, zero-pad the last ciphertext block.
-func (f *file) writePadHole(targetOff int64) fuse.Status {
+func (f *File) writePadHole(targetOff int64) fuse.Status {
 	// Get the current file size.
 	fi, err := f.fd.Stat()
 	if err != nil {
@@ -41,7 +41,7 @@ func (f *file) writePadHole(targetOff int64) fuse.Status {
 
 // Zero-pad the file of size plainSize to the next block boundary. This is a no-op
 // if the file is already block-aligned.
-func (f *file) zeroPad(plainSize uint64) fuse.Status {
+func (f *File) zeroPad(plainSize uint64) fuse.Status {
 	lastBlockLen := plainSize % f.contentEnc.PlainBS()
 	if lastBlockLen == 0 {
 		// Already block-aligned
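
Taken together, the commit is a mechanical rename that exports the fusefrontend file handle: type file struct becomes type File struct, every method receiver follows, and NewFile now returns the concrete *File instead of the nodefs.File interface, while the compile-time assertion var _ nodefs.File = &File{} keeps guaranteeing that *File still satisfies the interface. Below is a minimal caller sketch, assuming code that lives inside the gocryptfs tree and can therefore import the internal package; openAndRegister and its error handling are hypothetical illustrations, not part of this commit.

package fusefrontend

import (
	"os"

	"github.com/hanwen/go-fuse/fuse"
	"github.com/hanwen/go-fuse/fuse/nodefs"
)

// openAndRegister is a hypothetical helper (not in this commit) showing what
// the new NewFile signature buys callers: they receive the concrete *File and
// can keep it around, while still handing it to go-fuse as a nodefs.File.
func openAndRegister(path string, fs *FS) (*File, fuse.Status) {
	fd, err := os.OpenFile(path, os.O_RDWR, 0)
	if err != nil {
		return nil, fuse.ToStatus(err)
	}
	// Since this change, NewFile returns *File instead of nodefs.File.
	f, status := NewFile(fd, fs)
	if status != fuse.OK {
		fd.Close()
		return nil, status
	}
	// *File still implements nodefs.File, so no wrapper or type assertion
	// is needed when passing it back to the go-fuse layer.
	var _ nodefs.File = f
	return f, fuse.OK
}

Nothing changes for go-fuse itself; the rename only widens what other packages within the gocryptfs module can see of the file handle type.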