| author | Jakob Unterwurzacher | 2017-05-01 21:57:18 +0200 | 
|---|---|---|
| committer | Jakob Unterwurzacher | 2017-05-01 21:57:18 +0200 | 
| commit | fb3cc6ea407b83e4e1acf4e1a80e3b7d09c5b1db (patch) | |
| tree | 9a766eed46d7d27f8681193b22e9b42939d7fbfe /internal | |
| parent | f322ee87e31a6ceb04c25fa62dcde4df6e45f92a (diff) | |
openfiletable: rename WriteLock to ContentLock
...and IDLock to HeaderLock. This matches what the locks actually
protect.
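
As a rough illustration of what the renamed locks are meant to guard (a minimal sketch with made-up names such as `entry`, `writeContent`, and `dropHeader`, not code from the repository): a writer serializes on ContentLock and only needs a read lock on HeaderLock, while anything that changes the file ID takes HeaderLock exclusively.

```go
// Illustrative sketch only: shows which lock guards what after the rename.
// The type and helper names here are hypothetical, not gocryptfs code.
package main

import "sync"

type entry struct {
	// ContentLock serializes writers of the file content.
	ContentLock sync.Mutex
	// HeaderLock guards the in-memory file ID and the on-disk header.
	HeaderLock sync.RWMutex
	ID         []byte
}

// writeContent is what a writer would do: take ContentLock for the data,
// but only an RLock on HeaderLock, because it merely reads the file ID.
func writeContent(e *entry, data []byte) {
	e.ContentLock.Lock()
	defer e.ContentLock.Unlock()

	e.HeaderLock.RLock()
	id := e.ID // must not change while blocks are encrypted against it
	e.HeaderLock.RUnlock()

	_ = id
	_ = data // ... encrypt and write blocks ...
}

// dropHeader is what truncate-to-zero would do: the header is destroyed,
// so the file ID changes and the exclusive HeaderLock is required.
func dropHeader(e *entry) {
	e.HeaderLock.Lock()
	e.ID = nil
	e.HeaderLock.Unlock()
}

func main() {
	e := &entry{ID: []byte("example-id")}
	writeContent(e, []byte("hello"))
	dropHeader(e)
}
```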
Diffstat (limited to 'internal')
| -rw-r--r-- | internal/fusefrontend/file.go | 38 |
| -rw-r--r-- | internal/fusefrontend/file_allocate_truncate.go | 16 |
| -rw-r--r-- | internal/openfiletable/open_file_table.go | 25 |
3 files changed, 42 insertions, 37 deletions
diff --git a/internal/fusefrontend/file.go b/internal/fusefrontend/file.go
index 9784b3e..57ec9ba 100644
--- a/internal/fusefrontend/file.go
+++ b/internal/fusefrontend/file.go
@@ -142,27 +142,27 @@ func (f *file) createHeader() (fileID []byte, err error) {
 // by Write() and Truncate() for Read-Modify-Write
 func (f *file) doRead(off uint64, length uint64) ([]byte, fuse.Status) {
 	// Make sure we have the file ID.
-	f.fileTableEntry.IDLock.RLock()
+	f.fileTableEntry.HeaderLock.RLock()
 	if f.fileTableEntry.ID == nil {
-		f.fileTableEntry.IDLock.RUnlock()
+		f.fileTableEntry.HeaderLock.RUnlock()
 		// Yes, somebody else may take the lock before we can. This will get
 		// the header read twice, but causes no harm otherwise.
-		f.fileTableEntry.IDLock.Lock()
+		f.fileTableEntry.HeaderLock.Lock()
 		tmpID, err := f.readFileID()
 		if err == io.EOF {
-			f.fileTableEntry.IDLock.Unlock()
+			f.fileTableEntry.HeaderLock.Unlock()
 			return nil, fuse.OK
 		}
 		if err != nil {
-			f.fileTableEntry.IDLock.Unlock()
+			f.fileTableEntry.HeaderLock.Unlock()
 			return nil, fuse.ToStatus(err)
 		}
 		f.fileTableEntry.ID = tmpID
 		// Downgrade the lock.
-		f.fileTableEntry.IDLock.Unlock()
+		f.fileTableEntry.HeaderLock.Unlock()
 		// The file ID may change in here. This does no harm because we
 		// re-read it after the RLock().
-		f.fileTableEntry.IDLock.RLock()
+		f.fileTableEntry.HeaderLock.RLock()
 	}
 	fileID := f.fileTableEntry.ID
 	// Read the backing ciphertext in one go
@@ -174,7 +174,7 @@ func (f *file) doRead(off uint64, length uint64) ([]byte, fuse.Status) {
 	ciphertext := make([]byte, int(alignedLength))
 	n, err := f.fd.ReadAt(ciphertext, int64(alignedOffset))
 	// We don't care if the file ID changes after we have read the data. Drop the lock.
-	f.fileTableEntry.IDLock.RUnlock()
+	f.fileTableEntry.HeaderLock.RUnlock()
 	if err != nil && err != io.EOF {
 		tlog.Warn.Printf("read: ReadAt: %s", err.Error())
 		return nil, fuse.ToStatus(err)
@@ -253,27 +253,27 @@ func (f *file) Read(buf []byte, off int64) (resultData fuse.ReadResult, code fus
 // Empty writes do nothing and are allowed.
 func (f *file) doWrite(data []byte, off int64) (uint32, fuse.Status) {
 	// Read header from disk, create a new one if the file is empty
-	f.fileTableEntry.IDLock.RLock()
+	f.fileTableEntry.HeaderLock.RLock()
 	if f.fileTableEntry.ID == nil {
-		f.fileTableEntry.IDLock.RUnlock()
+		f.fileTableEntry.HeaderLock.RUnlock()
 		// Somebody else may write the header here, but this would do no harm.
-		f.fileTableEntry.IDLock.Lock()
+		f.fileTableEntry.HeaderLock.Lock()
 		tmpID, err := f.readFileID()
 		if err == io.EOF {
 			tmpID, err = f.createHeader()
 		}
 		if err != nil {
-			f.fileTableEntry.IDLock.Unlock()
+			f.fileTableEntry.HeaderLock.Unlock()
 			return 0, fuse.ToStatus(err)
 		}
 		f.fileTableEntry.ID = tmpID
-		f.fileTableEntry.IDLock.Unlock()
+		f.fileTableEntry.HeaderLock.Unlock()
 		// The file ID may change in here. This does no harm because we
 		// re-read it after the RLock().
-		f.fileTableEntry.IDLock.RLock()
+		f.fileTableEntry.HeaderLock.RLock()
 	}
 	fileID := f.fileTableEntry.ID
-	defer f.fileTableEntry.IDLock.RUnlock()
+	defer f.fileTableEntry.HeaderLock.RUnlock()
 	// Handle payload data
 	status := fuse.OK
 	dataBuf := bytes.NewBuffer(data)
@@ -336,7 +336,7 @@ func (f *file) doWrite(data []byte, off int64) (uint32, fuse.Status) {
 // Stat() call is very expensive.
 // The caller must "wlock.lock(f.devIno.ino)" otherwise this check would be racy.
 func (f *file) isConsecutiveWrite(off int64) bool {
-	opCount := openfiletable.WriteLockCount()
+	opCount := openfiletable.WriteOpCount()
 	return opCount == f.lastOpCount+1 && off == f.lastWrittenOffset+1
 }
 
@@ -353,8 +353,8 @@ func (f *file) Write(data []byte, off int64) (uint32, fuse.Status) {
 		tlog.Warn.Printf("ino%d fh%d: Write on released file", f.qIno.Ino, f.intFd())
 		return 0, fuse.EBADF
 	}
-	f.fileTableEntry.WriteLock.Lock()
-	defer f.fileTableEntry.WriteLock.Unlock()
+	f.fileTableEntry.ContentLock.Lock()
+	defer f.fileTableEntry.ContentLock.Unlock()
 	tlog.Debug.Printf("ino%d: FUSE Write: offset=%d length=%d", f.qIno.Ino, off, len(data))
 	// If the write creates a file hole, we have to zero-pad the last block.
 	// But if the write directly follows an earlier write, it cannot create a
@@ -367,7 +367,7 @@ func (f *file) Write(data []byte, off int64) (uint32, fuse.Status) {
 	}
 	n, status := f.doWrite(data, off)
 	if status.Ok() {
-		f.lastOpCount = openfiletable.WriteLockCount()
+		f.lastOpCount = openfiletable.WriteOpCount()
 		f.lastWrittenOffset = off + int64(len(data)) - 1
 	}
 	return n, status
diff --git a/internal/fusefrontend/file_allocate_truncate.go b/internal/fusefrontend/file_allocate_truncate.go
index acde76f..85ed11c 100644
--- a/internal/fusefrontend/file_allocate_truncate.go
+++ b/internal/fusefrontend/file_allocate_truncate.go
@@ -50,8 +50,8 @@ func (f *file) Allocate(off uint64, sz uint64, mode uint32) fuse.Status {
 	if f.released {
 		return fuse.EBADF
 	}
-	f.fileTableEntry.WriteLock.Lock()
-	defer f.fileTableEntry.WriteLock.Unlock()
+	f.fileTableEntry.ContentLock.Lock()
+	defer f.fileTableEntry.ContentLock.Unlock()
 
 	blocks := f.contentEnc.ExplodePlainRange(off, sz)
 	firstBlock := blocks[0]
@@ -100,8 +100,8 @@ func (f *file) Truncate(newSize uint64) fuse.Status {
 		tlog.Warn.Printf("ino%d fh%d: Truncate on released file", f.qIno.Ino, f.intFd())
 		return fuse.EBADF
 	}
-	f.fileTableEntry.WriteLock.Lock()
-	defer f.fileTableEntry.WriteLock.Unlock()
+	f.fileTableEntry.ContentLock.Lock()
+	defer f.fileTableEntry.ContentLock.Unlock()
 	var err error
 	// Common case first: Truncate to zero
 	if newSize == 0 {
@@ -111,9 +111,9 @@ func (f *file) Truncate(newSize uint64) fuse.Status {
 			return fuse.ToStatus(err)
 		}
 		// Truncate to zero kills the file header
-		f.fileTableEntry.IDLock.Lock()
+		f.fileTableEntry.HeaderLock.Lock()
 		f.fileTableEntry.ID = nil
-		f.fileTableEntry.IDLock.Unlock()
+		f.fileTableEntry.HeaderLock.Unlock()
 		return fuse.OK
 	}
 	// We need the old file size to determine if we are growing or shrinking
@@ -206,8 +206,8 @@ func (f *file) truncateGrowFile(oldPlainSz uint64, newPlainSz uint64) fuse.Statu
 	if newPlainSz%f.contentEnc.PlainBS() == 0 {
 		// The file was empty, so it did not have a header. Create one.
 		if oldPlainSz == 0 {
-			f.fileTableEntry.IDLock.Lock()
-			defer f.fileTableEntry.IDLock.Unlock()
+			f.fileTableEntry.HeaderLock.Lock()
+			defer f.fileTableEntry.HeaderLock.Unlock()
 			id, err := f.createHeader()
 			if err != nil {
 				return fuse.ToStatus(err)
diff --git a/internal/openfiletable/open_file_table.go b/internal/openfiletable/open_file_table.go
index 7cd401e..d70bcfa 100644
--- a/internal/openfiletable/open_file_table.go
+++ b/internal/openfiletable/open_file_table.go
@@ -42,13 +42,13 @@ func init() {
 }
 
 type table struct {
-	// writeLockCount counts entry.writeLock.Lock() calls. As every operation that
+	// writeOpCount counts entry.ContentLock.Lock() calls. As every operation that
 	// modifies a file should
 	// call it, this effectively serves as a write-operation counter.
 	// The variable is accessed without holding any locks so atomic operations
 	// must be used. It must be the first element of the struct to guarantee
 	// 64-bit alignment.
-	writeLockCount uint64
+	writeOpCount uint64
 	// Protects map access
 	sync.Mutex
 	// Table entries
@@ -59,11 +59,16 @@ type table struct {
 type Entry struct {
 	// Reference count
 	refCount int
-	// Write lock for this inode
-	WriteLock countingMutex
+	// ContentLock guards the file content from concurrent writes. Every writer
+	// must take this lock before modifying the file content.
+	ContentLock countingMutex
+	// HeaderLock guards the file ID (in this struct) and the file header (on
+	// disk). Take HeaderLock.RLock() to make sure the file ID does not change
+	// behind your back. If you modify the file ID, you must take
+	// HeaderLock.Lock().
+	HeaderLock sync.RWMutex
 	// ID is the file ID in the file header.
-	ID     []byte
-	IDLock sync.RWMutex
+	ID []byte
 }
 
 // Register creates an open file table entry for "qi" (or incrementes the
@@ -101,11 +106,11 @@ type countingMutex struct {
 
 func (c *countingMutex) Lock() {
 	c.Mutex.Lock()
-	atomic.AddUint64(&t.writeLockCount, 1)
+	atomic.AddUint64(&t.writeOpCount, 1)
 }
 
-// WriteLockCount returns the write lock counter value. This value is encremented
+// WriteOpCount returns the write lock counter value. This value is encremented
 // each time writeLock.Lock() on a file table entry is called.
-func WriteLockCount() uint64 {
-	return atomic.LoadUint64(&t.writeLockCount)
+func WriteOpCount() uint64 {
+	return atomic.LoadUint64(&t.writeOpCount)
 }
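
The doRead()/doWrite() hunks above both use the same lazy-initialization pattern on HeaderLock: take the read lock, and only if the ID is still missing, upgrade to the write lock, fill it in, then downgrade back to the read lock. Below is a stripped-down sketch of that pattern with hypothetical names (`entry`, `loadID`, `currentID`), assuming, as the code comments state, that an occasional duplicate header read is harmless:

```go
// Sketch of the RLock -> Lock -> RLock "upgrade and downgrade" pattern seen in
// doRead()/doWrite(). Names are hypothetical, not the gocryptfs API.
package main

import (
	"fmt"
	"sync"
)

type entry struct {
	HeaderLock sync.RWMutex
	ID         []byte
}

// loadID stands in for reading the file header from disk.
func loadID() []byte { return []byte("id-from-header") }

// currentID returns the file ID, initializing it on first use.
func currentID(e *entry) []byte {
	e.HeaderLock.RLock()
	if e.ID == nil {
		// Upgrade: drop the read lock and take the write lock. Another
		// goroutine may sneak in between and load the ID too, which only
		// costs a harmless duplicate read.
		e.HeaderLock.RUnlock()
		e.HeaderLock.Lock()
		e.ID = loadID()
		// Downgrade: release the write lock and take the read lock again.
		// The ID may change in the gap, so it is re-read below under RLock.
		e.HeaderLock.Unlock()
		e.HeaderLock.RLock()
	}
	id := e.ID
	e.HeaderLock.RUnlock()
	return id
}

func main() {
	e := &entry{}
	fmt.Printf("%s\n", currentID(e))
}
```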
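The open_file_table.go hunk also relies on a mutex that counts how often it has been locked. Here is a minimal sketch of that counting-mutex idea, using a package-level counter instead of the `table` struct field (the struct field placement is what forces the 64-bit-alignment note in the real diff); the names are illustrative, not the gocryptfs API:

```go
// Minimal sketch of the counting-mutex pattern: Lock() also bumps a global
// write-operation counter that can be read without holding any lock.
package main

import (
	"fmt"
	"sync"
	"sync/atomic"
)

// writeOpCount is read and written without holding any lock, so all access
// must go through sync/atomic.
var writeOpCount uint64

// countingMutex increments the counter every time it is locked, so the
// counter effectively numbers the write operations.
type countingMutex struct {
	sync.Mutex
}

func (c *countingMutex) Lock() {
	c.Mutex.Lock()
	atomic.AddUint64(&writeOpCount, 1)
}

// WriteOpCount returns the current value of the counter.
func WriteOpCount() uint64 {
	return atomic.LoadUint64(&writeOpCount)
}

func main() {
	var m countingMutex
	before := WriteOpCount()

	m.Lock() // counts as one write operation
	// ... modify file content here ...
	m.Unlock()

	// Comparing counter values is how a caller like isConsecutiveWrite()
	// can tell that exactly one write happened since it last looked.
	fmt.Println(WriteOpCount() == before+1) // true
}
```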
