author     Jakob Unterwurzacher  2017-10-17 21:47:32 +0200
committer  Jakob Unterwurzacher  2017-10-17 21:48:29 +0200
commit     3009ec9852316c3c696f77f476390ab5a6d8d6d7 (patch)
tree       e64b84f0cdda14b32fe986918aeae7f7be465a4d
parent     64e5906ffa1f225a51048b3d0ac6b1a09e2ca170 (diff)
fusefrontend: clamp oversized reads
Our byte cache pools are sized according to MAX_KERNEL_WRITE, but the running kernel may have a higher limit set. Clamp to what we can handle. Fixes a panic on a Synology NAS reported at https://github.com/rfjakob/gocryptfs/issues/145
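For context, here is a minimal standalone sketch (not part of this commit) of the failure mode the clamp prevents: the byte cache pools hand out buffers whose capacity is derived from MAX_KERNEL_WRITE, and reslicing such a buffer past its capacity panics with "slice bounds out of range". The 128 KiB value below is an assumption standing in for fuse.MAX_KERNEL_WRITE.

package main

import "fmt"

// Assumed stand-in for fuse.MAX_KERNEL_WRITE.
const maxKernelWrite = 128 * 1024

func main() {
	// Pool buffers are allocated with capacity sized for maxKernelWrite.
	buf := make([]byte, 0, maxKernelWrite)

	// A request within the expected limit reslices fine.
	_ = buf[:maxKernelWrite]

	// A kernel with a higher limit can ask for more than the pooled
	// buffer can hold; reslicing past the capacity panics at runtime.
	defer func() {
		if r := recover(); r != nil {
			fmt.Println("recovered from:", r)
		}
	}()
	oversized := maxKernelWrite + 4096
	_ = buf[:oversized] // panics: slice bounds out of range
}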
-rw-r--r--  internal/contentenc/content.go |  2
-rw-r--r--  internal/fusefrontend/file.go  | 15
2 files changed, 16 insertions(+), 1 deletion(-)
diff --git a/internal/contentenc/content.go b/internal/contentenc/content.go
index f48080f..e6f79a6 100644
--- a/internal/contentenc/content.go
+++ b/internal/contentenc/content.go
@@ -194,6 +194,8 @@ func (be *ContentEnc) DecryptBlock(ciphertext []byte, blockNo uint64, fileID []b
const encryptMaxSplit = 2
// EncryptBlocks is like EncryptBlock but takes multiple plaintext blocks.
+// Returns a byte slice from CReqPool - so don't forget to return it
+// to the pool.
func (be *ContentEnc) EncryptBlocks(plaintextBlocks [][]byte, firstBlockNo uint64, fileID []byte) []byte {
ciphertextBlocks := make([][]byte, len(plaintextBlocks))
// For large writes, we parallelize encryption.
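The new EncryptBlocks comment establishes a caller contract: the returned slice comes from CReqPool and must eventually go back to it. Below is a generic, runnable sketch of that Get/Put discipline (not gocryptfs code; sync.Pool stands in for CReqPool, and encryptBlocks is a hypothetical placeholder for EncryptBlocks).

package main

import (
	"fmt"
	"sync"
)

// bufPool stands in for ContentEnc.CReqPool: a pool of byte slices sized
// for the largest expected request.
var bufPool = sync.Pool{
	New: func() interface{} { return make([]byte, 0, 128*1024) },
}

// encryptBlocks is a placeholder for EncryptBlocks: it returns a slice
// taken from the pool, so the caller is responsible for putting it back.
func encryptBlocks(plaintext []byte) []byte {
	buf := bufPool.Get().([]byte)
	return append(buf[:0], plaintext...) // actual encryption elided
}

func main() {
	ciphertext := encryptBlocks([]byte("hello world"))
	fmt.Printf("got %d pooled ciphertext bytes\n", len(ciphertext))
	// Caller contract from the comment above: return the buffer so the
	// pool does not have to allocate a fresh large slice per request.
	bufPool.Put(ciphertext)
}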
diff --git a/internal/fusefrontend/file.go b/internal/fusefrontend/file.go
index 30e6e3d..be15280 100644
--- a/internal/fusefrontend/file.go
+++ b/internal/fusefrontend/file.go
@@ -132,6 +132,8 @@ func (f *file) createHeader() (fileID []byte, err error) {
return h.ID, err
}
+var oversizedReadWarn sync.Once
+
// doRead - read "length" plaintext bytes from plaintext offset "off" and append
// to "dst".
// Arguments "length" and "off" do not have to be block-aligned.
@@ -142,6 +144,16 @@ func (f *file) createHeader() (fileID []byte, err error) {
// Called by Read() for normal reading,
// by Write() and Truncate() for Read-Modify-Write
func (f *file) doRead(dst []byte, off uint64, length uint64) ([]byte, fuse.Status) {
+	// Our byte cache pools are sized acc. to MAX_KERNEL_WRITE, but the
+	// running kernel may have a higher limit set. Clamp to what we can
+	// handle.
+	if length > fuse.MAX_KERNEL_WRITE {
+		oversizedReadWarn.Do(func() {
+			tlog.Warn.Printf("doRead: truncating oversized read: %d to %d bytes",
+				length, fuse.MAX_KERNEL_WRITE)
+		})
+		length = fuse.MAX_KERNEL_WRITE
+	}
// Make sure we have the file ID.
f.fileTableEntry.HeaderLock.RLock()
if f.fileTableEntry.ID == nil {
@@ -170,7 +182,8 @@ func (f *file) doRead(dst []byte, off uint64, length uint64) ([]byte, fuse.Statu
blocks := f.contentEnc.ExplodePlainRange(off, length)
alignedOffset, alignedLength := blocks[0].JointCiphertextRange(blocks)
skip := blocks[0].Skip
- tlog.Debug.Printf("JointCiphertextRange(%d, %d) -> %d, %d, %d", off, length, alignedOffset, alignedLength, skip)
+	tlog.Debug.Printf("doRead: off=%d len=%d -> off=%d len=%d skip=%d\n",
+		off, length, alignedOffset, alignedLength, skip)
ciphertext := f.fs.contentEnc.CReqPool.Get()
ciphertext = ciphertext[:int(alignedLength)]
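Taken together, the file.go changes implement a "warn once, then clamp" pattern. Below is a generic, runnable sketch of that pattern (request sizes made up; log stands in for tlog, and the 128 KiB constant is an assumed value for fuse.MAX_KERNEL_WRITE). It illustrates why sync.Once is used: many oversized reads may arrive concurrently, but only one warning should reach the log.

package main

import (
	"log"
	"sync"
)

// Assumed stand-in for fuse.MAX_KERNEL_WRITE.
const maxKernelWrite = 128 * 1024

var oversizedReadWarn sync.Once

// clampLength mirrors the clamp added to doRead: warn once, then cap the
// request at what the buffer pools can handle.
func clampLength(length uint64) uint64 {
	if length > maxKernelWrite {
		oversizedReadWarn.Do(func() {
			log.Printf("truncating oversized read: %d to %d bytes", length, maxKernelWrite)
		})
		length = maxKernelWrite
	}
	return length
}

func main() {
	var wg sync.WaitGroup
	for i := 0; i < 4; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			clampLength(1024 * 1024) // oversized; only the first call logs
		}()
	}
	wg.Wait()
}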