about summary refs log tree commit diff
path: root/library/std/src/sys/unix/kernel_copy.rs
diff options
context:
space:
mode:
authorThe8472 <git@infinite-source.de>2020-11-12 23:39:49 +0100
committerThe8472 <git@infinite-source.de>2020-11-13 22:38:27 +0100
commitbbfa92c82debed28417350b15fc6a2f46135346d (patch)
treec055bcfb0611b728298a87057f99d497919a55bb /library/std/src/sys/unix/kernel_copy.rs
parent4854d418a5245b07eca7dbec92a29d18af13a821 (diff)
downloadrust-bbfa92c82debed28417350b15fc6a2f46135346d.tar.gz
rust-bbfa92c82debed28417350b15fc6a2f46135346d.zip
Always handle EOVERFLOW by falling back to the generic copy loop
Previously, EOVERFLOW handling was only applied to the io::copy specialization,
but not to fs::copy, which shares the same code.

Additionally, we lower the chunk size to 1GB, since we have a user report
that older kernels may return EINVAL when passing 0x8000_0000,
while smaller values succeed.
Diffstat (limited to 'library/std/src/sys/unix/kernel_copy.rs')
-rw-r--r--library/std/src/sys/unix/kernel_copy.rs10
1 files changed, 5 insertions, 5 deletions
diff --git a/library/std/src/sys/unix/kernel_copy.rs b/library/std/src/sys/unix/kernel_copy.rs
index 99533dd3c07..ac2fcfcb53f 100644
--- a/library/std/src/sys/unix/kernel_copy.rs
+++ b/library/std/src/sys/unix/kernel_copy.rs
@@ -438,7 +438,6 @@ pub(super) enum CopyResult {
 /// Callers must handle fallback to a generic copy loop.
 /// `Fallback` may indicate non-zero number of bytes already written
 /// if one of the files' cursor +`max_len` would exceed u64::MAX (`EOVERFLOW`).
-/// If the initial file offset was 0 then `Fallback` will only contain `0`.
 pub(super) fn copy_regular_files(reader: RawFd, writer: RawFd, max_len: u64) -> CopyResult {
     use crate::cmp;
 
@@ -462,10 +461,10 @@ pub(super) fn copy_regular_files(reader: RawFd, writer: RawFd, max_len: u64) ->
     while written < max_len {
         let copy_result = if has_copy_file_range {
             let bytes_to_copy = cmp::min(max_len - written, usize::MAX as u64);
-            // cap to 2GB chunks in case u64::MAX is passed in as file size and the file has a non-zero offset
-            // this allows us to copy large chunks without hitting the limit,
-            // unless someone sets a file offset close to u64::MAX - 2GB, in which case the fallback would kick in
-            let bytes_to_copy = cmp::min(bytes_to_copy as usize, 0x8000_0000usize);
+            // cap to 1GB chunks in case u64::MAX is passed as max_len and the file has a non-zero seek position
+            // this allows us to copy large chunks without hitting EOVERFLOW,
+            // unless someone sets a file offset close to u64::MAX - 1GB, in which case a fallback would be required
+            let bytes_to_copy = cmp::min(bytes_to_copy as usize, 0x4000_0000usize);
             let copy_result = unsafe {
                 // We actually don't have to adjust the offsets,
                 // because copy_file_range adjusts the file offset automatically
@@ -560,6 +559,7 @@ fn sendfile_splice(mode: SpliceMode, reader: RawFd, writer: RawFd, len: u64) ->
 
     let mut written = 0u64;
     while written < len {
+        // according to its manpage that's the maximum size sendfile() will copy per invocation
         let chunk_size = crate::cmp::min(len - written, 0x7ffff000_u64) as usize;
 
         let result = match mode {