
Commit ab34bb7

pkg/detach: fix broken Copy() detach sequence
The code could only handle the detach sequence when it read one byte at a time. That is obviously not correct and led to issues in my podman PR [1], where I added automated tests for detaching: how much a single read returns is essentially undefined and depends on the input side and kernel scheduling.

This is a larger rework that makes the code check for the key sequence across the entire buffer. That is of course more work, but it is needed for this to work correctly. I guess the only reason this was never noticed is that users normally detach manually by typing, which is much slower than an automated test and therefore likely delivers the bytes one by one.

I added a new test to confirm the behavior, and to ensure it works with various read sizes I made it a fuzz test. I had the fuzzer running for a while and did not spot any issues there; the old code already fails on the simple test cases.

[1] containers/podman#25083

Signed-off-by: Paul Holzinger <pholzing@redhat.com>
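To make the failure mode concrete: with a multi-byte detach sequence, a single Read() that returns the sequence together with other data never satisfied the old one-byte-at-a-time check, while the reworked Copy() finds it anywhere in the buffer. A minimal caller-side sketch, assuming the containers/common module path; the sample data and the 'A'/'B' sequence are made up for illustration:

package main

import (
	"bytes"
	"fmt"

	// assumed import path for this package
	"github.com/containers/common/pkg/detach"
)

func main() {
	keys := []byte{'A', 'B'} // illustrative detach sequence

	// bytes.Reader hands everything back in a single Read(), so the detach
	// sequence arrives in the middle of a multi-byte buffer rather than as
	// two separate one-byte reads.
	src := bytes.NewReader([]byte("container outputABmore data"))
	dst := &bytes.Buffer{}

	written, err := detach.Copy(dst, src, keys)
	// Expected with the reworked Copy: err == detach.ErrDetach and only
	// "container output" (16 bytes) was forwarded to dst.
	fmt.Printf("written=%d dst=%q detached=%v\n", written, dst.Bytes(), err == detach.ErrDetach)
}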

File tree: 2 files changed, +183 −31 lines changed

pkg/detach/copy.go

Lines changed: 58 additions & 31 deletions
@@ -1,6 +1,7 @@
 package detach
 
 import (
+	"bytes"
 	"errors"
 	"io"
 )
@@ -11,47 +12,73 @@ var ErrDetach = errors.New("detached from container")
 
 // Copy is similar to io.Copy but support a detach key sequence to break out.
 func Copy(dst io.Writer, src io.Reader, keys []byte) (written int64, err error) {
+	// if no key sequence we can use the fast std lib implementation
+	if len(keys) == 0 {
+		return io.Copy(dst, src)
+	}
 	buf := make([]byte, 32*1024)
+	tmpKeyBuf := make([]byte, 0, len(keys))
+outer:
 	for {
 		nr, er := src.Read(buf)
-		if nr > 0 {
-			preservBuf := []byte{}
-			for i, key := range keys {
-				preservBuf = append(preservBuf, buf[0:nr]...)
-				if nr != 1 || buf[0] != key {
-					break
-				}
-				if i == len(keys)-1 {
-					return 0, ErrDetach
+
+		// previous key buffer
+		if len(tmpKeyBuf) > 0 {
+			bytesToCheck := min(nr, len(keys)-len(tmpKeyBuf))
+			if bytes.Equal(buf[:bytesToCheck], keys[len(tmpKeyBuf):]) {
+				if len(tmpKeyBuf)+bytesToCheck == len(keys) {
+					// we are done
+					return written, ErrDetach
 				}
-				nr, er = src.Read(buf)
-			}
-			var nw int
-			var ew error
-			if len(preservBuf) > 0 {
-				nw, ew = dst.Write(preservBuf)
-				nr = len(preservBuf)
-			} else {
-				nw, ew = dst.Write(buf[0:nr])
-			}
-			if nw > 0 {
-				written += int64(nw)
+				tmpKeyBuf = append(tmpKeyBuf, buf[:bytesToCheck]...)
+				continue outer
			}
+			// No match, write buffered keys now
+			nw, ew := dst.Write(tmpKeyBuf)
 			if ew != nil {
-				err = ew
-				break
-			}
-			if nr != nw {
-				err = io.ErrShortWrite
-				break
+				return written, ew
 			}
+			written += int64(nw)
+			tmpKeyBuf = tmpKeyBuf[:0]
 		}
+
 		if er != nil {
-			if er != io.EOF {
-				err = er
+			if er == io.EOF {
+				return written, nil
 			}
-			break
+			return written, err
+		}
+
+		readMinusKeys := nr - len(keys)
+		for i := 0; i < readMinusKeys; i++ {
+			if bytes.Equal(buf[i:i+len(keys)], keys) {
+				if i > 0 {
+					nw, ew := dst.Write(buf[:i])
+					if ew != nil {
+						return written, ew
+					}
+					written += int64(nw)
+				}
+				return written, ErrDetach
+			}
+		}
+
+		for i := max(readMinusKeys, 0); i < nr; i++ {
+			if bytes.Equal(buf[i:nr], keys[:nr-i]) {
+				nw, ew := dst.Write(buf[:i])
+				if ew != nil {
+					return written, ew
+				}
+				written += int64(nw)
+				tmpKeyBuf = append(tmpKeyBuf, buf[i:nr]...)
+				continue outer
+			}
+		}
+
+		nw, ew := dst.Write(buf[:nr])
+		if ew != nil {
+			return written, ew
 		}
+		written += int64(nw)
 	}
-	return written, err
 }
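The subtle case in the rework above is a sequence that straddles two reads: a buffer tail that is only a prefix of the keys is parked in tmpKeyBuf and, on the next read, either completes the sequence or is flushed back to dst. A small sketch of that behaviour, again assuming the containers/common import path and using testing/iotest.OneByteReader purely to force one-byte reads; the sample data is made up:

package main

import (
	"bytes"
	"fmt"
	"testing/iotest"

	"github.com/containers/common/pkg/detach" // assumed module path
)

func main() {
	keys := []byte{'A', 'B'}

	// One byte per Read(): 'A' is buffered as a possible sequence start,
	// then confirmed by 'B' on the following read.
	src := iotest.OneByteReader(bytes.NewReader([]byte("dataAB")))
	dst := &bytes.Buffer{}
	_, err := detach.Copy(dst, src, keys)
	fmt.Printf("%q err=%v\n", dst.Bytes(), err) // "data" err=detached from container

	// A lone 'A' that is not followed by 'B' is flushed to dst instead.
	src = iotest.OneByteReader(bytes.NewReader([]byte("dataAx")))
	dst = &bytes.Buffer{}
	_, err = detach.Copy(dst, src, keys)
	fmt.Printf("%q err=%v\n", dst.Bytes(), err) // "dataAx" err=<nil>
}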

pkg/detach/copy_test.go

Lines changed: 125 additions & 0 deletions
@@ -0,0 +1,125 @@
+package detach
+
+import (
+	"bytes"
+	"strings"
+	"testing"
+
+	"github.com/stretchr/testify/assert"
+)
+
+var (
+	smallBytes = []byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9}
+	bigBytes   = []byte(strings.Repeat("0F", 32*1024+30))
+)
+
+func newCustomReader(buf *bytes.Buffer, readsize uint) *customReader {
+	return &customReader{
+		inner:    buf,
+		readsize: readsize,
+	}
+}
+
+type customReader struct {
+	inner    *bytes.Buffer
+	readsize uint
+}
+
+func (c *customReader) Read(p []byte) (n int, err error) {
+	return c.inner.Read(p[:min(int(c.readsize), len(p))])
+}
+
+func FuzzCopy(f *testing.F) {
+	for _, i := range []uint{1, 2, 3, 5, 10, 100, 200, 1000, 1024, 32 * 1024} {
+		f.Add(i)
+	}
+
+	f.Fuzz(func(t *testing.T, readSize uint) {
+		// 0 is not a valid read size
+		if readSize == 0 {
+			t.Skip()
+		}
+
+		tests := []struct {
+			name         string
+			from         []byte
+			expected     []byte
+			expectDetach bool
+			keys         []byte
+		}{
+			{
+				name:     "small copy",
+				from:     smallBytes,
+				expected: smallBytes,
+				keys:     nil,
+			},
+			{
+				name:     "small copy with detach keys",
+				from:     smallBytes,
+				expected: smallBytes,
+				keys:     []byte{'A', 'B'},
+			},
+			{
+				name:     "big copy",
+				from:     bigBytes,
+				expected: bigBytes,
+				keys:     nil,
+			},
+			{
+				name:     "big copy with detach keys",
+				from:     bigBytes,
+				expected: bigBytes,
+				keys:     []byte{'A', 'B'},
+			},
+			{
+				name:         "simple detach 1 key",
+				from:         append(smallBytes, 'A'),
+				expected:     smallBytes,
+				expectDetach: true,
+				keys:         []byte{'A'},
+			},
+			{
+				name:         "simple detach 2 keys",
+				from:         append(smallBytes, 'A', 'B'),
+				expected:     smallBytes,
+				expectDetach: true,
+				keys:         []byte{'A', 'B'},
+			},
+			{
+				name:         "detach early",
+				from:         append(smallBytes, 'A', 'B', 'B', 'A'),
+				expected:     smallBytes,
+				expectDetach: true,
+				keys:         []byte{'A', 'B'},
+			},
+			{
+				name:         "detach with partial match",
+				from:         append(smallBytes, 'A', 'A', 'A', 'B'),
+				expected:     append(smallBytes, 'A', 'A'),
+				expectDetach: true,
+				keys:         []byte{'A', 'B'},
+			},
+			{
+				name:         "big buffer detach with partial match",
+				from:         append(bigBytes, 'A', 'A', 'A', 'B'),
+				expected:     append(bigBytes, 'A', 'A'),
+				expectDetach: true,
+				keys:         []byte{'A', 'B'},
+			},
+		}
+		for _, tt := range tests {
+			t.Run(tt.name, func(t *testing.T) {
+				dst := &bytes.Buffer{}
+				src := newCustomReader(bytes.NewBuffer(tt.from), readSize)
+				written, err := Copy(dst, src, tt.keys)
+				if tt.expectDetach {
+					assert.ErrorIs(t, err, ErrDetach)
+				} else {
+					assert.NoError(t, err)
+				}
+				assert.Equal(t, dst.Len(), int(written), "bytes written matches buffer")
+				assert.Equal(t, tt.expected, dst.Bytes(), "buffer matches")
+			})
+		}
+	})
+}
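For reference, the seed read sizes registered with f.Add run as ordinary cases under a plain go test ./pkg/detach/, while go test -fuzz=FuzzCopy ./pkg/detach/ keeps generating new read sizes until it is interrupted or finds a failing input; that is standard Go fuzzing behaviour rather than anything specific to this change.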
