author     Miss Islington (bot) <31488909+miss-islington@users.noreply.github.com>  2022-11-20 12:53:02 -0800
committer  GitHub <noreply@github.com>  2022-11-20 12:53:02 -0800
commit     f38164481974a5ca643ec9ae19f118d8ad32353f (patch)
tree       f7f9408dc78f1f64b251088dd771ce2508bc6bd4
parent     [3.11] gh-99211: Point to except/except* on syntax errors when mixing them (G... (diff)
gh-99581: Fix a buffer overflow in the tokenizer when copying lines that fill the available buffer (GH-99605)
(cherry picked from commit e13d1d9dda8c27691180bc618bd5e9bf43dfa89f)

Co-authored-by: Pablo Galindo Salgado <Pablogsal@gmail.com>
-rw-r--r--  Lib/test/test_tokenize.py                                                         16
-rw-r--r--  Misc/NEWS.d/next/Core and Builtins/2022-11-19-22-27-52.gh-issue-99581.yKYPbf.rst   3
-rw-r--r--  Parser/tokenizer.c                                                                  7
3 files changed, 25 insertions(+), 1 deletion(-)
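The overflow occurs when tok_readline_recode() copies a decoded line into the tokenizer buffer and reserves only buflen + 1 bytes: enough for the line and its NUL terminator, but not for the extra '\n' that tok_underflow_file() appends when the last line of the file has no trailing newline. Below is a standalone sketch of the corrected reservation pattern; the helpers grow_buffer() and append_line() are illustrative only and are not part of the CPython tokenizer.

/* Standalone sketch of the reservation pattern this commit fixes;
 * the helpers below are illustrative, not CPython code. */
#include <stdlib.h>
#include <string.h>

/* Ensure the buffer can hold at least `needed` more bytes past `len`. */
static int grow_buffer(char **buf, size_t *cap, size_t len, size_t needed)
{
    if (len + needed <= *cap) {
        return 1;
    }
    size_t new_cap = (len + needed) * 2;
    char *p = realloc(*buf, new_cap);
    if (p == NULL) {
        return 0;
    }
    *buf = p;
    *cap = new_cap;
    return 1;
}

/* Copy `line` (linelen bytes, possibly without a trailing '\n') and
 * terminate it the way the tokenizer does: fake a newline if the line
 * lacks one, then write the NUL terminator. */
static int append_line(char **buf, size_t *cap, size_t *len,
                       const char *line, size_t linelen)
{
    /* linelen bytes of data + a possible fake '\n' + '\0'  =>  linelen + 2.
     * Reserving only linelen + 1 (the old code) overflows by one byte
     * whenever the fake newline is needed. */
    if (!grow_buffer(buf, cap, *len, linelen + 2)) {
        return 0;
    }
    memcpy(*buf + *len, line, linelen);
    *len += linelen;
    if (*len == 0 || (*buf)[*len - 1] != '\n') {
        (*buf)[(*len)++] = '\n';
    }
    (*buf)[*len] = '\0';
    return 1;
}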
diff --git a/Lib/test/test_tokenize.py b/Lib/test/test_tokenize.py
index 47f2c06685b..63c2501cfe2 100644
--- a/Lib/test/test_tokenize.py
+++ b/Lib/test/test_tokenize.py
@@ -10,6 +10,8 @@ from textwrap import dedent
from unittest import TestCase, mock
from test.test_grammar import (VALID_UNDERSCORE_LITERALS,
INVALID_UNDERSCORE_LITERALS)
+from test.support import os_helper
+from test.support.script_helper import run_test_script, make_script
import os
import token
@@ -2631,5 +2633,19 @@ async def f():
self.assertEqual(get_tokens(code), get_tokens(code_no_cont))
+class CTokenizerBufferTests(unittest.TestCase):
+ def test_newline_at_the_end_of_buffer(self):
+ # See issue 99581: Make sure that if we need to add a new line at the
+ # end of the buffer, we have enough space in the buffer, especially when
+ # the current line is as long as the buffer space available.
+ test_script = f"""\
+ #coding: latin-1
+ #{"a"*10000}
+ #{"a"*10002}"""
+ with os_helper.temp_dir() as temp_dir:
+ file_name = make_script(temp_dir, 'foo', test_script)
+ run_test_script(file_name)
+
+
if __name__ == "__main__":
unittest.main()
diff --git a/Misc/NEWS.d/next/Core and Builtins/2022-11-19-22-27-52.gh-issue-99581.yKYPbf.rst b/Misc/NEWS.d/next/Core and Builtins/2022-11-19-22-27-52.gh-issue-99581.yKYPbf.rst
new file mode 100644
index 00000000000..8071fd130dd
--- /dev/null
+++ b/Misc/NEWS.d/next/Core and Builtins/2022-11-19-22-27-52.gh-issue-99581.yKYPbf.rst
@@ -0,0 +1,3 @@
+Fixed a bug that was causing a buffer overflow if the tokenizer copies a
+line that is missing the newline character and is as long as the available
+tokenizer buffer. Patch by Pablo Galindo.
diff --git a/Parser/tokenizer.c b/Parser/tokenizer.c
index a5cfb659b43..eda38a09a99 100644
--- a/Parser/tokenizer.c
+++ b/Parser/tokenizer.c
@@ -396,7 +396,11 @@ tok_readline_recode(struct tok_state *tok) {
error_ret(tok);
goto error;
}
- if (!tok_reserve_buf(tok, buflen + 1)) {
+ // Make room for the null terminator *and* potentially
+ // an extra newline character that we may need to artificially
+ // add.
+ size_t buffer_size = buflen + 2;
+ if (!tok_reserve_buf(tok, buffer_size)) {
goto error;
}
memcpy(tok->inp, buf, buflen);
@@ -983,6 +987,7 @@ tok_underflow_file(struct tok_state *tok) {
return 0;
}
if (tok->inp[-1] != '\n') {
+ assert(tok->inp + 1 < tok->end);
/* Last line does not end in \n, fake one */
*tok->inp++ = '\n';
*tok->inp = '\0';
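The new assert in tok_underflow_file() checks the invariant that the larger reservation guarantees: when the last line lacks a newline, the input pointer can advance once for the faked '\n' and still leave a byte for the terminating NUL before the end of the buffer. The sketch below is a minimal self-contained illustration of that check, not CPython code; BUFLEN is an arbitrary stand-in for the length of the copied line.

#include <assert.h>
#include <stdio.h>
#include <string.h>

#define BUFLEN 8   /* stand-in for the length of the copied line */

int main(void)
{
    /* Reserve BUFLEN + 2 bytes: the line, a possible fake '\n', and '\0'.
     * With only BUFLEN + 1, the assert below fails and the '\0' write
     * would land one byte past the reserved space. */
    char buffer[BUFLEN + 2];
    memset(buffer, 'a', BUFLEN);          /* a line with no trailing newline */

    char *inp = buffer + BUFLEN;          /* one past the copied line */
    char *end = buffer + sizeof buffer;   /* one past the reserved space */

    assert(inp + 1 < end);                /* room for '\n' *and* '\0' */
    *inp++ = '\n';
    *inp = '\0';

    printf("%s", buffer);                 /* the line plus the faked newline */
    return 0;
}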