1 #include "Python.h"
2 #include "pycore_call.h"
3 #include "pycore_import.h"
4 #include "pycore_fileutils.h"
5 #include "errcode.h"
6
7 #ifdef HAVE_UNISTD_H
8 # include <unistd.h> // lseek(), read()
9 #endif
10
11 #include "helpers.h"
12 #include "../lexer/state.h"
13 #include "../lexer/lexer.h"
14 #include "../lexer/buffer.h"
15
16 static int
tok_concatenate_interactive_new_line(struct tok_state * tok,const char * line)17 tok_concatenate_interactive_new_line(struct tok_state *tok, const char *line) {
18 assert(tok->fp_interactive);
19
20 if (!line) {
21 return 0;
22 }
23
24 Py_ssize_t current_size = tok->interactive_src_end - tok->interactive_src_start;
25 Py_ssize_t line_size = strlen(line);
26 char last_char = line[line_size > 0 ? line_size - 1 : line_size];
27 if (last_char != '\n') {
28 line_size += 1;
29 }
30 char* new_str = tok->interactive_src_start;
31
32 new_str = PyMem_Realloc(new_str, current_size + line_size + 1);
33 if (!new_str) {
34 if (tok->interactive_src_start) {
35 PyMem_Free(tok->interactive_src_start);
36 }
37 tok->interactive_src_start = NULL;
38 tok->interactive_src_end = NULL;
39 tok->done = E_NOMEM;
40 return -1;
41 }
42 strcpy(new_str + current_size, line);
43 tok->implicit_newline = 0;
44 if (last_char != '\n') {
45 /* Last line does not end in \n, fake one */
46 new_str[current_size + line_size - 1] = '\n';
47 new_str[current_size + line_size] = '\0';
48 tok->implicit_newline = 1;
49 }
50 tok->interactive_src_start = new_str;
51 tok->interactive_src_end = new_str + current_size + line_size;
52 return 0;
53 }
54
55 static int
tok_readline_raw(struct tok_state * tok)56 tok_readline_raw(struct tok_state *tok)
57 {
58 do {
59 if (!_PyLexer_tok_reserve_buf(tok, BUFSIZ)) {
60 return 0;
61 }
62 int n_chars = (int)(tok->end - tok->inp);
63 size_t line_size = 0;
64 char *line = _Py_UniversalNewlineFgetsWithSize(tok->inp, n_chars, tok->fp, NULL, &line_size);
65 if (line == NULL) {
66 return 1;
67 }
68 if (tok->fp_interactive &&
69 tok_concatenate_interactive_new_line(tok, line) == -1) {
70 return 0;
71 }
72 tok->inp += line_size;
73 if (tok->inp == tok->buf) {
74 return 0;
75 }
76 } while (tok->inp[-1] != '\n');
77 return 1;
78 }
79
80 static int
tok_readline_recode(struct tok_state * tok)81 tok_readline_recode(struct tok_state *tok) {
82 PyObject *line;
83 const char *buf;
84 Py_ssize_t buflen;
85 line = tok->decoding_buffer;
86 if (line == NULL) {
87 line = PyObject_CallNoArgs(tok->decoding_readline);
88 if (line == NULL) {
89 _PyTokenizer_error_ret(tok);
90 goto error;
91 }
92 }
93 else {
94 tok->decoding_buffer = NULL;
95 }
96 buf = PyUnicode_AsUTF8AndSize(line, &buflen);
97 if (buf == NULL) {
98 _PyTokenizer_error_ret(tok);
99 goto error;
100 }
101 // Make room for the null terminator *and* potentially
102 // an extra newline character that we may need to artificially
103 // add.
104 size_t buffer_size = buflen + 2;
105 if (!_PyLexer_tok_reserve_buf(tok, buffer_size)) {
106 goto error;
107 }
108 memcpy(tok->inp, buf, buflen);
109 tok->inp += buflen;
110 *tok->inp = '\0';
111 if (tok->fp_interactive &&
112 tok_concatenate_interactive_new_line(tok, buf) == -1) {
113 goto error;
114 }
115 Py_DECREF(line);
116 return 1;
117 error:
118 Py_XDECREF(line);
119 return 0;
120 }
121
122 /* Fetch the next byte from TOK. */
fp_getc(struct tok_state * tok)123 static int fp_getc(struct tok_state *tok) {
124 return getc(tok->fp);
125 }
126
127 /* Unfetch the last byte back into TOK. */
fp_ungetc(int c,struct tok_state * tok)128 static void fp_ungetc(int c, struct tok_state *tok) {
129 ungetc(c, tok->fp);
130 }
131
132 /* Set the readline function for TOK to a StreamReader's
133 readline function. The StreamReader is named ENC.
134
135 This function is called from _PyTokenizer_check_bom and _PyTokenizer_check_coding_spec.
136
137 ENC is usually identical to the future value of tok->encoding,
138 except for the (currently unsupported) case of UTF-16.
139
140 Return 1 on success, 0 on failure. */
static int
fp_setreadl(struct tok_state *tok, const char* enc)
{
    PyObject *readline, *open, *stream;
    int fd;
    long pos;

    fd = fileno(tok->fp);
    /* Due to buffering the file offset for fd can be different from the file
     * position of tok->fp. If tok->fp was opened in text mode on Windows,
     * its file position counts CRLF as one char and can't be directly mapped
     * to the file offset for fd. Instead we step back one byte and read to
     * the end of line.*/
    pos = ftell(tok->fp);
    if (pos == -1 ||
        lseek(fd, (off_t)(pos > 0 ? pos - 1 : pos), SEEK_SET) == (off_t)-1) {
        PyErr_SetFromErrnoWithFilename(PyExc_OSError, NULL);
        return 0;
    }

    open = _PyImport_GetModuleAttrString("io", "open");
    if (open == NULL) {
        return 0;
    }
    /* io.open(fd, "r", -1, enc, None, None, False): the final False is
       closefd, so dropping the stream does not close the borrowed fd. */
    stream = PyObject_CallFunction(open, "isisOOO",
                                   fd, "r", -1, enc, Py_None, Py_None, Py_False);
    Py_DECREF(open);
    if (stream == NULL) {
        return 0;
    }

    readline = PyObject_GetAttr(stream, &_Py_ID(readline));
    Py_DECREF(stream);
    if (readline == NULL) {
        return 0;
    }
    /* tok takes ownership of the reference; `readline` remains usable
       below as a borrowed pointer. On later failure, tok still holds it
       and _PyTokenizer_Free is responsible for releasing it. */
    Py_XSETREF(tok->decoding_readline, readline);

    if (pos > 0) {
        /* We rewound one byte above: consume the remainder of that line so
           decoding resumes at the start of the next full line. */
        PyObject *bufobj = _PyObject_CallNoArgs(readline);
        if (bufobj == NULL) {
            return 0;
        }
        Py_DECREF(bufobj);
    }

    return 1;
}
189
/* Underflow handler for interactive (prompting) input: read one more line
   with PyOS_Readline, recode it to UTF-8 if an encoding is set, and splice
   it into the token buffer. Returns 1 on success, 0 on failure or end of
   input (with tok->done set accordingly). */
static int
tok_underflow_interactive(struct tok_state *tok) {
    if (tok->interactive_underflow == IUNDERFLOW_STOP) {
        /* Caller asked us not to block waiting for more input. */
        tok->done = E_INTERACT_STOP;
        return 1;
    }
    char *newtok = PyOS_Readline(tok->fp ? tok->fp : stdin, stdout, tok->prompt);
    if (newtok != NULL) {
        /* Normalize line endings (\r\n, \r) to \n. */
        char *translated = _PyTokenizer_translate_newlines(newtok, 0, 0, tok);
        PyMem_Free(newtok);
        if (translated == NULL) {
            return 0;
        }
        newtok = translated;
    }
    if (tok->encoding && newtok && *newtok) {
        /* Recode to UTF-8 */
        Py_ssize_t buflen;
        const char* buf;
        PyObject *u = _PyTokenizer_translate_into_utf8(newtok, tok->encoding);
        PyMem_Free(newtok);
        if (u == NULL) {
            tok->done = E_DECODE;
            return 0;
        }
        buflen = PyBytes_GET_SIZE(u);
        buf = PyBytes_AS_STRING(u);
        /* Copy out of the bytes object so newtok outlives `u`. */
        newtok = PyMem_Malloc(buflen+1);
        if (newtok == NULL) {
            Py_DECREF(u);
            tok->done = E_NOMEM;
            return 0;
        }
        strcpy(newtok, buf);
        Py_DECREF(u);
    }
    if (tok->fp_interactive &&
        tok_concatenate_interactive_new_line(tok, newtok) == -1) {
        PyMem_Free(newtok);
        return 0;
    }
    if (tok->nextprompt != NULL) {
        /* After the first line, switch to the continuation prompt. */
        tok->prompt = tok->nextprompt;
    }
    if (newtok == NULL) {
        /* Readline returned NULL: treat as interrupted input. */
        tok->done = E_INTR;
    }
    else if (*newtok == '\0') {
        /* Empty string signals end of input. */
        PyMem_Free(newtok);
        tok->done = E_EOF;
    }
    else if (tok->start != NULL) {
        /* A token is in progress (multi-line construct): append the new
           line to the existing buffer. Save buffer-relative offsets first,
           since _PyLexer_tok_reserve_buf may realloc and move tok->buf. */
        Py_ssize_t cur_multi_line_start = tok->multi_line_start - tok->buf;
        _PyLexer_remember_fstring_buffers(tok);
        size_t size = strlen(newtok);
        ADVANCE_LINENO();
        if (!_PyLexer_tok_reserve_buf(tok, size + 1)) {
            PyMem_Free(tok->buf);
            tok->buf = NULL;
            PyMem_Free(newtok);
            return 0;
        }
        memcpy(tok->cur, newtok, size + 1);
        PyMem_Free(newtok);
        tok->inp += size;
        /* Re-derive pointers against the (possibly moved) buffer. */
        tok->multi_line_start = tok->buf + cur_multi_line_start;
        _PyLexer_restore_fstring_buffers(tok);
    }
    else {
        /* No token in progress: the new line replaces the buffer outright,
           transferring ownership of newtok to tok->buf. */
        _PyLexer_remember_fstring_buffers(tok);
        ADVANCE_LINENO();
        PyMem_Free(tok->buf);
        tok->buf = newtok;
        tok->cur = tok->buf;
        tok->line_start = tok->buf;
        tok->inp = strchr(tok->buf, '\0');
        tok->end = tok->inp + 1;
        _PyLexer_restore_fstring_buffers(tok);
    }
    if (tok->done != E_OK) {
        if (tok->prompt != NULL) {
            /* Keep the terminal tidy after an interrupted prompt. */
            PySys_WriteStderr("\n");
        }
        return 0;
    }

    if (tok->tok_mode_stack_index && !_PyLexer_update_fstring_expr(tok, 0)) {
        return 0;
    }
    return 1;
}
281
/* Underflow handler for non-interactive file input: read the next line into
   the token buffer, handling BOM/coding-cookie detection on the first two
   lines. Returns 1 on success, 0 on failure or EOF (tok->done set). */
static int
tok_underflow_file(struct tok_state *tok) {
    if (tok->start == NULL && !INSIDE_FSTRING(tok)) {
        /* No token in progress: the previous line may be overwritten. */
        tok->cur = tok->inp = tok->buf;
    }
    if (tok->decoding_state == STATE_INIT) {
        /* We have not yet determined the encoding.
           If an encoding is found, use the file-pointer
           reader functions from now on. */
        if (!_PyTokenizer_check_bom(fp_getc, fp_ungetc, fp_setreadl, tok)) {
            _PyTokenizer_error_ret(tok);
            return 0;
        }
        assert(tok->decoding_state != STATE_INIT);
    }
    /* Read until '\n' or EOF */
    if (tok->decoding_readline != NULL) {
        /* We already have a codec associated with this input. */
        if (!tok_readline_recode(tok)) {
            return 0;
        }
    }
    else {
        /* We want a 'raw' read. */
        if (!tok_readline_raw(tok)) {
            return 0;
        }
    }
    if (tok->inp == tok->cur) {
        /* Nothing was read: end of file. */
        tok->done = E_EOF;
        return 0;
    }
    tok->implicit_newline = 0;
    if (tok->inp[-1] != '\n') {
        assert(tok->inp + 1 < tok->end);
        /* Last line does not end in \n, fake one */
        *tok->inp++ = '\n';
        *tok->inp = '\0';
        tok->implicit_newline = 1;
    }

    if (tok->tok_mode_stack_index && !_PyLexer_update_fstring_expr(tok, 0)) {
        return 0;
    }

    ADVANCE_LINENO();
    if (tok->decoding_state != STATE_NORMAL) {
        if (tok->lineno > 2) {
            /* A coding spec can only appear on the first two lines. */
            tok->decoding_state = STATE_NORMAL;
        }
        else if (!_PyTokenizer_check_coding_spec(tok->cur, strlen(tok->cur),
                                                 tok, fp_setreadl))
        {
            return 0;
        }
    }
    /* The default encoding is UTF-8, so make sure we don't have any
       non-UTF-8 sequences in it. */
    if (!tok->encoding && !_PyTokenizer_ensure_utf8(tok->cur, tok)) {
        _PyTokenizer_error_ret(tok);
        return 0;
    }
    assert(tok->done == E_OK);
    return tok->done == E_OK;
}
347
348 /* Set up tokenizer for file */
349 struct tok_state *
_PyTokenizer_FromFile(FILE * fp,const char * enc,const char * ps1,const char * ps2)350 _PyTokenizer_FromFile(FILE *fp, const char* enc,
351 const char *ps1, const char *ps2)
352 {
353 struct tok_state *tok = _PyTokenizer_tok_new();
354 if (tok == NULL)
355 return NULL;
356 if ((tok->buf = (char *)PyMem_Malloc(BUFSIZ)) == NULL) {
357 _PyTokenizer_Free(tok);
358 return NULL;
359 }
360 tok->cur = tok->inp = tok->buf;
361 tok->end = tok->buf + BUFSIZ;
362 tok->fp = fp;
363 tok->prompt = ps1;
364 tok->nextprompt = ps2;
365 if (ps1 || ps2) {
366 tok->underflow = &tok_underflow_interactive;
367 } else {
368 tok->underflow = &tok_underflow_file;
369 }
370 if (enc != NULL) {
371 /* Must copy encoding declaration since it
372 gets copied into the parse tree. */
373 tok->encoding = _PyTokenizer_new_string(enc, strlen(enc), tok);
374 if (!tok->encoding) {
375 _PyTokenizer_Free(tok);
376 return NULL;
377 }
378 tok->decoding_state = STATE_NORMAL;
379 }
380 return tok;
381 }
382
#if defined(__wasi__) || (defined(__EMSCRIPTEN__) && (__EMSCRIPTEN_major__ >= 3))
// fdopen() with borrowed fd. WASI does not provide dup() and Emscripten's
// dup() emulation with open() is slow.
/* Union to round-trip the int fd through fopencookie's void* cookie. */
typedef union {
    void *cookie;
    int fd;
} borrowed;

/* fopencookie() read callback: forward to read() on the borrowed fd. */
static ssize_t
borrow_read(void *cookie, char *buf, size_t size)
{
    borrowed b = {.cookie = cookie};
    return read(b.fd, (void *)buf, size);
}

/* Wrap a borrowed fd in a read-only FILE* without duplicating the fd. */
static FILE *
fdopen_borrow(int fd) {
    // supports only reading. seek fails. close and write are no-ops.
    cookie_io_functions_t io_cb = {borrow_read, NULL, NULL, NULL};
    borrowed b = {.fd = fd};
    return fopencookie(b.cookie, "r", io_cb);
}
#else
/* Wrap a duplicate of fd in a FILE*, so fclose() on the result does not
   close the caller's fd. Returns NULL if the dup fails. */
static FILE *
fdopen_borrow(int fd) {
    fd = _Py_dup(fd);
    if (fd < 0) {
        return NULL;
    }
    return fdopen(fd, "r");
}
#endif
415
416 /* Get the encoding of a Python file. Check for the coding cookie and check if
417 the file starts with a BOM.
418
419 _PyTokenizer_FindEncodingFilename() returns NULL when it can't find the
420 encoding in the first or second line of the file (in which case the encoding
421 should be assumed to be UTF-8).
422
423 The char* returned is malloc'ed via PyMem_Malloc() and thus must be freed
424 by the caller. */
425 char *
_PyTokenizer_FindEncodingFilename(int fd,PyObject * filename)426 _PyTokenizer_FindEncodingFilename(int fd, PyObject *filename)
427 {
428 struct tok_state *tok;
429 FILE *fp;
430 char *encoding = NULL;
431
432 fp = fdopen_borrow(fd);
433 if (fp == NULL) {
434 return NULL;
435 }
436 tok = _PyTokenizer_FromFile(fp, NULL, NULL, NULL);
437 if (tok == NULL) {
438 fclose(fp);
439 return NULL;
440 }
441 if (filename != NULL) {
442 tok->filename = Py_NewRef(filename);
443 }
444 else {
445 tok->filename = PyUnicode_FromString("<string>");
446 if (tok->filename == NULL) {
447 fclose(fp);
448 _PyTokenizer_Free(tok);
449 return encoding;
450 }
451 }
452 struct token token;
453 // We don't want to report warnings here because it could cause infinite recursion
454 // if fetching the encoding shows a warning.
455 tok->report_warnings = 0;
456 while (tok->lineno < 2 && tok->done == E_OK) {
457 _PyToken_Init(&token);
458 _PyTokenizer_Get(tok, &token);
459 _PyToken_Free(&token);
460 }
461 fclose(fp);
462 if (tok->encoding) {
463 encoding = (char *)PyMem_Malloc(strlen(tok->encoding) + 1);
464 if (encoding) {
465 strcpy(encoding, tok->encoding);
466 }
467 }
468 _PyTokenizer_Free(tok);
469 return encoding;
470 }
471