/*
 * This file is part of the Micro Python project, http://micropython.org/
 *
 * The MIT License (MIT)
 *
 * Copyright (c) 2013, 2014 Damien P. George
 * Copyright (c) 2014 Paul Sokolovsky
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */
|
|
|
|
|
2013-10-04 19:53:11 +01:00
|
|
|
#include <stdio.h>
|
2014-03-30 15:35:53 +01:00
|
|
|
#include <string.h>
|
2013-10-04 19:53:11 +01:00
|
|
|
#include <assert.h>
|
|
|
|
|
2015-01-01 23:30:53 +00:00
|
|
|
#include "py/mpstate.h"
|
2015-01-01 20:27:54 +00:00
|
|
|
#include "py/nlr.h"
|
|
|
|
#include "py/emitglue.h"
|
|
|
|
#include "py/runtime.h"
|
|
|
|
#include "py/bc0.h"
|
|
|
|
#include "py/bc.h"
|
2013-10-04 19:53:11 +01:00
|
|
|
|
2014-04-23 01:40:24 +01:00
|
|
|
// Bytecode execution tracing: flip "#if 0" to "#if 1" to enable.
// When enabled, each dispatched opcode is printed together with the
// current value-stack depth (sp - code_state->sp).
#if 0
#define TRACE(ip) printf("sp=" INT_FMT " ", sp - code_state->sp); mp_bytecode_print2(ip, 1);
#else
#define TRACE(ip)
#endif
|
2014-04-10 17:21:34 +01:00
|
|
|
|
// Value stack grows up (this makes it incompatible with native C stack, but
// makes sure that arguments to functions are in natural order arg1..argN
// (Python semantics mandates left-to-right evaluation order, including for
// function arguments). Stack pointer is pre-incremented and points at the
// top element.
// Exception stack also grows up, top element is also pointed at.
2014-01-31 22:55:05 +00:00
|
|
|
// Exception stack unwind reasons (WHY_* in CPython-speak).
// TODO perhaps compress this to RETURN=0, JUMP>0, with number of unwinds
// left to do encoded in the JUMP number
typedef enum {
    UNWIND_RETURN = 1, // unwinding because of an executed "return" statement
    UNWIND_JUMP,       // unwinding because of a jump out of a try/finally block
} mp_unwind_reason_t;
|
|
|
|
|
2014-12-02 19:25:10 +00:00
|
|
|
// Decode an unsigned integer stored in the bytecode as a variable-length
// quantity: 7 payload bits per byte, most-significant group first, with the
// high bit set on every byte except the last. Declares "unum"; advances ip.
#define DECODE_UINT \
    mp_uint_t unum = 0; \
    do { \
        unum = (unum << 7) + (*ip & 0x7f); \
    } while ((*ip++ & 0x80) != 0)

// Decode an unsigned 16-bit jump label, little-endian. Declares "ulab"; advances ip.
#define DECODE_ULABEL mp_uint_t ulab = (ip[0] | (ip[1] << 8)); ip += 2

// Decode a signed 16-bit jump label, stored biased by 0x8000. Declares "slab"; advances ip.
#define DECODE_SLABEL mp_uint_t slab = (ip[0] | (ip[1] << 8)) - 0x8000; ip += 2

// Decode a qstr, stored in the same variable-length format as DECODE_UINT.
// Declares "qst"; advances ip.
#define DECODE_QSTR qstr qst = 0; \
    do { \
        qst = (qst << 7) + (*ip & 0x7f); \
    } while ((*ip++ & 0x80) != 0)

// Round ip up to machine-word alignment, then read an aligned pointer out of
// the bytecode stream. Declares "ptr"; advances ip past the pointer.
#define DECODE_PTR \
    ip = (byte*)(((mp_uint_t)ip + sizeof(mp_uint_t) - 1) & (~(sizeof(mp_uint_t) - 1))); /* align ip */ \
    void *ptr = (void*)*(mp_uint_t*)ip; \
    ip += sizeof(mp_uint_t)
|
2014-01-18 14:10:48 +00:00
|
|
|
// Value-stack operations. sp always points AT the top element (not one past
// it), so PUSH pre-increments and POP post-decrements; TOP/SET_TOP operate
// directly on *sp.
#define PUSH(val) *++sp = (val)
#define POP() (*sp--)
#define TOP() (*sp)
#define SET_TOP(val) *sp = (val)
|
2013-10-04 19:53:11 +01:00
|
|
|
|
2014-12-22 12:49:57 +00:00
|
|
|
// Push a new entry on the exception stack for a try/except, try/finally or
// with block. with_or_finally is 1 for finally/with handlers, 0 for except
// handlers; it is stored in bit 1 of the tagged val_sp pointer, and the
// previous currently_in_except_block flag is saved in bit 0 so POP_EXC_BLOCK
// can restore it. Consumes a ULABEL operand from the bytecode (via ip).
#define PUSH_EXC_BLOCK(with_or_finally) do { \
    DECODE_ULABEL; /* except labels are always forward */ \
    ++exc_sp; \
    exc_sp->handler = ip + ulab; \
    exc_sp->val_sp = MP_TAGPTR_MAKE(sp, ((with_or_finally) << 1) | currently_in_except_block); \
    exc_sp->prev_exc = MP_OBJ_NULL; \
    currently_in_except_block = 0; /* in a try block now */ \
} while (0)

// Pop the top entry off the exception stack, restoring the
// currently_in_except_block flag that PUSH_EXC_BLOCK saved in the
// tag bits of val_sp.
#define POP_EXC_BLOCK() \
    currently_in_except_block = MP_TAGPTR_TAG0(exc_sp->val_sp); /* restore previous state */ \
    exc_sp--; /* pop back to previous exception handler */
|
|
|
|
|
// fastn has items in reverse order (fastn[0] is local[0], fastn[-1] is local[1], etc)
// sp points to bottom of stack which grows up
// returns:
// MP_VM_RETURN_NORMAL, sp valid, return value in *sp
// MP_VM_RETURN_YIELD, ip, sp valid, yielded value in *sp
// MP_VM_RETURN_EXCEPTION, exception in fastn[0]
|
2014-06-07 14:16:08 +01:00
|
|
|
mp_vm_return_kind_t mp_execute_bytecode(mp_code_state *code_state, volatile mp_obj_t inject_exc) {
|
2014-12-28 05:17:43 +00:00
|
|
|
#define SELECTIVE_EXC_IP (0)
|
|
|
|
#if SELECTIVE_EXC_IP
|
2014-12-29 00:29:59 +00:00
|
|
|
#define MARK_EXC_IP_SELECTIVE() { code_state->ip = ip; } /* stores ip 1 byte past last opcode */
|
2014-12-28 05:17:43 +00:00
|
|
|
#define MARK_EXC_IP_GLOBAL()
|
|
|
|
#else
|
|
|
|
#define MARK_EXC_IP_SELECTIVE()
|
2014-12-29 00:29:59 +00:00
|
|
|
#define MARK_EXC_IP_GLOBAL() { code_state->ip = ip; } /* stores ip pointing to last opcode */
|
2014-12-28 05:17:43 +00:00
|
|
|
#endif
|
2014-05-21 20:32:59 +01:00
|
|
|
#if MICROPY_OPT_COMPUTED_GOTO
|
2015-01-01 20:27:54 +00:00
|
|
|
#include "py/vmentrytable.h"
|
2014-04-15 08:57:01 +01:00
|
|
|
#define DISPATCH() do { \
|
2014-04-23 01:40:24 +01:00
|
|
|
TRACE(ip); \
|
2014-12-28 05:17:43 +00:00
|
|
|
MARK_EXC_IP_GLOBAL(); \
|
2014-04-27 18:19:06 +01:00
|
|
|
goto *entry_table[*ip++]; \
|
2014-04-14 16:22:44 +01:00
|
|
|
} while(0)
|
2014-10-25 18:19:55 +01:00
|
|
|
#define DISPATCH_WITH_PEND_EXC_CHECK() goto pending_exception_check
|
2014-04-15 08:57:01 +01:00
|
|
|
#define ENTRY(op) entry_##op
|
|
|
|
#define ENTRY_DEFAULT entry_default
|
2014-04-14 16:22:44 +01:00
|
|
|
#else
|
2014-04-15 08:57:01 +01:00
|
|
|
#define DISPATCH() break
|
2014-10-25 18:19:55 +01:00
|
|
|
#define DISPATCH_WITH_PEND_EXC_CHECK() goto pending_exception_check
|
2014-04-15 08:57:01 +01:00
|
|
|
#define ENTRY(op) case op
|
|
|
|
#define ENTRY_DEFAULT default
|
2014-04-14 16:22:44 +01:00
|
|
|
#endif
|
|
|
|
|
py: Tidy up variables in VM, probably fixes subtle bugs.
Things get tricky when using the nlr code to catch exceptions. Need to
ensure that the variables (stack layout) in the exception handler are
the same as in the bit protected by the exception handler.
Prior to this patch there were a few bugs. 1) The constant
mp_const_MemoryError_obj was being preloaded to a specific location on
the stack at the start of the function. But this location on the stack
was being overwritten in the opcode loop (since it didn't think that
variable would ever be referenced again), and so when an exception
occurred, the variable holding the address of MemoryError was corrupt.
2) The FOR_ITER opcode detection in the exception handler used sp, which
may or may not contain the right value coming out of the main opcode
loop.
With this patch there is a clear separation of variables used in the
opcode loop and in the exception handler (should fix issue (2) above).
Furthermore, nlr_raise is no longer used in the opcode loop. Instead,
it jumps directly into the exception handler. This tells the C compiler
more about the possible code flow, and means that it should have the
same stack layout for the exception handler. This should fix issue (1)
above. Indeed, the generated (ARM) assembler has been checked explicitly,
and with 'goto exception_handler', the problem with &MemoryError is
fixed.
This may now fix problems with rge-sm, and probably many other subtle
bugs yet to show themselves. Incidentally, rge-sm now passes on
pyboard (with a reduced range of integration)!
Main lesson: nlr is tricky. Don't use nlr_push unless you know what you
are doing! Luckily, it's not used in many places. Using nlr_raise/jump
is fine.
2014-04-17 16:50:23 +01:00
|
|
|
// nlr_raise needs to be implemented as a goto, so that the C compiler's flow analyser
|
|
|
|
// sees that it's possible for us to jump from the dispatch loop to the exception
|
|
|
|
// handler. Without this, the code may have a different stack layout in the dispatch
|
|
|
|
// loop and the exception handler, leading to very obscure bugs.
|
|
|
|
#define RAISE(o) do { nlr_pop(); nlr.ret_val = o; goto exception_handler; } while(0)
|
2013-10-04 19:53:11 +01:00
|
|
|
|
2014-06-07 14:16:08 +01:00
|
|
|
// Pointers which are constant for particular invocation of mp_execute_bytecode()
|
2014-05-31 14:50:46 +01:00
|
|
|
mp_obj_t *const fastn = &code_state->state[code_state->n_state - 1];
|
|
|
|
mp_exc_stack_t *const exc_stack = (mp_exc_stack_t*)(code_state->state + code_state->n_state);
|
|
|
|
|
py: Tidy up variables in VM, probably fixes subtle bugs.
Things get tricky when using the nlr code to catch exceptions. Need to
ensure that the variables (stack layout) in the exception handler are
the same as in the bit protected by the exception handler.
Prior to this patch there were a few bugs. 1) The constant
mp_const_MemoryError_obj was being preloaded to a specific location on
the stack at the start of the function. But this location on the stack
was being overwritten in the opcode loop (since it didn't think that
variable would ever be referenced again), and so when an exception
occurred, the variable holding the address of MemoryError was corrupt.
2) The FOR_ITER opcode detection in the exception handler used sp, which
may or may not contain the right value coming out of the main opcode
loop.
With this patch there is a clear separation of variables used in the
opcode loop and in the exception handler (should fix issue (2) above).
Furthermore, nlr_raise is no longer used in the opcode loop. Instead,
it jumps directly into the exception handler. This tells the C compiler
more about the possible code flow, and means that it should have the
same stack layout for the exception handler. This should fix issue (1)
above. Indeed, the generated (ARM) assembler has been checked explicitly,
and with 'goto exception_handler', the problem with &MemoryError is
fixed.
This may now fix problems with rge-sm, and probably many other subtle
bugs yet to show themselves. Incidentally, rge-sm now passes on
pyboard (with a reduced range of integration)!
Main lesson: nlr is tricky. Don't use nlr_push unless you know what you
are doing! Luckily, it's not used in many places. Using nlr_raise/jump
is fine.
2014-04-17 16:50:23 +01:00
|
|
|
// variables that are visible to the exception handler (declared volatile)
|
2014-12-22 12:49:57 +00:00
|
|
|
volatile bool currently_in_except_block = MP_TAGPTR_TAG0(code_state->exc_sp); // 0 or 1, to detect nested exceptions
|
2014-05-31 14:50:46 +01:00
|
|
|
mp_exc_stack_t *volatile exc_sp = MP_TAGPTR_PTR(code_state->exc_sp); // stack grows up, exc_sp points to top of stack
|
2013-10-15 23:46:01 +01:00
|
|
|
|
2013-10-15 22:25:17 +01:00
|
|
|
// outer exception handling loop
|
2013-10-04 19:53:11 +01:00
|
|
|
for (;;) {
|
py: Tidy up variables in VM, probably fixes subtle bugs.
Things get tricky when using the nlr code to catch exceptions. Need to
ensure that the variables (stack layout) in the exception handler are
the same as in the bit protected by the exception handler.
Prior to this patch there were a few bugs. 1) The constant
mp_const_MemoryError_obj was being preloaded to a specific location on
the stack at the start of the function. But this location on the stack
was being overwritten in the opcode loop (since it didn't think that
variable would ever be referenced again), and so when an exception
occurred, the variable holding the address of MemoryError was corrupt.
2) The FOR_ITER opcode detection in the exception handler used sp, which
may or may not contain the right value coming out of the main opcode
loop.
With this patch there is a clear separation of variables used in the
opcode loop and in the exception handler (should fix issue (2) above).
Furthermore, nlr_raise is no longer used in the opcode loop. Instead,
it jumps directly into the exception handler. This tells the C compiler
more about the possible code flow, and means that it should have the
same stack layout for the exception handler. This should fix issue (1)
above. Indeed, the generated (ARM) assembler has been checked explicitly,
and with 'goto exception_handler', the problem with &MemoryError is
fixed.
This may now fix problems with rge-sm, and probably many other subtle
bugs yet to show themselves. Incidentally, rge-sm now passes on
pyboard (with a reduced range of integration)!
Main lesson: nlr is tricky. Don't use nlr_push unless you know what you
are doing! Luckily, it's not used in many places. Using nlr_raise/jump
is fine.
2014-04-17 16:50:23 +01:00
|
|
|
nlr_buf_t nlr;
|
2014-03-26 18:37:06 +00:00
|
|
|
outer_dispatch_loop:
|
2013-10-15 22:25:17 +01:00
|
|
|
if (nlr_push(&nlr) == 0) {
|
py: Tidy up variables in VM, probably fixes subtle bugs.
Things get tricky when using the nlr code to catch exceptions. Need to
ensure that the variables (stack layout) in the exception handler are
the same as in the bit protected by the exception handler.
Prior to this patch there were a few bugs. 1) The constant
mp_const_MemoryError_obj was being preloaded to a specific location on
the stack at the start of the function. But this location on the stack
was being overwritten in the opcode loop (since it didn't think that
variable would ever be referenced again), and so when an exception
occurred, the variable holding the address of MemoryError was corrupt.
2) The FOR_ITER opcode detection in the exception handler used sp, which
may or may not contain the right value coming out of the main opcode
loop.
With this patch there is a clear separation of variables used in the
opcode loop and in the exception handler (should fix issue (2) above).
Furthermore, nlr_raise is no longer used in the opcode loop. Instead,
it jumps directly into the exception handler. This tells the C compiler
more about the possible code flow, and means that it should have the
same stack layout for the exception handler. This should fix issue (1)
above. Indeed, the generated (ARM) assembler has been checked explicitly,
and with 'goto exception_handler', the problem with &MemoryError is
fixed.
This may now fix problems with rge-sm, and probably many other subtle
bugs yet to show themselves. Incidentally, rge-sm now passes on
pyboard (with a reduced range of integration)!
Main lesson: nlr is tricky. Don't use nlr_push unless you know what you
are doing! Luckily, it's not used in many places. Using nlr_raise/jump
is fine.
2014-04-17 16:50:23 +01:00
|
|
|
// local variables that are not visible to the exception handler
|
2014-05-31 14:50:46 +01:00
|
|
|
const byte *ip = code_state->ip;
|
|
|
|
mp_obj_t *sp = code_state->sp;
|
2014-05-25 22:58:04 +01:00
|
|
|
mp_obj_t obj_shared;
|
py: Tidy up variables in VM, probably fixes subtle bugs.
Things get tricky when using the nlr code to catch exceptions. Need to
ensure that the variables (stack layout) in the exception handler are
the same as in the bit protected by the exception handler.
Prior to this patch there were a few bugs. 1) The constant
mp_const_MemoryError_obj was being preloaded to a specific location on
the stack at the start of the function. But this location on the stack
was being overwritten in the opcode loop (since it didn't think that
variable would ever be referenced again), and so when an exception
occurred, the variable holding the address of MemoryError was corrupt.
2) The FOR_ITER opcode detection in the exception handler used sp, which
may or may not contain the right value coming out of the main opcode
loop.
With this patch there is a clear separation of variables used in the
opcode loop and in the exception handler (should fix issue (2) above).
Furthermore, nlr_raise is no longer used in the opcode loop. Instead,
it jumps directly into the exception handler. This tells the C compiler
more about the possible code flow, and means that it should have the
same stack layout for the exception handler. This should fix issue (1)
above. Indeed, the generated (ARM) assembler has been checked explicitly,
and with 'goto exception_handler', the problem with &MemoryError is
fixed.
This may now fix problems with rge-sm, and probably many other subtle
bugs yet to show themselves. Incidentally, rge-sm now passes on
pyboard (with a reduced range of integration)!
Main lesson: nlr is tricky. Don't use nlr_push unless you know what you
are doing! Luckily, it's not used in many places. Using nlr_raise/jump
is fine.
2014-04-17 16:50:23 +01:00
|
|
|
|
2014-03-22 15:50:12 +00:00
|
|
|
// If we have exception to inject, now that we finish setting up
|
|
|
|
// execution context, raise it. This works as if RAISE_VARARGS
|
|
|
|
// bytecode was executed.
|
2014-03-26 15:36:12 +00:00
|
|
|
// Injecting exc into yield from generator is a special case,
|
|
|
|
// handled by MP_BC_YIELD_FROM itself
|
|
|
|
if (inject_exc != MP_OBJ_NULL && *ip != MP_BC_YIELD_FROM) {
|
2014-05-25 22:58:04 +01:00
|
|
|
mp_obj_t exc = inject_exc;
|
2014-03-22 15:50:12 +00:00
|
|
|
inject_exc = MP_OBJ_NULL;
|
2014-05-25 22:58:04 +01:00
|
|
|
exc = mp_make_raise_obj(exc);
|
|
|
|
RAISE(exc);
|
2014-03-22 15:50:12 +00:00
|
|
|
}
|
py: Tidy up variables in VM, probably fixes subtle bugs.
Things get tricky when using the nlr code to catch exceptions. Need to
ensure that the variables (stack layout) in the exception handler are
the same as in the bit protected by the exception handler.
Prior to this patch there were a few bugs. 1) The constant
mp_const_MemoryError_obj was being preloaded to a specific location on
the stack at the start of the function. But this location on the stack
was being overwritten in the opcode loop (since it didn't think that
variable would ever be referenced again), and so when an exception
occurred, the variable holding the address of MemoryError was corrupt.
2) The FOR_ITER opcode detection in the exception handler used sp, which
may or may not contain the right value coming out of the main opcode
loop.
With this patch there is a clear separation of variables used in the
opcode loop and in the exception handler (should fix issue (2) above).
Furthermore, nlr_raise is no longer used in the opcode loop. Instead,
it jumps directly into the exception handler. This tells the C compiler
more about the possible code flow, and means that it should have the
same stack layout for the exception handler. This should fix issue (1)
above. Indeed, the generated (ARM) assembler has been checked explicitly,
and with 'goto exception_handler', the problem with &MemoryError is
fixed.
This may now fix problems with rge-sm, and probably many other subtle
bugs yet to show themselves. Incidentally, rge-sm now passes on
pyboard (with a reduced range of integration)!
Main lesson: nlr is tricky. Don't use nlr_push unless you know what you
are doing! Luckily, it's not used in many places. Using nlr_raise/jump
is fine.
2014-04-17 16:50:23 +01:00
|
|
|
|
2013-10-15 22:25:17 +01:00
|
|
|
// loop to execute byte code
|
|
|
|
for (;;) {
|
2014-01-31 22:55:05 +00:00
|
|
|
dispatch_loop:
|
2014-05-21 20:32:59 +01:00
|
|
|
#if MICROPY_OPT_COMPUTED_GOTO
|
2014-04-14 16:22:44 +01:00
|
|
|
DISPATCH();
|
|
|
|
#else
|
2014-04-23 01:40:24 +01:00
|
|
|
TRACE(ip);
|
2014-12-28 05:17:43 +00:00
|
|
|
MARK_EXC_IP_GLOBAL();
|
2014-04-27 18:19:06 +01:00
|
|
|
switch (*ip++) {
|
2014-04-14 16:22:44 +01:00
|
|
|
#endif
|
2014-04-09 15:26:46 +01:00
|
|
|
|
2014-04-14 16:22:44 +01:00
|
|
|
ENTRY(MP_BC_LOAD_CONST_FALSE):
|
|
|
|
PUSH(mp_const_false);
|
|
|
|
DISPATCH();
|
2014-04-09 15:26:46 +01:00
|
|
|
|
2014-04-14 16:22:44 +01:00
|
|
|
ENTRY(MP_BC_LOAD_CONST_NONE):
|
|
|
|
PUSH(mp_const_none);
|
|
|
|
DISPATCH();
|
2014-03-23 19:19:02 +00:00
|
|
|
|
2014-04-14 16:22:44 +01:00
|
|
|
ENTRY(MP_BC_LOAD_CONST_TRUE):
|
|
|
|
PUSH(mp_const_true);
|
|
|
|
DISPATCH();
|
2014-04-08 21:11:49 +01:00
|
|
|
|
2014-04-14 16:22:44 +01:00
|
|
|
ENTRY(MP_BC_LOAD_CONST_ELLIPSIS):
|
|
|
|
PUSH((mp_obj_t)&mp_const_ellipsis_obj);
|
|
|
|
DISPATCH();
|
2013-10-15 22:25:17 +01:00
|
|
|
|
2014-04-14 16:22:44 +01:00
|
|
|
ENTRY(MP_BC_LOAD_CONST_SMALL_INT): {
|
2014-07-03 13:25:24 +01:00
|
|
|
mp_int_t num = 0;
|
2014-04-14 16:22:44 +01:00
|
|
|
if ((ip[0] & 0x40) != 0) {
|
|
|
|
// Number is negative
|
|
|
|
num--;
|
|
|
|
}
|
|
|
|
do {
|
|
|
|
num = (num << 7) | (*ip & 0x7f);
|
|
|
|
} while ((*ip++ & 0x80) != 0);
|
|
|
|
PUSH(MP_OBJ_NEW_SMALL_INT(num));
|
|
|
|
DISPATCH();
|
|
|
|
}
|
2013-10-15 22:25:17 +01:00
|
|
|
|
2014-05-25 22:58:04 +01:00
|
|
|
ENTRY(MP_BC_LOAD_CONST_INT): {
|
2014-04-14 16:22:44 +01:00
|
|
|
DECODE_QSTR;
|
2014-05-28 14:07:21 +01:00
|
|
|
PUSH(mp_load_const_int(qst));
|
2014-04-14 16:22:44 +01:00
|
|
|
DISPATCH();
|
2014-05-25 22:58:04 +01:00
|
|
|
}
|
2014-04-14 16:22:44 +01:00
|
|
|
|
2014-05-25 22:58:04 +01:00
|
|
|
ENTRY(MP_BC_LOAD_CONST_DEC): {
|
2014-12-28 05:17:43 +00:00
|
|
|
MARK_EXC_IP_SELECTIVE();
|
2014-04-14 16:22:44 +01:00
|
|
|
DECODE_QSTR;
|
|
|
|
PUSH(mp_load_const_dec(qst));
|
|
|
|
DISPATCH();
|
2014-05-25 22:58:04 +01:00
|
|
|
}
|
2014-04-14 16:22:44 +01:00
|
|
|
|
2014-05-25 22:58:04 +01:00
|
|
|
ENTRY(MP_BC_LOAD_CONST_BYTES): {
|
2014-04-14 16:22:44 +01:00
|
|
|
DECODE_QSTR;
|
|
|
|
PUSH(mp_load_const_bytes(qst));
|
|
|
|
DISPATCH();
|
2014-05-25 22:58:04 +01:00
|
|
|
}
|
2014-04-14 16:22:44 +01:00
|
|
|
|
2014-05-25 22:58:04 +01:00
|
|
|
ENTRY(MP_BC_LOAD_CONST_STRING): {
|
2014-04-14 16:22:44 +01:00
|
|
|
DECODE_QSTR;
|
|
|
|
PUSH(mp_load_const_str(qst));
|
|
|
|
DISPATCH();
|
2014-05-25 22:58:04 +01:00
|
|
|
}
|
2014-04-14 16:22:44 +01:00
|
|
|
|
|
|
|
ENTRY(MP_BC_LOAD_NULL):
|
|
|
|
PUSH(MP_OBJ_NULL);
|
|
|
|
DISPATCH();
|
|
|
|
|
2014-12-02 19:25:10 +00:00
|
|
|
ENTRY(MP_BC_LOAD_FAST_N): {
|
2014-04-14 16:22:44 +01:00
|
|
|
DECODE_UINT;
|
2014-05-25 22:58:04 +01:00
|
|
|
obj_shared = fastn[-unum];
|
2014-04-14 16:22:44 +01:00
|
|
|
load_check:
|
2014-05-25 22:58:04 +01:00
|
|
|
if (obj_shared == MP_OBJ_NULL) {
|
|
|
|
local_name_error: {
|
2014-12-28 05:17:43 +00:00
|
|
|
MARK_EXC_IP_SELECTIVE();
|
2014-05-25 22:58:04 +01:00
|
|
|
mp_obj_t obj = mp_obj_new_exception_msg(&mp_type_NameError, "local variable referenced before assignment");
|
|
|
|
RAISE(obj);
|
|
|
|
}
|
2014-04-14 16:22:44 +01:00
|
|
|
}
|
2014-05-25 22:58:04 +01:00
|
|
|
PUSH(obj_shared);
|
2014-04-14 16:22:44 +01:00
|
|
|
DISPATCH();
|
2014-12-02 19:25:10 +00:00
|
|
|
}
|
2014-04-14 16:22:44 +01:00
|
|
|
|
2014-12-02 19:25:10 +00:00
|
|
|
ENTRY(MP_BC_LOAD_DEREF): {
|
2014-04-14 16:22:44 +01:00
|
|
|
DECODE_UINT;
|
2014-05-25 22:58:04 +01:00
|
|
|
obj_shared = mp_obj_cell_get(fastn[-unum]);
|
2014-04-14 16:22:44 +01:00
|
|
|
goto load_check;
|
2014-12-02 19:25:10 +00:00
|
|
|
}
|
2014-04-14 16:22:44 +01:00
|
|
|
|
2014-05-25 22:58:04 +01:00
|
|
|
ENTRY(MP_BC_LOAD_NAME): {
|
2014-12-28 05:17:43 +00:00
|
|
|
MARK_EXC_IP_SELECTIVE();
|
2014-04-14 16:22:44 +01:00
|
|
|
DECODE_QSTR;
|
|
|
|
PUSH(mp_load_name(qst));
|
|
|
|
DISPATCH();
|
2014-05-25 22:58:04 +01:00
|
|
|
}
|
2014-04-14 16:22:44 +01:00
|
|
|
|
2014-05-25 22:58:04 +01:00
|
|
|
ENTRY(MP_BC_LOAD_GLOBAL): {
|
2014-12-28 05:17:43 +00:00
|
|
|
MARK_EXC_IP_SELECTIVE();
|
2014-04-14 16:22:44 +01:00
|
|
|
DECODE_QSTR;
|
|
|
|
PUSH(mp_load_global(qst));
|
|
|
|
DISPATCH();
|
2014-05-25 22:58:04 +01:00
|
|
|
}
|
2014-04-14 16:22:44 +01:00
|
|
|
|
2014-05-25 22:58:04 +01:00
|
|
|
ENTRY(MP_BC_LOAD_ATTR): {
|
2014-12-28 05:17:43 +00:00
|
|
|
MARK_EXC_IP_SELECTIVE();
|
2014-04-14 16:22:44 +01:00
|
|
|
DECODE_QSTR;
|
|
|
|
SET_TOP(mp_load_attr(TOP(), qst));
|
|
|
|
DISPATCH();
|
2014-05-25 22:58:04 +01:00
|
|
|
}
|
2014-04-14 16:22:44 +01:00
|
|
|
|
2014-05-25 22:58:04 +01:00
|
|
|
ENTRY(MP_BC_LOAD_METHOD): {
|
2014-12-28 05:17:43 +00:00
|
|
|
MARK_EXC_IP_SELECTIVE();
|
2014-04-14 16:22:44 +01:00
|
|
|
DECODE_QSTR;
|
|
|
|
mp_load_method(*sp, qst, sp);
|
|
|
|
sp += 1;
|
|
|
|
DISPATCH();
|
2014-05-25 22:58:04 +01:00
|
|
|
}
|
2014-04-14 16:22:44 +01:00
|
|
|
|
|
|
|
ENTRY(MP_BC_LOAD_BUILD_CLASS):
|
2014-12-28 05:17:43 +00:00
|
|
|
MARK_EXC_IP_SELECTIVE();
|
2014-04-14 16:22:44 +01:00
|
|
|
PUSH(mp_load_build_class());
|
|
|
|
DISPATCH();
|
|
|
|
|
2014-05-25 22:58:04 +01:00
|
|
|
ENTRY(MP_BC_LOAD_SUBSCR): {
|
2014-12-28 05:17:43 +00:00
|
|
|
MARK_EXC_IP_SELECTIVE();
|
2014-05-25 22:58:04 +01:00
|
|
|
mp_obj_t index = POP();
|
|
|
|
SET_TOP(mp_obj_subscr(TOP(), index, MP_OBJ_SENTINEL));
|
2014-04-17 22:10:53 +01:00
|
|
|
DISPATCH();
|
2014-05-25 22:58:04 +01:00
|
|
|
}
|
2014-04-17 22:10:53 +01:00
|
|
|
|
2014-12-02 19:25:10 +00:00
|
|
|
ENTRY(MP_BC_STORE_FAST_N): {
|
2014-04-14 16:22:44 +01:00
|
|
|
DECODE_UINT;
|
|
|
|
fastn[-unum] = POP();
|
|
|
|
DISPATCH();
|
2014-12-02 19:25:10 +00:00
|
|
|
}
|
2014-04-14 16:22:44 +01:00
|
|
|
|
2014-12-02 19:25:10 +00:00
|
|
|
ENTRY(MP_BC_STORE_DEREF): {
|
2014-04-14 16:22:44 +01:00
|
|
|
DECODE_UINT;
|
|
|
|
mp_obj_cell_set(fastn[-unum], POP());
|
|
|
|
DISPATCH();
|
2014-12-02 19:25:10 +00:00
|
|
|
}
|
2014-04-14 16:22:44 +01:00
|
|
|
|
2014-05-25 22:58:04 +01:00
|
|
|
ENTRY(MP_BC_STORE_NAME): {
|
2014-12-28 05:17:43 +00:00
|
|
|
MARK_EXC_IP_SELECTIVE();
|
2014-04-14 16:22:44 +01:00
|
|
|
DECODE_QSTR;
|
|
|
|
mp_store_name(qst, POP());
|
|
|
|
DISPATCH();
|
2014-05-25 22:58:04 +01:00
|
|
|
}
|
2014-04-14 16:22:44 +01:00
|
|
|
|
2014-05-25 22:58:04 +01:00
|
|
|
ENTRY(MP_BC_STORE_GLOBAL): {
|
2014-12-28 05:17:43 +00:00
|
|
|
MARK_EXC_IP_SELECTIVE();
|
2014-04-14 16:22:44 +01:00
|
|
|
DECODE_QSTR;
|
|
|
|
mp_store_global(qst, POP());
|
|
|
|
DISPATCH();
|
2014-05-25 22:58:04 +01:00
|
|
|
}
|
2014-04-14 16:22:44 +01:00
|
|
|
|
2014-05-25 22:58:04 +01:00
|
|
|
ENTRY(MP_BC_STORE_ATTR): {
|
2014-12-28 05:17:43 +00:00
|
|
|
MARK_EXC_IP_SELECTIVE();
|
2014-04-14 16:22:44 +01:00
|
|
|
DECODE_QSTR;
|
|
|
|
mp_store_attr(sp[0], qst, sp[-1]);
|
|
|
|
sp -= 2;
|
|
|
|
DISPATCH();
|
2014-05-25 22:58:04 +01:00
|
|
|
}
|
2014-04-14 16:22:44 +01:00
|
|
|
|
|
|
|
ENTRY(MP_BC_STORE_SUBSCR):
|
2014-12-28 05:17:43 +00:00
|
|
|
MARK_EXC_IP_SELECTIVE();
|
2014-04-17 22:10:53 +01:00
|
|
|
mp_obj_subscr(sp[-1], sp[0], sp[-2]);
|
2014-04-14 16:22:44 +01:00
|
|
|
sp -= 3;
|
|
|
|
DISPATCH();
|
|
|
|
|
2014-12-02 19:25:10 +00:00
|
|
|
ENTRY(MP_BC_DELETE_FAST): {
|
2014-12-28 05:17:43 +00:00
|
|
|
MARK_EXC_IP_SELECTIVE();
|
2014-04-14 16:22:44 +01:00
|
|
|
DECODE_UINT;
|
|
|
|
if (fastn[-unum] == MP_OBJ_NULL) {
|
|
|
|
goto local_name_error;
|
|
|
|
}
|
|
|
|
fastn[-unum] = MP_OBJ_NULL;
|
|
|
|
DISPATCH();
|
2014-12-02 19:25:10 +00:00
|
|
|
}
|
2013-10-15 22:25:17 +01:00
|
|
|
|
2014-12-02 19:25:10 +00:00
|
|
|
ENTRY(MP_BC_DELETE_DEREF): {
|
2014-12-28 05:17:43 +00:00
|
|
|
MARK_EXC_IP_SELECTIVE();
|
2014-04-14 16:22:44 +01:00
|
|
|
DECODE_UINT;
|
|
|
|
if (mp_obj_cell_get(fastn[-unum]) == MP_OBJ_NULL) {
|
|
|
|
goto local_name_error;
|
|
|
|
}
|
|
|
|
mp_obj_cell_set(fastn[-unum], MP_OBJ_NULL);
|
|
|
|
DISPATCH();
|
2014-12-02 19:25:10 +00:00
|
|
|
}
|
2014-04-14 16:22:44 +01:00
|
|
|
|
2014-05-25 22:58:04 +01:00
|
|
|
ENTRY(MP_BC_DELETE_NAME): {
|
2014-12-28 05:17:43 +00:00
|
|
|
MARK_EXC_IP_SELECTIVE();
|
2014-04-14 16:22:44 +01:00
|
|
|
DECODE_QSTR;
|
|
|
|
mp_delete_name(qst);
|
|
|
|
DISPATCH();
|
2014-05-25 22:58:04 +01:00
|
|
|
}
|
2014-04-14 16:22:44 +01:00
|
|
|
|
2014-05-25 22:58:04 +01:00
|
|
|
ENTRY(MP_BC_DELETE_GLOBAL): {
|
2014-12-28 05:17:43 +00:00
|
|
|
MARK_EXC_IP_SELECTIVE();
|
2014-04-14 16:22:44 +01:00
|
|
|
DECODE_QSTR;
|
|
|
|
mp_delete_global(qst);
|
|
|
|
DISPATCH();
|
2014-05-25 22:58:04 +01:00
|
|
|
}
|
2014-04-14 16:22:44 +01:00
|
|
|
|
2014-05-25 22:58:04 +01:00
|
|
|
ENTRY(MP_BC_DUP_TOP): {
|
|
|
|
mp_obj_t top = TOP();
|
|
|
|
PUSH(top);
|
2014-04-14 16:22:44 +01:00
|
|
|
DISPATCH();
|
2014-05-25 22:58:04 +01:00
|
|
|
}
|
2014-04-14 16:22:44 +01:00
|
|
|
|
|
|
|
ENTRY(MP_BC_DUP_TOP_TWO):
|
|
|
|
sp += 2;
|
|
|
|
sp[0] = sp[-2];
|
|
|
|
sp[-1] = sp[-3];
|
|
|
|
DISPATCH();
|
|
|
|
|
|
|
|
ENTRY(MP_BC_POP_TOP):
|
|
|
|
sp -= 1;
|
|
|
|
DISPATCH();
|
|
|
|
|
2014-05-25 22:58:04 +01:00
|
|
|
ENTRY(MP_BC_ROT_TWO): {
|
|
|
|
mp_obj_t top = sp[0];
|
2014-04-14 16:22:44 +01:00
|
|
|
sp[0] = sp[-1];
|
2014-05-25 22:58:04 +01:00
|
|
|
sp[-1] = top;
|
2014-04-14 16:22:44 +01:00
|
|
|
DISPATCH();
|
2014-05-25 22:58:04 +01:00
|
|
|
}
|
2014-04-14 16:22:44 +01:00
|
|
|
|
2014-05-25 22:58:04 +01:00
|
|
|
ENTRY(MP_BC_ROT_THREE): {
|
|
|
|
mp_obj_t top = sp[0];
|
2014-04-14 16:22:44 +01:00
|
|
|
sp[0] = sp[-1];
|
|
|
|
sp[-1] = sp[-2];
|
2014-05-25 22:58:04 +01:00
|
|
|
sp[-2] = top;
|
2014-04-14 16:22:44 +01:00
|
|
|
DISPATCH();
|
2014-05-25 22:58:04 +01:00
|
|
|
}
|
2014-04-14 16:22:44 +01:00
|
|
|
|
2014-12-02 19:25:10 +00:00
|
|
|
ENTRY(MP_BC_JUMP): {
|
2014-04-14 16:22:44 +01:00
|
|
|
DECODE_SLABEL;
|
2014-12-02 19:25:10 +00:00
|
|
|
ip += slab;
|
2014-10-25 18:19:55 +01:00
|
|
|
DISPATCH_WITH_PEND_EXC_CHECK();
|
2014-12-02 19:25:10 +00:00
|
|
|
}
|
2014-04-14 16:22:44 +01:00
|
|
|
|
2014-12-02 19:25:10 +00:00
|
|
|
ENTRY(MP_BC_POP_JUMP_IF_TRUE): {
|
2014-04-14 16:22:44 +01:00
|
|
|
DECODE_SLABEL;
|
|
|
|
if (mp_obj_is_true(POP())) {
|
2014-12-02 19:25:10 +00:00
|
|
|
ip += slab;
|
2014-04-14 16:22:44 +01:00
|
|
|
}
|
2014-10-25 18:19:55 +01:00
|
|
|
DISPATCH_WITH_PEND_EXC_CHECK();
|
2014-12-02 19:25:10 +00:00
|
|
|
}
|
2013-11-09 20:12:32 +00:00
|
|
|
|
2014-12-02 19:25:10 +00:00
|
|
|
ENTRY(MP_BC_POP_JUMP_IF_FALSE): {
|
2014-04-14 16:22:44 +01:00
|
|
|
DECODE_SLABEL;
|
|
|
|
if (!mp_obj_is_true(POP())) {
|
2014-12-02 19:25:10 +00:00
|
|
|
ip += slab;
|
2014-04-14 16:22:44 +01:00
|
|
|
}
|
2014-10-25 18:19:55 +01:00
|
|
|
DISPATCH_WITH_PEND_EXC_CHECK();
|
2014-12-02 19:25:10 +00:00
|
|
|
}
|
2013-11-09 20:12:32 +00:00
|
|
|
|
2014-12-02 19:25:10 +00:00
|
|
|
ENTRY(MP_BC_JUMP_IF_TRUE_OR_POP): {
|
2014-04-14 16:22:44 +01:00
|
|
|
DECODE_SLABEL;
|
|
|
|
if (mp_obj_is_true(TOP())) {
|
2014-12-02 19:25:10 +00:00
|
|
|
ip += slab;
|
2014-04-14 16:22:44 +01:00
|
|
|
} else {
|
|
|
|
sp--;
|
|
|
|
}
|
2014-10-25 18:19:55 +01:00
|
|
|
DISPATCH_WITH_PEND_EXC_CHECK();
|
2014-12-02 19:25:10 +00:00
|
|
|
}
|
2013-10-15 22:25:17 +01:00
|
|
|
|
2014-12-02 19:25:10 +00:00
|
|
|
ENTRY(MP_BC_JUMP_IF_FALSE_OR_POP): {
|
2014-04-14 16:22:44 +01:00
|
|
|
DECODE_SLABEL;
|
|
|
|
if (mp_obj_is_true(TOP())) {
|
|
|
|
sp--;
|
|
|
|
} else {
|
2014-12-02 19:25:10 +00:00
|
|
|
ip += slab;
|
2014-04-14 16:22:44 +01:00
|
|
|
}
|
2014-10-25 18:19:55 +01:00
|
|
|
DISPATCH_WITH_PEND_EXC_CHECK();
|
2014-12-02 19:25:10 +00:00
|
|
|
}
|
2014-04-14 16:22:44 +01:00
|
|
|
|
2014-05-25 22:58:04 +01:00
|
|
|
ENTRY(MP_BC_SETUP_WITH): {
|
2014-12-28 05:17:43 +00:00
|
|
|
MARK_EXC_IP_SELECTIVE();
|
2014-05-25 22:58:04 +01:00
|
|
|
mp_obj_t obj = TOP();
|
|
|
|
SET_TOP(mp_load_attr(obj, MP_QSTR___exit__));
|
|
|
|
mp_load_method(obj, MP_QSTR___enter__, sp + 1);
|
|
|
|
mp_obj_t ret = mp_call_method_n_kw(0, 0, sp + 1);
|
2014-12-22 12:49:57 +00:00
|
|
|
PUSH_EXC_BLOCK(1);
|
2014-05-25 22:58:04 +01:00
|
|
|
PUSH(ret);
|
2014-04-14 16:22:44 +01:00
|
|
|
DISPATCH();
|
2014-05-25 22:58:04 +01:00
|
|
|
}
|
2014-04-14 16:22:44 +01:00
|
|
|
|
|
|
|
ENTRY(MP_BC_WITH_CLEANUP): {
|
2014-12-28 05:17:43 +00:00
|
|
|
MARK_EXC_IP_SELECTIVE();
|
2014-04-14 16:22:44 +01:00
|
|
|
// Arriving here, there's "exception control block" on top of stack,
|
|
|
|
// and __exit__ bound method underneath it. Bytecode calls __exit__,
|
|
|
|
// and "deletes" it off stack, shifting "exception control block"
|
|
|
|
// to its place.
|
|
|
|
static const mp_obj_t no_exc[] = {mp_const_none, mp_const_none, mp_const_none};
|
|
|
|
if (TOP() == mp_const_none) {
|
|
|
|
sp--;
|
2014-05-25 22:58:04 +01:00
|
|
|
mp_obj_t obj = TOP();
|
2014-04-14 16:22:44 +01:00
|
|
|
SET_TOP(mp_const_none);
|
2014-05-25 22:58:04 +01:00
|
|
|
mp_call_function_n_kw(obj, 3, 0, no_exc);
|
2014-04-14 16:22:44 +01:00
|
|
|
} else if (MP_OBJ_IS_SMALL_INT(TOP())) {
|
|
|
|
mp_obj_t cause = POP();
|
|
|
|
switch (MP_OBJ_SMALL_INT_VALUE(cause)) {
|
|
|
|
case UNWIND_RETURN: {
|
|
|
|
mp_obj_t retval = POP();
|
2014-05-25 22:58:04 +01:00
|
|
|
mp_call_function_n_kw(TOP(), 3, 0, no_exc);
|
2014-04-14 16:22:44 +01:00
|
|
|
SET_TOP(retval);
|
|
|
|
PUSH(cause);
|
|
|
|
break;
|
2014-03-29 02:10:11 +00:00
|
|
|
}
|
2014-04-14 16:22:44 +01:00
|
|
|
case UNWIND_JUMP: {
|
2014-05-25 22:58:04 +01:00
|
|
|
mp_call_function_n_kw(sp[-2], 3, 0, no_exc);
|
2014-04-14 16:22:44 +01:00
|
|
|
// Pop __exit__ boundmethod at sp[-2]
|
|
|
|
sp[-2] = sp[-1];
|
|
|
|
sp[-1] = sp[0];
|
|
|
|
SET_TOP(cause);
|
|
|
|
break;
|
2014-03-29 02:10:11 +00:00
|
|
|
}
|
2014-04-14 16:22:44 +01:00
|
|
|
default:
|
|
|
|
assert(0);
|
2014-03-29 02:10:11 +00:00
|
|
|
}
|
2014-04-14 16:22:44 +01:00
|
|
|
} else if (mp_obj_is_exception_type(TOP())) {
|
|
|
|
mp_obj_t args[3] = {sp[0], sp[-1], sp[-2]};
|
2014-05-25 22:58:04 +01:00
|
|
|
mp_obj_t ret_value = mp_call_function_n_kw(sp[-3], 3, 0, args);
|
2014-04-14 16:22:44 +01:00
|
|
|
// Pop __exit__ boundmethod at sp[-3]
|
2014-05-25 22:58:04 +01:00
|
|
|
// TODO: Once semantics is proven, optimize for case when ret_value == True
|
2014-04-14 16:22:44 +01:00
|
|
|
sp[-3] = sp[-2];
|
|
|
|
sp[-2] = sp[-1];
|
|
|
|
sp[-1] = sp[0];
|
|
|
|
sp--;
|
2014-05-25 22:58:04 +01:00
|
|
|
if (mp_obj_is_true(ret_value)) {
|
2014-04-14 16:22:44 +01:00
|
|
|
// This is what CPython does
|
|
|
|
//PUSH(MP_OBJ_NEW_SMALL_INT(UNWIND_SILENCED));
|
|
|
|
// But what we need to do is - pop exception from value stack...
|
|
|
|
sp -= 3;
|
|
|
|
// ... pop "with" exception handler, and signal END_FINALLY
|
|
|
|
// to just execute finally handler normally (by pushing None
|
|
|
|
// on value stack)
|
2014-02-01 20:08:18 +00:00
|
|
|
assert(exc_sp >= exc_stack);
|
2014-04-14 16:22:44 +01:00
|
|
|
POP_EXC_BLOCK();
|
|
|
|
PUSH(mp_const_none);
|
2014-01-30 11:49:18 +00:00
|
|
|
}
|
2014-04-14 16:22:44 +01:00
|
|
|
} else {
|
|
|
|
assert(0);
|
|
|
|
}
|
|
|
|
DISPATCH();
|
|
|
|
}
|
2013-10-15 22:25:17 +01:00
|
|
|
|
2014-12-02 19:25:10 +00:00
|
|
|
ENTRY(MP_BC_UNWIND_JUMP): {
|
2014-12-28 05:17:43 +00:00
|
|
|
MARK_EXC_IP_SELECTIVE();
|
2014-04-14 16:22:44 +01:00
|
|
|
DECODE_SLABEL;
|
2014-12-02 19:25:10 +00:00
|
|
|
PUSH((void*)(ip + slab)); // push destination ip for jump
|
2014-07-03 13:25:24 +01:00
|
|
|
PUSH((void*)(mp_uint_t)(*ip)); // push number of exception handlers to unwind (0x80 bit set if we also need to pop stack)
|
2014-12-02 19:25:10 +00:00
|
|
|
unwind_jump:;
|
|
|
|
mp_uint_t unum = (mp_uint_t)POP(); // get number of exception handlers to unwind
|
2014-05-30 15:20:41 +01:00
|
|
|
while ((unum & 0x7f) > 0) {
|
2014-04-14 16:22:44 +01:00
|
|
|
unum -= 1;
|
2014-03-22 11:49:31 +00:00
|
|
|
assert(exc_sp >= exc_stack);
|
2014-12-22 12:49:57 +00:00
|
|
|
if (MP_TAGPTR_TAG1(exc_sp->val_sp)) {
|
2014-04-14 16:22:44 +01:00
|
|
|
// We're going to run "finally" code as a coroutine
|
|
|
|
// (not calling it recursively). Set up a sentinel
|
|
|
|
// on a stack so it can return back to us when it is
|
|
|
|
// done (when END_FINALLY reached).
|
|
|
|
PUSH((void*)unum); // push number of exception handlers left to unwind
|
|
|
|
PUSH(MP_OBJ_NEW_SMALL_INT(UNWIND_JUMP)); // push sentinel
|
|
|
|
ip = exc_sp->handler; // get exception handler byte code address
|
|
|
|
exc_sp--; // pop exception handler
|
|
|
|
goto dispatch_loop; // run the exception handler
|
2014-02-01 23:04:09 +00:00
|
|
|
}
|
2014-04-14 16:22:44 +01:00
|
|
|
exc_sp--;
|
|
|
|
}
|
|
|
|
ip = (const byte*)POP(); // pop destination ip for jump
|
2014-05-30 15:20:41 +01:00
|
|
|
if (unum != 0) {
|
|
|
|
sp--;
|
|
|
|
}
|
2014-10-25 18:19:55 +01:00
|
|
|
DISPATCH_WITH_PEND_EXC_CHECK();
|
2014-12-02 19:25:10 +00:00
|
|
|
}
|
2014-04-14 16:22:44 +01:00
|
|
|
|
|
|
|
// matched against: POP_BLOCK or POP_EXCEPT (anything else?)
|
|
|
|
ENTRY(MP_BC_SETUP_EXCEPT):
|
2014-12-02 19:25:10 +00:00
|
|
|
ENTRY(MP_BC_SETUP_FINALLY): {
|
2014-12-28 05:17:43 +00:00
|
|
|
MARK_EXC_IP_SELECTIVE();
|
2014-12-29 00:29:59 +00:00
|
|
|
#if SELECTIVE_EXC_IP
|
|
|
|
PUSH_EXC_BLOCK((code_state->ip[-1] == MP_BC_SETUP_FINALLY) ? 1 : 0);
|
|
|
|
#else
|
|
|
|
PUSH_EXC_BLOCK((code_state->ip[0] == MP_BC_SETUP_FINALLY) ? 1 : 0);
|
|
|
|
#endif
|
2014-04-14 16:22:44 +01:00
|
|
|
DISPATCH();
|
2014-12-02 19:25:10 +00:00
|
|
|
}
|
2014-04-14 16:22:44 +01:00
|
|
|
|
|
|
|
ENTRY(MP_BC_END_FINALLY):
|
2014-12-28 05:17:43 +00:00
|
|
|
MARK_EXC_IP_SELECTIVE();
|
2014-04-14 16:22:44 +01:00
|
|
|
// not fully implemented
|
|
|
|
// if TOS is an exception, reraises the exception (3 values on TOS)
|
|
|
|
// if TOS is None, just pops it and continues
|
|
|
|
// if TOS is an integer, does something else
|
|
|
|
// else error
|
|
|
|
if (mp_obj_is_exception_type(TOP())) {
|
py: Tidy up variables in VM, probably fixes subtle bugs.
Things get tricky when using the nlr code to catch exceptions. Need to
ensure that the variables (stack layout) in the exception handler are
the same as in the bit protected by the exception handler.
Prior to this patch there were a few bugs. 1) The constant
mp_const_MemoryError_obj was being preloaded to a specific location on
the stack at the start of the function. But this location on the stack
was being overwritten in the opcode loop (since it didn't think that
variable would ever be referenced again), and so when an exception
occurred, the variable holding the address of MemoryError was corrupt.
2) The FOR_ITER opcode detection in the exception handler used sp, which
may or may not contain the right value coming out of the main opcode
loop.
With this patch there is a clear separation of variables used in the
opcode loop and in the exception handler (should fix issue (2) above).
Furthermore, nlr_raise is no longer used in the opcode loop. Instead,
it jumps directly into the exception handler. This tells the C compiler
more about the possible code flow, and means that it should have the
same stack layout for the exception handler. This should fix issue (1)
above. Indeed, the generated (ARM) assembler has been checked explicitly,
and with 'goto exception_handler', the problem with &MemoryError is
fixed.
This may now fix problems with rge-sm, and probably many other subtle
bugs yet to show themselves. Incidentally, rge-sm now passes on
pyboard (with a reduced range of integration)!
Main lesson: nlr is tricky. Don't use nlr_push unless you know what you
are doing! Luckily, it's not used in many places. Using nlr_raise/jump
is fine.
2014-04-17 16:50:23 +01:00
|
|
|
RAISE(sp[-1]);
|
2014-04-14 16:22:44 +01:00
|
|
|
}
|
|
|
|
if (TOP() == mp_const_none) {
|
2014-01-18 14:10:48 +00:00
|
|
|
sp--;
|
2014-04-14 16:22:44 +01:00
|
|
|
} else if (MP_OBJ_IS_SMALL_INT(TOP())) {
|
|
|
|
// We finished "finally" coroutine and now dispatch back
|
|
|
|
// to our caller, based on TOS value
|
|
|
|
mp_unwind_reason_t reason = MP_OBJ_SMALL_INT_VALUE(POP());
|
|
|
|
switch (reason) {
|
|
|
|
case UNWIND_RETURN:
|
|
|
|
goto unwind_return;
|
|
|
|
case UNWIND_JUMP:
|
|
|
|
goto unwind_jump;
|
|
|
|
}
|
|
|
|
assert(0);
|
|
|
|
} else {
|
|
|
|
assert(0);
|
|
|
|
}
|
|
|
|
DISPATCH();
|
|
|
|
|
|
|
|
ENTRY(MP_BC_GET_ITER):
|
2014-12-28 05:17:43 +00:00
|
|
|
MARK_EXC_IP_SELECTIVE();
|
2014-04-14 16:22:44 +01:00
|
|
|
SET_TOP(mp_getiter(TOP()));
|
|
|
|
DISPATCH();
|
|
|
|
|
2014-05-25 22:58:04 +01:00
|
|
|
ENTRY(MP_BC_FOR_ITER): {
|
2014-12-28 05:17:43 +00:00
|
|
|
MARK_EXC_IP_SELECTIVE();
|
2014-04-14 16:22:44 +01:00
|
|
|
DECODE_ULABEL; // the jump offset if iteration finishes; for labels are always forward
|
py, vm: Replace save_ip, save_sp with code_state->{ip, sp}.
This may seem a bit of a risky change, in that it may introduce crazy
bugs with respect to volatile variables in the VM loop. But, I think it
should be fine: code_state points to some external memory, so the
compiler should always read/write to that memory when accessing the
ip/sp variables (ie not put them in registers).
Anyway, it passes all tests and improves on all efficiency fronts: about
2-4% faster (64-bit unix), 16 bytes less stack space per call (64-bit
unix) and slightly less executable size (unix and stmhal).
The reason it's more efficient is save_ip and save_sp were volatile
variables, so were anyway stored on the stack (in memory, not regs).
Thus converting them to code_state->{ip, sp} doesn't cost an extra
memory dereference (except maybe to get code_state, but that can be put
in a register and then made more efficient for other uses of it).
2014-06-01 12:32:28 +01:00
|
|
|
code_state->sp = sp;
|
2014-05-11 18:32:39 +01:00
|
|
|
assert(TOP());
|
2014-05-25 22:58:04 +01:00
|
|
|
mp_obj_t value = mp_iternext_allow_raise(TOP());
|
|
|
|
if (value == MP_OBJ_STOP_ITERATION) {
|
2014-04-14 16:22:44 +01:00
|
|
|
--sp; // pop the exhausted iterator
|
2014-12-02 19:25:10 +00:00
|
|
|
ip += ulab; // jump to after for-block
|
2014-04-14 16:22:44 +01:00
|
|
|
} else {
|
2014-05-25 22:58:04 +01:00
|
|
|
PUSH(value); // push the next iteration value
|
2014-04-14 16:22:44 +01:00
|
|
|
}
|
|
|
|
DISPATCH();
|
2014-05-25 22:58:04 +01:00
|
|
|
}
|
2014-04-14 16:22:44 +01:00
|
|
|
|
|
|
|
// matched against: SETUP_EXCEPT, SETUP_FINALLY, SETUP_WITH
|
|
|
|
ENTRY(MP_BC_POP_BLOCK):
|
|
|
|
// we are exiting an exception handler, so pop the last one of the exception-stack
|
|
|
|
assert(exc_sp >= exc_stack);
|
|
|
|
POP_EXC_BLOCK();
|
|
|
|
DISPATCH();
|
|
|
|
|
|
|
|
// matched against: SETUP_EXCEPT
|
|
|
|
ENTRY(MP_BC_POP_EXCEPT):
|
|
|
|
// TODO need to work out how blocks work etc
|
|
|
|
// pops block, checks it's an exception block, and restores the stack, saving the 3 exception values to local threadstate
|
|
|
|
assert(exc_sp >= exc_stack);
|
|
|
|
assert(currently_in_except_block);
|
|
|
|
//sp = (mp_obj_t*)(*exc_sp--);
|
|
|
|
//exc_sp--; // discard ip
|
|
|
|
POP_EXC_BLOCK();
|
|
|
|
//sp -= 3; // pop 3 exception values
|
|
|
|
DISPATCH();
|
|
|
|
|
|
|
|
ENTRY(MP_BC_NOT):
|
|
|
|
if (TOP() == mp_const_true) {
|
|
|
|
SET_TOP(mp_const_false);
|
|
|
|
} else {
|
|
|
|
SET_TOP(mp_const_true);
|
|
|
|
}
|
|
|
|
DISPATCH();
|
|
|
|
|
2014-12-02 19:25:10 +00:00
|
|
|
ENTRY(MP_BC_BUILD_TUPLE): {
|
2014-12-28 05:17:43 +00:00
|
|
|
MARK_EXC_IP_SELECTIVE();
|
2014-04-14 16:22:44 +01:00
|
|
|
DECODE_UINT;
|
|
|
|
sp -= unum - 1;
|
|
|
|
SET_TOP(mp_obj_new_tuple(unum, sp));
|
|
|
|
DISPATCH();
|
2014-12-02 19:25:10 +00:00
|
|
|
}
|
2014-04-14 16:22:44 +01:00
|
|
|
|
2014-12-02 19:25:10 +00:00
|
|
|
ENTRY(MP_BC_BUILD_LIST): {
|
2014-12-28 05:17:43 +00:00
|
|
|
MARK_EXC_IP_SELECTIVE();
|
2014-04-14 16:22:44 +01:00
|
|
|
DECODE_UINT;
|
|
|
|
sp -= unum - 1;
|
|
|
|
SET_TOP(mp_obj_new_list(unum, sp));
|
|
|
|
DISPATCH();
|
2014-12-02 19:25:10 +00:00
|
|
|
}
|
2014-04-14 16:22:44 +01:00
|
|
|
|
2014-12-02 19:25:10 +00:00
|
|
|
ENTRY(MP_BC_LIST_APPEND): {
|
2014-12-28 05:17:43 +00:00
|
|
|
MARK_EXC_IP_SELECTIVE();
|
2014-04-14 16:22:44 +01:00
|
|
|
DECODE_UINT;
|
|
|
|
// I think it's guaranteed by the compiler that sp[unum] is a list
|
|
|
|
mp_obj_list_append(sp[-unum], sp[0]);
|
|
|
|
sp--;
|
|
|
|
DISPATCH();
|
2014-12-02 19:25:10 +00:00
|
|
|
}
|
2014-04-14 16:22:44 +01:00
|
|
|
|
2014-12-02 19:25:10 +00:00
|
|
|
ENTRY(MP_BC_BUILD_MAP): {
|
2014-12-28 05:17:43 +00:00
|
|
|
MARK_EXC_IP_SELECTIVE();
|
2014-04-14 16:22:44 +01:00
|
|
|
DECODE_UINT;
|
|
|
|
PUSH(mp_obj_new_dict(unum));
|
|
|
|
DISPATCH();
|
2014-12-02 19:25:10 +00:00
|
|
|
}
|
2014-04-14 16:22:44 +01:00
|
|
|
|
|
|
|
ENTRY(MP_BC_STORE_MAP):
|
2014-12-28 05:17:43 +00:00
|
|
|
MARK_EXC_IP_SELECTIVE();
|
2014-04-14 16:22:44 +01:00
|
|
|
sp -= 2;
|
|
|
|
mp_obj_dict_store(sp[0], sp[2], sp[1]);
|
|
|
|
DISPATCH();
|
|
|
|
|
2014-12-02 19:25:10 +00:00
|
|
|
ENTRY(MP_BC_MAP_ADD): {
|
2014-12-28 05:17:43 +00:00
|
|
|
MARK_EXC_IP_SELECTIVE();
|
2014-04-14 16:22:44 +01:00
|
|
|
DECODE_UINT;
|
|
|
|
// I think it's guaranteed by the compiler that sp[-unum - 1] is a map
|
|
|
|
mp_obj_dict_store(sp[-unum - 1], sp[0], sp[-1]);
|
|
|
|
sp -= 2;
|
|
|
|
DISPATCH();
|
2014-12-02 19:25:10 +00:00
|
|
|
}
|
2014-04-14 16:22:44 +01:00
|
|
|
|
2014-06-01 13:46:47 +01:00
|
|
|
#if MICROPY_PY_BUILTINS_SET
|
2014-12-02 19:25:10 +00:00
|
|
|
ENTRY(MP_BC_BUILD_SET): {
|
2014-12-28 05:17:43 +00:00
|
|
|
MARK_EXC_IP_SELECTIVE();
|
2014-04-14 16:22:44 +01:00
|
|
|
DECODE_UINT;
|
|
|
|
sp -= unum - 1;
|
|
|
|
SET_TOP(mp_obj_new_set(unum, sp));
|
|
|
|
DISPATCH();
|
2014-12-02 19:25:10 +00:00
|
|
|
}
|
2014-04-14 16:22:44 +01:00
|
|
|
|
2014-12-02 19:25:10 +00:00
|
|
|
ENTRY(MP_BC_SET_ADD): {
|
2014-12-28 05:17:43 +00:00
|
|
|
MARK_EXC_IP_SELECTIVE();
|
2014-04-14 16:22:44 +01:00
|
|
|
DECODE_UINT;
|
|
|
|
// I think it's guaranteed by the compiler that sp[-unum] is a set
|
|
|
|
mp_obj_set_store(sp[-unum], sp[0]);
|
|
|
|
sp--;
|
|
|
|
DISPATCH();
|
2014-12-02 19:25:10 +00:00
|
|
|
}
|
2014-06-01 13:46:47 +01:00
|
|
|
#endif
|
2013-10-16 20:57:49 +01:00
|
|
|
|
2014-06-01 13:32:54 +01:00
|
|
|
#if MICROPY_PY_BUILTINS_SLICE
|
2014-12-02 19:25:10 +00:00
|
|
|
ENTRY(MP_BC_BUILD_SLICE): {
|
2014-12-28 05:17:43 +00:00
|
|
|
MARK_EXC_IP_SELECTIVE();
|
2014-04-14 16:22:44 +01:00
|
|
|
DECODE_UINT;
|
|
|
|
if (unum == 2) {
|
2014-05-25 22:58:04 +01:00
|
|
|
mp_obj_t stop = POP();
|
|
|
|
mp_obj_t start = TOP();
|
|
|
|
SET_TOP(mp_obj_new_slice(start, stop, mp_const_none));
|
2014-04-14 16:22:44 +01:00
|
|
|
} else {
|
2014-05-25 22:58:04 +01:00
|
|
|
mp_obj_t step = POP();
|
|
|
|
mp_obj_t stop = POP();
|
|
|
|
mp_obj_t start = TOP();
|
|
|
|
SET_TOP(mp_obj_new_slice(start, stop, step));
|
2014-04-14 16:22:44 +01:00
|
|
|
}
|
|
|
|
DISPATCH();
|
2014-12-02 19:25:10 +00:00
|
|
|
}
|
2014-01-03 23:34:23 +00:00
|
|
|
#endif
|
2014-01-03 00:48:56 +00:00
|
|
|
|
2014-12-02 19:25:10 +00:00
|
|
|
ENTRY(MP_BC_UNPACK_SEQUENCE): {
|
2014-12-28 05:17:43 +00:00
|
|
|
MARK_EXC_IP_SELECTIVE();
|
2014-04-14 16:22:44 +01:00
|
|
|
DECODE_UINT;
|
|
|
|
mp_unpack_sequence(sp[0], unum, sp);
|
|
|
|
sp += unum - 1;
|
|
|
|
DISPATCH();
|
2014-12-02 19:25:10 +00:00
|
|
|
}
|
2014-04-14 16:22:44 +01:00
|
|
|
|
2014-12-02 19:25:10 +00:00
|
|
|
ENTRY(MP_BC_UNPACK_EX): {
|
2014-12-28 05:17:43 +00:00
|
|
|
MARK_EXC_IP_SELECTIVE();
|
2014-04-14 16:22:44 +01:00
|
|
|
DECODE_UINT;
|
|
|
|
mp_unpack_ex(sp[0], unum, sp);
|
|
|
|
sp += (unum & 0xff) + ((unum >> 8) & 0xff);
|
|
|
|
DISPATCH();
|
2014-12-02 19:25:10 +00:00
|
|
|
}
|
2014-04-14 16:22:44 +01:00
|
|
|
|
2014-12-02 19:25:10 +00:00
|
|
|
ENTRY(MP_BC_MAKE_FUNCTION): {
|
2014-04-14 16:22:44 +01:00
|
|
|
DECODE_PTR;
|
2014-12-02 19:25:10 +00:00
|
|
|
PUSH(mp_make_function_from_raw_code(ptr, MP_OBJ_NULL, MP_OBJ_NULL));
|
2014-04-14 16:22:44 +01:00
|
|
|
DISPATCH();
|
2014-12-02 19:25:10 +00:00
|
|
|
}
|
2014-04-14 16:22:44 +01:00
|
|
|
|
2014-05-25 22:58:04 +01:00
|
|
|
ENTRY(MP_BC_MAKE_FUNCTION_DEFARGS): {
|
2014-04-14 16:22:44 +01:00
|
|
|
DECODE_PTR;
|
|
|
|
// Stack layout: def_tuple def_dict <- TOS
|
2014-05-25 22:58:04 +01:00
|
|
|
mp_obj_t def_dict = POP();
|
2014-12-02 19:25:10 +00:00
|
|
|
SET_TOP(mp_make_function_from_raw_code(ptr, TOP(), def_dict));
|
2014-04-14 16:22:44 +01:00
|
|
|
DISPATCH();
|
2014-05-25 22:58:04 +01:00
|
|
|
}
|
2014-04-14 16:22:44 +01:00
|
|
|
|
2014-04-20 17:50:40 +01:00
|
|
|
ENTRY(MP_BC_MAKE_CLOSURE): {
|
2014-04-14 16:22:44 +01:00
|
|
|
DECODE_PTR;
|
2014-07-03 13:25:24 +01:00
|
|
|
mp_uint_t n_closed_over = *ip++;
|
2014-04-20 17:50:40 +01:00
|
|
|
// Stack layout: closed_overs <- TOS
|
|
|
|
sp -= n_closed_over - 1;
|
2014-12-02 19:25:10 +00:00
|
|
|
SET_TOP(mp_make_closure_from_raw_code(ptr, n_closed_over, sp));
|
2014-04-14 16:22:44 +01:00
|
|
|
DISPATCH();
|
2014-04-20 17:50:40 +01:00
|
|
|
}
|
2014-04-14 16:22:44 +01:00
|
|
|
|
2014-04-20 17:50:40 +01:00
|
|
|
ENTRY(MP_BC_MAKE_CLOSURE_DEFARGS): {
|
2014-04-14 16:22:44 +01:00
|
|
|
DECODE_PTR;
|
2014-07-03 13:25:24 +01:00
|
|
|
mp_uint_t n_closed_over = *ip++;
|
2014-04-20 17:50:40 +01:00
|
|
|
// Stack layout: def_tuple def_dict closed_overs <- TOS
|
|
|
|
sp -= 2 + n_closed_over - 1;
|
2014-12-02 19:25:10 +00:00
|
|
|
SET_TOP(mp_make_closure_from_raw_code(ptr, 0x100 | n_closed_over, sp));
|
2014-04-14 16:22:44 +01:00
|
|
|
DISPATCH();
|
2014-04-20 17:50:40 +01:00
|
|
|
}
|
2014-04-14 16:22:44 +01:00
|
|
|
|
2014-12-02 19:25:10 +00:00
|
|
|
ENTRY(MP_BC_CALL_FUNCTION): {
|
2014-12-28 05:17:43 +00:00
|
|
|
MARK_EXC_IP_SELECTIVE();
|
2014-04-14 16:22:44 +01:00
|
|
|
DECODE_UINT;
|
|
|
|
// unum & 0xff == n_positional
|
|
|
|
// (unum >> 8) & 0xff == n_keyword
|
|
|
|
sp -= (unum & 0xff) + ((unum >> 7) & 0x1fe);
|
|
|
|
SET_TOP(mp_call_function_n_kw(*sp, unum & 0xff, (unum >> 8) & 0xff, sp + 1));
|
|
|
|
DISPATCH();
|
2014-12-02 19:25:10 +00:00
|
|
|
}
|
2014-04-14 16:22:44 +01:00
|
|
|
|
2014-12-02 19:25:10 +00:00
|
|
|
ENTRY(MP_BC_CALL_FUNCTION_VAR_KW): {
|
2014-12-28 05:17:43 +00:00
|
|
|
MARK_EXC_IP_SELECTIVE();
|
2014-04-14 16:22:44 +01:00
|
|
|
DECODE_UINT;
|
|
|
|
// unum & 0xff == n_positional
|
|
|
|
// (unum >> 8) & 0xff == n_keyword
|
|
|
|
// We have folowing stack layout here:
|
|
|
|
// fun arg0 arg1 ... kw0 val0 kw1 val1 ... seq dict <- TOS
|
|
|
|
sp -= (unum & 0xff) + ((unum >> 7) & 0x1fe) + 2;
|
|
|
|
SET_TOP(mp_call_method_n_kw_var(false, unum, sp));
|
|
|
|
DISPATCH();
|
2014-12-02 19:25:10 +00:00
|
|
|
}
|
2014-04-14 16:22:44 +01:00
|
|
|
|
2014-12-02 19:25:10 +00:00
|
|
|
ENTRY(MP_BC_CALL_METHOD): {
|
2014-12-28 05:17:43 +00:00
|
|
|
MARK_EXC_IP_SELECTIVE();
|
2014-04-14 16:22:44 +01:00
|
|
|
DECODE_UINT;
|
|
|
|
// unum & 0xff == n_positional
|
|
|
|
// (unum >> 8) & 0xff == n_keyword
|
|
|
|
sp -= (unum & 0xff) + ((unum >> 7) & 0x1fe) + 1;
|
|
|
|
SET_TOP(mp_call_method_n_kw(unum & 0xff, (unum >> 8) & 0xff, sp));
|
|
|
|
DISPATCH();
|
2014-12-02 19:25:10 +00:00
|
|
|
}
|
2014-04-14 16:22:44 +01:00
|
|
|
|
2014-12-02 19:25:10 +00:00
|
|
|
ENTRY(MP_BC_CALL_METHOD_VAR_KW): {
|
2014-12-28 05:17:43 +00:00
|
|
|
MARK_EXC_IP_SELECTIVE();
|
2014-04-14 16:22:44 +01:00
|
|
|
DECODE_UINT;
|
|
|
|
// unum & 0xff == n_positional
|
|
|
|
// (unum >> 8) & 0xff == n_keyword
|
|
|
|
// We have folowing stack layout here:
|
|
|
|
// fun self arg0 arg1 ... kw0 val0 kw1 val1 ... seq dict <- TOS
|
|
|
|
sp -= (unum & 0xff) + ((unum >> 7) & 0x1fe) + 3;
|
|
|
|
SET_TOP(mp_call_method_n_kw_var(true, unum, sp));
|
|
|
|
DISPATCH();
|
2014-12-02 19:25:10 +00:00
|
|
|
}
|
2014-04-14 16:22:44 +01:00
|
|
|
|
|
|
|
ENTRY(MP_BC_RETURN_VALUE):
|
2014-12-28 05:17:43 +00:00
|
|
|
MARK_EXC_IP_SELECTIVE();
|
2014-01-31 22:55:05 +00:00
|
|
|
unwind_return:
|
2014-04-14 16:22:44 +01:00
|
|
|
while (exc_sp >= exc_stack) {
|
2014-12-22 12:49:57 +00:00
|
|
|
if (MP_TAGPTR_TAG1(exc_sp->val_sp)) {
|
2014-04-14 16:22:44 +01:00
|
|
|
// We're going to run "finally" code as a coroutine
|
|
|
|
// (not calling it recursively). Set up a sentinel
|
|
|
|
// on a stack so it can return back to us when it is
|
|
|
|
// done (when END_FINALLY reached).
|
|
|
|
PUSH(MP_OBJ_NEW_SMALL_INT(UNWIND_RETURN));
|
|
|
|
ip = exc_sp->handler;
|
|
|
|
// We don't need to do anything with sp, finally is just
|
|
|
|
// syntactic sugar for sequential execution??
|
|
|
|
// sp =
|
2014-01-31 22:55:05 +00:00
|
|
|
exc_sp--;
|
2014-04-14 16:22:44 +01:00
|
|
|
goto dispatch_loop;
|
2014-01-31 22:55:05 +00:00
|
|
|
}
|
2014-04-14 16:22:44 +01:00
|
|
|
exc_sp--;
|
|
|
|
}
|
|
|
|
nlr_pop();
|
2014-05-31 14:50:46 +01:00
|
|
|
code_state->sp = sp;
|
2014-04-14 16:22:44 +01:00
|
|
|
assert(exc_sp == exc_stack - 1);
|
|
|
|
return MP_VM_RETURN_NORMAL;
|
|
|
|
|
2014-05-25 22:58:04 +01:00
|
|
|
ENTRY(MP_BC_RAISE_VARARGS): {
|
2014-12-28 05:17:43 +00:00
|
|
|
MARK_EXC_IP_SELECTIVE();
|
2014-12-02 19:25:10 +00:00
|
|
|
mp_uint_t unum = *ip++;
|
2014-05-25 22:58:04 +01:00
|
|
|
mp_obj_t obj;
|
2014-04-14 16:22:44 +01:00
|
|
|
assert(unum <= 1);
|
|
|
|
if (unum == 0) {
|
|
|
|
// search for the inner-most previous exception, to reraise it
|
2014-05-25 22:58:04 +01:00
|
|
|
obj = MP_OBJ_NULL;
|
2014-04-14 16:22:44 +01:00
|
|
|
for (mp_exc_stack_t *e = exc_sp; e >= exc_stack; e--) {
|
|
|
|
if (e->prev_exc != MP_OBJ_NULL) {
|
2014-05-25 22:58:04 +01:00
|
|
|
obj = e->prev_exc;
|
2014-04-14 16:22:44 +01:00
|
|
|
break;
|
2014-03-29 17:44:15 +00:00
|
|
|
}
|
2014-03-26 12:42:17 +00:00
|
|
|
}
|
2014-05-25 22:58:04 +01:00
|
|
|
if (obj == MP_OBJ_NULL) {
|
|
|
|
obj = mp_obj_new_exception_msg(&mp_type_RuntimeError, "No active exception to reraise");
|
|
|
|
RAISE(obj);
|
2014-04-14 16:22:44 +01:00
|
|
|
}
|
|
|
|
} else {
|
2014-05-25 22:58:04 +01:00
|
|
|
obj = POP();
|
2014-04-14 16:22:44 +01:00
|
|
|
}
|
2014-05-25 22:58:04 +01:00
|
|
|
obj = mp_make_raise_obj(obj);
|
|
|
|
RAISE(obj);
|
|
|
|
}
|
2014-01-10 14:09:55 +00:00
|
|
|
|
2014-04-14 16:22:44 +01:00
|
|
|
ENTRY(MP_BC_YIELD_VALUE):
|
2014-03-26 15:36:12 +00:00
|
|
|
yield:
|
2014-04-14 16:22:44 +01:00
|
|
|
nlr_pop();
|
2014-05-31 14:50:46 +01:00
|
|
|
code_state->ip = ip;
|
|
|
|
code_state->sp = sp;
|
|
|
|
code_state->exc_sp = MP_TAGPTR_MAKE(exc_sp, currently_in_except_block);
|
2014-04-14 16:22:44 +01:00
|
|
|
return MP_VM_RETURN_YIELD;
|
2013-10-15 22:25:17 +01:00
|
|
|
|
2014-04-14 16:22:44 +01:00
|
|
|
ENTRY(MP_BC_YIELD_FROM): {
|
2014-12-28 05:17:43 +00:00
|
|
|
MARK_EXC_IP_SELECTIVE();
|
2014-03-26 15:36:12 +00:00
|
|
|
//#define EXC_MATCH(exc, type) MP_OBJ_IS_TYPE(exc, type)
|
|
|
|
#define EXC_MATCH(exc, type) mp_obj_exception_match(exc, type)
|
py: Tidy up variables in VM, probably fixes subtle bugs.
Things get tricky when using the nlr code to catch exceptions. Need to
ensure that the variables (stack layout) in the exception handler are
the same as in the bit protected by the exception handler.
Prior to this patch there were a few bugs. 1) The constant
mp_const_MemoryError_obj was being preloaded to a specific location on
the stack at the start of the function. But this location on the stack
was being overwritten in the opcode loop (since it didn't think that
variable would ever be referenced again), and so when an exception
occurred, the variable holding the address of MemoryError was corrupt.
2) The FOR_ITER opcode detection in the exception handler used sp, which
may or may not contain the right value coming out of the main opcode
loop.
With this patch there is a clear separation of variables used in the
opcode loop and in the exception handler (should fix issue (2) above).
Furthermore, nlr_raise is no longer used in the opcode loop. Instead,
it jumps directly into the exception handler. This tells the C compiler
more about the possible code flow, and means that it should have the
same stack layout for the exception handler. This should fix issue (1)
above. Indeed, the generated (ARM) assembler has been checked explicitly,
and with 'goto exception_handler', the problem with &MemoryError is
fixed.
This may now fix problems with rge-sm, and probably many other subtle
bugs yet to show themselves. Incidentally, rge-sm now passes on
pyboard (with a reduced range of integration)!
Main lesson: nlr is tricky. Don't use nlr_push unless you know what you
are doing! Luckily, it's not used in many places. Using nlr_raise/jump
is fine.
2014-04-17 16:50:23 +01:00
|
|
|
#define GENERATOR_EXIT_IF_NEEDED(t) if (t != MP_OBJ_NULL && EXC_MATCH(t, &mp_type_GeneratorExit)) { RAISE(t); }
|
2014-04-14 16:22:44 +01:00
|
|
|
mp_vm_return_kind_t ret_kind;
|
2014-05-25 22:58:04 +01:00
|
|
|
mp_obj_t send_value = POP();
|
2014-04-14 16:22:44 +01:00
|
|
|
mp_obj_t t_exc = MP_OBJ_NULL;
|
2014-05-25 22:58:04 +01:00
|
|
|
mp_obj_t ret_value;
|
2014-04-14 16:22:44 +01:00
|
|
|
if (inject_exc != MP_OBJ_NULL) {
|
|
|
|
t_exc = inject_exc;
|
|
|
|
inject_exc = MP_OBJ_NULL;
|
2014-05-25 22:58:04 +01:00
|
|
|
ret_kind = mp_resume(TOP(), MP_OBJ_NULL, t_exc, &ret_value);
|
2014-04-14 16:22:44 +01:00
|
|
|
} else {
|
2014-05-25 22:58:04 +01:00
|
|
|
ret_kind = mp_resume(TOP(), send_value, MP_OBJ_NULL, &ret_value);
|
2014-04-14 16:22:44 +01:00
|
|
|
}
|
2014-03-26 15:36:12 +00:00
|
|
|
|
2014-04-14 16:22:44 +01:00
|
|
|
if (ret_kind == MP_VM_RETURN_YIELD) {
|
|
|
|
ip--;
|
2014-05-25 22:58:04 +01:00
|
|
|
PUSH(ret_value);
|
2014-04-14 16:22:44 +01:00
|
|
|
goto yield;
|
|
|
|
}
|
|
|
|
if (ret_kind == MP_VM_RETURN_NORMAL) {
|
|
|
|
// Pop exhausted gen
|
|
|
|
sp--;
|
2014-05-25 22:58:04 +01:00
|
|
|
if (ret_value == MP_OBJ_NULL) {
|
2014-04-14 16:22:44 +01:00
|
|
|
// Optimize StopIteration
|
|
|
|
// TODO: get StopIteration's value
|
|
|
|
PUSH(mp_const_none);
|
|
|
|
} else {
|
2014-05-25 22:58:04 +01:00
|
|
|
PUSH(ret_value);
|
2014-03-26 15:36:12 +00:00
|
|
|
}
|
|
|
|
|
2014-04-14 16:22:44 +01:00
|
|
|
// If we injected GeneratorExit downstream, then even
|
|
|
|
// if it was swallowed, we re-raise GeneratorExit
|
|
|
|
GENERATOR_EXIT_IF_NEEDED(t_exc);
|
|
|
|
DISPATCH();
|
|
|
|
}
|
|
|
|
if (ret_kind == MP_VM_RETURN_EXCEPTION) {
|
|
|
|
// Pop exhausted gen
|
|
|
|
sp--;
|
2014-05-25 22:58:04 +01:00
|
|
|
if (EXC_MATCH(ret_value, &mp_type_StopIteration)) {
|
|
|
|
PUSH(mp_obj_exception_get_value(ret_value));
|
2014-03-26 17:24:03 +00:00
|
|
|
// If we injected GeneratorExit downstream, then even
|
|
|
|
// if it was swallowed, we re-raise GeneratorExit
|
|
|
|
GENERATOR_EXIT_IF_NEEDED(t_exc);
|
2014-04-14 16:22:44 +01:00
|
|
|
DISPATCH();
|
|
|
|
} else {
|
2014-05-25 22:58:04 +01:00
|
|
|
RAISE(ret_value);
|
2014-03-26 15:36:12 +00:00
|
|
|
}
|
|
|
|
}
|
2014-04-14 16:22:44 +01:00
|
|
|
}
|
2014-03-26 15:36:12 +00:00
|
|
|
|
2014-05-25 22:58:04 +01:00
|
|
|
ENTRY(MP_BC_IMPORT_NAME): {
|
2014-12-28 05:17:43 +00:00
|
|
|
MARK_EXC_IP_SELECTIVE();
|
2014-04-14 16:22:44 +01:00
|
|
|
DECODE_QSTR;
|
2014-05-25 22:58:04 +01:00
|
|
|
mp_obj_t obj = POP();
|
|
|
|
SET_TOP(mp_import_name(qst, obj, TOP()));
|
2014-04-14 16:22:44 +01:00
|
|
|
DISPATCH();
|
2014-05-25 22:58:04 +01:00
|
|
|
}
|
2014-04-14 16:22:44 +01:00
|
|
|
|
2014-05-25 22:58:04 +01:00
|
|
|
ENTRY(MP_BC_IMPORT_FROM): {
|
2014-12-28 05:17:43 +00:00
|
|
|
MARK_EXC_IP_SELECTIVE();
|
2014-04-14 16:22:44 +01:00
|
|
|
DECODE_QSTR;
|
2014-05-25 22:58:04 +01:00
|
|
|
mp_obj_t obj = mp_import_from(TOP(), qst);
|
|
|
|
PUSH(obj);
|
2014-04-14 16:22:44 +01:00
|
|
|
DISPATCH();
|
2014-05-25 22:58:04 +01:00
|
|
|
}
|
2014-04-14 16:22:44 +01:00
|
|
|
|
|
|
|
ENTRY(MP_BC_IMPORT_STAR):
|
2014-12-28 05:17:43 +00:00
|
|
|
MARK_EXC_IP_SELECTIVE();
|
2014-04-14 16:22:44 +01:00
|
|
|
mp_import_all(POP());
|
|
|
|
DISPATCH();
|
|
|
|
|
py: Compress load-int, load-fast, store-fast, unop, binop bytecodes.
There is a lot of potential to compress bytecodes and make more use of the
coding space. This patch introduces "multi" bytecodes which have their
argument included in the bytecode (by addition).
UNARY_OP and BINARY_OP now no longer take a 1 byte argument for the
opcode. Rather, the opcode is included in the first byte itself.
LOAD_FAST_[0,1,2] and STORE_FAST_[0,1,2] are removed in favour of their
multi versions, which can take an argument between 0 and 15 inclusive.
The majority of LOAD_FAST/STORE_FAST codes fit in this range and so this
saves a byte for each of these.
LOAD_CONST_SMALL_INT_MULTI is used to load small ints between -16 and 47
inclusive. Such ints are quite common and now only need 1 byte to
store, and now have much faster decoding.
In all, this patch saves about 2% RAM for typical bytecode (1.8% on
64-bit test, 2.5% on pyboard test). It also reduces the binary size
(because bytecodes are simplified) and doesn't harm performance.
2014-10-25 16:43:46 +01:00
|
|
|
#if MICROPY_OPT_COMPUTED_GOTO
|
|
|
|
ENTRY(MP_BC_LOAD_CONST_SMALL_INT_MULTI):
|
|
|
|
PUSH(MP_OBJ_NEW_SMALL_INT((mp_int_t)ip[-1] - MP_BC_LOAD_CONST_SMALL_INT_MULTI - 16));
|
|
|
|
DISPATCH();
|
|
|
|
|
|
|
|
ENTRY(MP_BC_LOAD_FAST_MULTI):
|
|
|
|
obj_shared = fastn[MP_BC_LOAD_FAST_MULTI - (mp_int_t)ip[-1]];
|
|
|
|
goto load_check;
|
|
|
|
|
|
|
|
ENTRY(MP_BC_STORE_FAST_MULTI):
|
|
|
|
fastn[MP_BC_STORE_FAST_MULTI - (mp_int_t)ip[-1]] = POP();
|
|
|
|
DISPATCH();
|
|
|
|
|
|
|
|
ENTRY(MP_BC_UNARY_OP_MULTI):
|
2014-12-28 05:17:43 +00:00
|
|
|
MARK_EXC_IP_SELECTIVE();
|
py: Compress load-int, load-fast, store-fast, unop, binop bytecodes.
There is a lot of potential to compress bytecodes and make more use of the
coding space. This patch introduces "multi" bytecodes which have their
argument included in the bytecode (by addition).
UNARY_OP and BINARY_OP now no longer take a 1 byte argument for the
opcode. Rather, the opcode is included in the first byte itself.
LOAD_FAST_[0,1,2] and STORE_FAST_[0,1,2] are removed in favour of their
multi versions, which can take an argument between 0 and 15 inclusive.
The majority of LOAD_FAST/STORE_FAST codes fit in this range and so this
saves a byte for each of these.
LOAD_CONST_SMALL_INT_MULTI is used to load small ints between -16 and 47
inclusive. Such ints are quite common and now only need 1 byte to
store, and now have much faster decoding.
In all, this patch saves about 2% RAM for typical bytecode (1.8% on
64-bit test, 2.5% on pyboard test). It also reduces the binary size
(because bytecodes are simplified) and doesn't harm performance.
2014-10-25 16:43:46 +01:00
|
|
|
SET_TOP(mp_unary_op(ip[-1] - MP_BC_UNARY_OP_MULTI, TOP()));
|
|
|
|
DISPATCH();
|
|
|
|
|
|
|
|
ENTRY(MP_BC_BINARY_OP_MULTI): {
|
2014-12-28 05:17:43 +00:00
|
|
|
MARK_EXC_IP_SELECTIVE();
|
py: Compress load-int, load-fast, store-fast, unop, binop bytecodes.
There is a lot of potential to compress bytecodes and make more use of the
coding space. This patch introduces "multi" bytecodes which have their
argument included in the bytecode (by addition).
UNARY_OP and BINARY_OP now no longer take a 1 byte argument for the
opcode. Rather, the opcode is included in the first byte itself.
LOAD_FAST_[0,1,2] and STORE_FAST_[0,1,2] are removed in favour of their
multi versions, which can take an argument between 0 and 15 inclusive.
The majority of LOAD_FAST/STORE_FAST codes fit in this range and so this
saves a byte for each of these.
LOAD_CONST_SMALL_INT_MULTI is used to load small ints between -16 and 47
inclusive. Such ints are quite common and now only need 1 byte to
store, and now have much faster decoding.
In all, this patch saves about 2% RAM for typical bytecode (1.8% on
64-bit test, 2.5% on pyboard test). It also reduces the binary size
(because bytecodes are simplified) and doesn't harm performance.
2014-10-25 16:43:46 +01:00
|
|
|
mp_obj_t rhs = POP();
|
|
|
|
mp_obj_t lhs = TOP();
|
|
|
|
SET_TOP(mp_binary_op(ip[-1] - MP_BC_BINARY_OP_MULTI, lhs, rhs));
|
|
|
|
DISPATCH();
|
|
|
|
}
|
|
|
|
|
|
|
|
ENTRY_DEFAULT:
|
2014-12-28 05:17:43 +00:00
|
|
|
MARK_EXC_IP_SELECTIVE();
|
py: Compress load-int, load-fast, store-fast, unop, binop bytecodes.
There is a lot of potential to compress bytecodes and make more use of the
coding space. This patch introduces "multi" bytecodes which have their
argument included in the bytecode (by addition).
UNARY_OP and BINARY_OP now no longer take a 1 byte argument for the
opcode. Rather, the opcode is included in the first byte itself.
LOAD_FAST_[0,1,2] and STORE_FAST_[0,1,2] are removed in favour of their
multi versions, which can take an argument between 0 and 15 inclusive.
The majority of LOAD_FAST/STORE_FAST codes fit in this range and so this
saves a byte for each of these.
LOAD_CONST_SMALL_INT_MULTI is used to load small ints between -16 and 47
inclusive. Such ints are quite common and now only need 1 byte to
store, and now have much faster decoding.
In all, this patch saves about 2% RAM for typical bytecode (1.8% on
64-bit test, 2.5% on pyboard test). It also reduces the binary size
(because bytecodes are simplified) and doesn't harm performance.
2014-10-25 16:43:46 +01:00
|
|
|
#else
|
|
|
|
ENTRY_DEFAULT:
|
|
|
|
if (ip[-1] < MP_BC_LOAD_CONST_SMALL_INT_MULTI + 64) {
|
|
|
|
PUSH(MP_OBJ_NEW_SMALL_INT((mp_int_t)ip[-1] - MP_BC_LOAD_CONST_SMALL_INT_MULTI - 16));
|
|
|
|
DISPATCH();
|
|
|
|
} else if (ip[-1] < MP_BC_LOAD_FAST_MULTI + 16) {
|
|
|
|
obj_shared = fastn[MP_BC_LOAD_FAST_MULTI - (mp_int_t)ip[-1]];
|
|
|
|
goto load_check;
|
|
|
|
} else if (ip[-1] < MP_BC_STORE_FAST_MULTI + 16) {
|
|
|
|
fastn[MP_BC_STORE_FAST_MULTI - (mp_int_t)ip[-1]] = POP();
|
|
|
|
DISPATCH();
|
|
|
|
} else if (ip[-1] < MP_BC_UNARY_OP_MULTI + 5) {
|
|
|
|
SET_TOP(mp_unary_op(ip[-1] - MP_BC_UNARY_OP_MULTI, TOP()));
|
|
|
|
DISPATCH();
|
|
|
|
} else if (ip[-1] < MP_BC_BINARY_OP_MULTI + 35) {
|
|
|
|
mp_obj_t rhs = POP();
|
|
|
|
mp_obj_t lhs = TOP();
|
|
|
|
SET_TOP(mp_binary_op(ip[-1] - MP_BC_BINARY_OP_MULTI, lhs, rhs));
|
|
|
|
DISPATCH();
|
|
|
|
} else
|
|
|
|
#endif
|
|
|
|
{
|
2014-05-25 22:58:04 +01:00
|
|
|
mp_obj_t obj = mp_obj_new_exception_msg(&mp_type_NotImplementedError, "byte code not implemented");
|
2014-04-14 16:22:44 +01:00
|
|
|
nlr_pop();
|
2014-05-25 22:58:04 +01:00
|
|
|
fastn[0] = obj;
|
2014-04-14 16:22:44 +01:00
|
|
|
return MP_VM_RETURN_EXCEPTION;
|
2014-05-25 22:58:04 +01:00
|
|
|
}
|
py: Tidy up variables in VM, probably fixes subtle bugs.
Things get tricky when using the nlr code to catch exceptions. Need to
ensure that the variables (stack layout) in the exception handler are
the same as in the bit protected by the exception handler.
Prior to this patch there were a few bugs. 1) The constant
mp_const_MemoryError_obj was being preloaded to a specific location on
the stack at the start of the function. But this location on the stack
was being overwritten in the opcode loop (since it didn't think that
variable would ever be referenced again), and so when an exception
occurred, the variable holding the address of MemoryError was corrupt.
2) The FOR_ITER opcode detection in the exception handler used sp, which
may or may not contain the right value coming out of the main opcode
loop.
With this patch there is a clear separation of variables used in the
opcode loop and in the exception handler (should fix issue (2) above).
Furthermore, nlr_raise is no longer used in the opcode loop. Instead,
it jumps directly into the exception handler. This tells the C compiler
more about the possible code flow, and means that it should have the
same stack layout for the exception handler. This should fix issue (1)
above. Indeed, the generated (ARM) assembler has been checked explicitly,
and with 'goto exception_handler', the problem with &MemoryError is
fixed.
This may now fix problems with rge-sm, and probably many other subtle
bugs yet to show themselves. Incidentally, rge-sm now passes on
pyboard (with a reduced range of integration)!
Main lesson: nlr is tricky. Don't use nlr_push unless you know what you
are doing! Luckily, it's not used in many places. Using nlr_raise/jump
is fine.
2014-04-17 16:50:23 +01:00
|
|
|
|
2014-05-21 20:32:59 +01:00
|
|
|
#if !MICROPY_OPT_COMPUTED_GOTO
|
2014-04-15 08:57:01 +01:00
|
|
|
} // switch
|
2014-04-14 16:22:44 +01:00
|
|
|
#endif
|
2014-10-25 18:19:55 +01:00
|
|
|
|
|
|
|
pending_exception_check:
|
2015-01-01 23:30:53 +00:00
|
|
|
if (MP_STATE_VM(mp_pending_exception) != MP_OBJ_NULL) {
|
2014-12-28 05:17:43 +00:00
|
|
|
MARK_EXC_IP_SELECTIVE();
|
2015-01-01 23:30:53 +00:00
|
|
|
mp_obj_t obj = MP_STATE_VM(mp_pending_exception);
|
|
|
|
MP_STATE_VM(mp_pending_exception) = MP_OBJ_NULL;
|
2014-10-25 18:19:55 +01:00
|
|
|
RAISE(obj);
|
|
|
|
}
|
|
|
|
|
py: Tidy up variables in VM, probably fixes subtle bugs.
Things get tricky when using the nlr code to catch exceptions. Need to
ensure that the variables (stack layout) in the exception handler are
the same as in the bit protected by the exception handler.
Prior to this patch there were a few bugs. 1) The constant
mp_const_MemoryError_obj was being preloaded to a specific location on
the stack at the start of the function. But this location on the stack
was being overwritten in the opcode loop (since it didn't think that
variable would ever be referenced again), and so when an exception
occurred, the variable holding the address of MemoryError was corrupt.
2) The FOR_ITER opcode detection in the exception handler used sp, which
may or may not contain the right value coming out of the main opcode
loop.
With this patch there is a clear separation of variables used in the
opcode loop and in the exception handler (should fix issue (2) above).
Furthermore, nlr_raise is no longer used in the opcode loop. Instead,
it jumps directly into the exception handler. This tells the C compiler
more about the possible code flow, and means that it should have the
same stack layout for the exception handler. This should fix issue (1)
above. Indeed, the generated (ARM) assembler has been checked explicitly,
and with 'goto exception_handler', the problem with &MemoryError is
fixed.
This may now fix problems with rge-sm, and probably many other subtle
bugs yet to show themselves. Incidentally, rge-sm now passes on
pyboard (with a reduced range of integration)!
Main lesson: nlr is tricky. Don't use nlr_push unless you know what you
are doing! Luckily, it's not used in many places. Using nlr_raise/jump
is fine.
2014-04-17 16:50:23 +01:00
|
|
|
} // for loop
|
2013-10-15 22:25:17 +01:00
|
|
|
|
|
|
|
} else {
|
py: Tidy up variables in VM, probably fixes subtle bugs.
Things get tricky when using the nlr code to catch exceptions. Need to
ensure that the variables (stack layout) in the exception handler are
the same as in the bit protected by the exception handler.
Prior to this patch there were a few bugs. 1) The constant
mp_const_MemoryError_obj was being preloaded to a specific location on
the stack at the start of the function. But this location on the stack
was being overwritten in the opcode loop (since it didn't think that
variable would ever be referenced again), and so when an exception
occurred, the variable holding the address of MemoryError was corrupt.
2) The FOR_ITER opcode detection in the exception handler used sp, which
may or may not contain the right value coming out of the main opcode
loop.
With this patch there is a clear separation of variables used in the
opcode loop and in the exception handler (should fix issue (2) above).
Furthermore, nlr_raise is no longer used in the opcode loop. Instead,
it jumps directly into the exception handler. This tells the C compiler
more about the possible code flow, and means that it should have the
same stack layout for the exception handler. This should fix issue (1)
above. Indeed, the generated (ARM) assembler has been checked explicitly,
and with 'goto exception_handler', the problem with &MemoryError is
fixed.
This may now fix problems with rge-sm, and probably many other subtle
bugs yet to show themselves. Incidentally, rge-sm now passes on
pyboard (with a reduced range of integration)!
Main lesson: nlr is tricky. Don't use nlr_push unless you know what you
are doing! Luckily, it's not used in many places. Using nlr_raise/jump
is fine.
2014-04-17 16:50:23 +01:00
|
|
|
exception_handler:
|
2013-10-15 22:25:17 +01:00
|
|
|
// exception occurred
|
|
|
|
|
2014-12-29 00:29:59 +00:00
|
|
|
#if SELECTIVE_EXC_IP
|
|
|
|
// with selective ip, we store the ip 1 byte past the opcode, so move ptr back
|
|
|
|
code_state->ip -= 1;
|
|
|
|
#endif
|
|
|
|
|
2014-03-26 18:37:06 +00:00
|
|
|
// check if it's a StopIteration within a for block
|
py, vm: Replace save_ip, save_sp with code_state->{ip, sp}.
This may seem a bit of a risky change, in that it may introduce crazy
bugs with respect to volatile variables in the VM loop. But, I think it
should be fine: code_state points to some external memory, so the
compiler should always read/write to that memory when accessing the
ip/sp variables (ie not put them in registers).
Anyway, it passes all tests and improves on all efficiency fronts: about
2-4% faster (64-bit unix), 16 bytes less stack space per call (64-bit
unix) and slightly less executable size (unix and stmhal).
The reason it's more efficient is save_ip and save_sp were volatile
variables, so were anyway stored on the stack (in memory, not regs).
Thus converting them to code_state->{ip, sp} doesn't cost an extra
memory dereference (except maybe to get code_state, but that can be put
in a register and then made more efficient for other uses of it).
2014-06-01 12:32:28 +01:00
|
|
|
if (*code_state->ip == MP_BC_FOR_ITER && mp_obj_is_subclass_fast(mp_obj_get_type(nlr.ret_val), &mp_type_StopIteration)) {
|
|
|
|
const byte *ip = code_state->ip + 1;
|
2014-03-26 18:37:06 +00:00
|
|
|
DECODE_ULABEL; // the jump offset if iteration finishes; for labels are always forward
|
2014-12-02 19:25:10 +00:00
|
|
|
code_state->ip = ip + ulab; // jump to after for-block
|
py, vm: Replace save_ip, save_sp with code_state->{ip, sp}.
This may seem a bit of a risky change, in that it may introduce crazy
bugs with respect to volatile variables in the VM loop. But, I think it
should be fine: code_state points to some external memory, so the
compiler should always read/write to that memory when accessing the
ip/sp variables (ie not put them in registers).
Anyway, it passes all tests and improves on all efficiency fronts: about
2-4% faster (64-bit unix), 16 bytes less stack space per call (64-bit
unix) and slightly less executable size (unix and stmhal).
The reason it's more efficient is save_ip and save_sp were volatile
variables, so were anyway stored on the stack (in memory, not regs).
Thus converting them to code_state->{ip, sp} doesn't cost an extra
memory dereference (except maybe to get code_state, but that can be put
in a register and then made more efficient for other uses of it).
2014-06-01 12:32:28 +01:00
|
|
|
code_state->sp -= 1; // pop the exhausted iterator
|
2014-03-26 18:37:06 +00:00
|
|
|
goto outer_dispatch_loop; // continue with dispatch loop
|
|
|
|
}
|
|
|
|
|
2014-01-18 23:24:36 +00:00
|
|
|
// set file and line number that the exception occurred at
|
2014-01-30 11:49:18 +00:00
|
|
|
// TODO: don't set traceback for exceptions re-raised by END_FINALLY.
|
|
|
|
// But consider how to handle nested exceptions.
|
2014-04-04 11:52:59 +01:00
|
|
|
// TODO need a better way of not adding traceback to constant objects (right now, just GeneratorExit_obj and MemoryError_obj)
|
|
|
|
if (mp_obj_is_exception_instance(nlr.ret_val) && nlr.ret_val != &mp_const_GeneratorExit_obj && nlr.ret_val != &mp_const_MemoryError_obj) {
|
2014-09-04 14:44:01 +01:00
|
|
|
const byte *ip = code_state->code_info;
|
|
|
|
mp_uint_t code_info_size = mp_decode_uint(&ip);
|
|
|
|
qstr block_name = mp_decode_uint(&ip);
|
|
|
|
qstr source_file = mp_decode_uint(&ip);
|
|
|
|
mp_uint_t bc = code_state->ip - code_state->code_info - code_info_size;
|
2014-08-26 23:35:57 +01:00
|
|
|
mp_uint_t source_line = 1;
|
|
|
|
mp_uint_t c;
|
2014-09-04 14:44:01 +01:00
|
|
|
while ((c = *ip)) {
|
2014-08-26 23:35:57 +01:00
|
|
|
mp_uint_t b, l;
|
|
|
|
if ((c & 0x80) == 0) {
|
|
|
|
// 0b0LLBBBBB encoding
|
|
|
|
b = c & 0x1f;
|
|
|
|
l = c >> 5;
|
2014-09-04 14:44:01 +01:00
|
|
|
ip += 1;
|
2014-08-26 23:35:57 +01:00
|
|
|
} else {
|
|
|
|
// 0b1LLLBBBB 0bLLLLLLLL encoding (l's LSB in second byte)
|
|
|
|
b = c & 0xf;
|
2014-09-04 14:44:01 +01:00
|
|
|
l = ((c << 4) & 0x700) | ip[1];
|
|
|
|
ip += 2;
|
2014-08-26 23:35:57 +01:00
|
|
|
}
|
|
|
|
if (bc >= b) {
|
|
|
|
bc -= b;
|
|
|
|
source_line += l;
|
|
|
|
} else {
|
|
|
|
// found source line corresponding to bytecode offset
|
|
|
|
break;
|
2014-06-02 16:24:34 +01:00
|
|
|
}
|
2014-01-18 23:24:36 +00:00
|
|
|
}
|
2014-01-19 12:38:49 +00:00
|
|
|
mp_obj_exception_add_traceback(nlr.ret_val, source_file, source_line, block_name);
|
2014-01-18 23:24:36 +00:00
|
|
|
}
|
|
|
|
|
2013-12-29 16:54:59 +00:00
|
|
|
while (currently_in_except_block) {
|
|
|
|
// nested exception
|
|
|
|
|
2014-03-22 11:49:31 +00:00
|
|
|
assert(exc_sp >= exc_stack);
|
2013-12-29 16:54:59 +00:00
|
|
|
|
|
|
|
// TODO make a proper message for nested exception
|
|
|
|
// at the moment we are just raising the very last exception (the one that caused the nested exception)
|
|
|
|
|
|
|
|
// move up to previous exception handler
|
2014-03-29 21:16:27 +00:00
|
|
|
POP_EXC_BLOCK();
|
2013-12-29 16:54:59 +00:00
|
|
|
}
|
|
|
|
|
2014-03-22 11:49:31 +00:00
|
|
|
if (exc_sp >= exc_stack) {
|
2013-12-29 16:54:59 +00:00
|
|
|
// set flag to indicate that we are now handling an exception
|
|
|
|
currently_in_except_block = 1;
|
|
|
|
|
2013-10-15 22:25:17 +01:00
|
|
|
// catch exception and pass to byte code
|
2014-05-31 14:50:46 +01:00
|
|
|
code_state->ip = exc_sp->handler;
|
py: Tidy up variables in VM, probably fixes subtle bugs.
Things get tricky when using the nlr code to catch exceptions. Need to
ensure that the variables (stack layout) in the exception handler are
the same as in the bit protected by the exception handler.
Prior to this patch there were a few bugs. 1) The constant
mp_const_MemoryError_obj was being preloaded to a specific location on
the stack at the start of the function. But this location on the stack
was being overwritten in the opcode loop (since it didn't think that
variable would ever be referenced again), and so when an exception
occurred, the variable holding the address of MemoryError was corrupt.
2) The FOR_ITER opcode detection in the exception handler used sp, which
may or may not contain the right value coming out of the main opcode
loop.
With this patch there is a clear separation of variables used in the
opcode loop and in the exception handler (should fix issue (2) above).
Furthermore, nlr_raise is no longer used in the opcode loop. Instead,
it jumps directly into the exception handler. This tells the C compiler
more about the possible code flow, and means that it should have the
same stack layout for the exception handler. This should fix issue (1)
above. Indeed, the generated (ARM) assembler has been checked explicitly,
and with 'goto exception_handler', the problem with &MemoryError is
fixed.
This may now fix problems with rge-sm, and probably many other subtle
bugs yet to show themselves. Incidentally, rge-sm now passes on
pyboard (with a reduced range of integration)!
Main lesson: nlr is tricky. Don't use nlr_push unless you know what you
are doing! Luckily, it's not used in many places. Using nlr_raise/jump
is fine.
2014-04-17 16:50:23 +01:00
|
|
|
mp_obj_t *sp = MP_TAGPTR_PTR(exc_sp->val_sp);
|
2014-03-30 00:54:48 +00:00
|
|
|
// save this exception in the stack so it can be used in a reraise, if needed
|
|
|
|
exc_sp->prev_exc = nlr.ret_val;
|
2013-10-15 23:46:01 +01:00
|
|
|
// push(traceback, exc-val, exc-type)
|
2013-12-21 18:17:45 +00:00
|
|
|
PUSH(mp_const_none);
|
2013-10-15 23:46:01 +01:00
|
|
|
PUSH(nlr.ret_val);
|
2014-03-29 00:52:17 +00:00
|
|
|
PUSH(mp_obj_get_type(nlr.ret_val));
|
2014-05-31 14:50:46 +01:00
|
|
|
code_state->sp = sp;
|
2013-12-29 16:54:59 +00:00
|
|
|
|
2013-10-15 22:25:17 +01:00
|
|
|
} else {
|
2014-02-15 22:55:00 +00:00
|
|
|
// propagate exception to higher level
|
|
|
|
// TODO what to do about ip and sp? they don't really make sense at this point
|
|
|
|
fastn[0] = nlr.ret_val; // must put exception here because sp is invalid
|
|
|
|
return MP_VM_RETURN_EXCEPTION;
|
2013-10-15 22:25:17 +01:00
|
|
|
}
|
2013-10-04 19:53:11 +01:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|