2014-05-03 23:27:38 +01:00
|
|
|
/*
|
2017-06-30 08:22:17 +01:00
|
|
|
* This file is part of the MicroPython project, http://micropython.org/
|
2014-05-03 23:27:38 +01:00
|
|
|
*
|
|
|
|
* The MIT License (MIT)
|
|
|
|
*
|
|
|
|
* Copyright (c) 2013, 2014 Damien P. George
|
|
|
|
*
|
|
|
|
* Permission is hereby granted, free of charge, to any person obtaining a copy
|
|
|
|
* of this software and associated documentation files (the "Software"), to deal
|
|
|
|
* in the Software without restriction, including without limitation the rights
|
|
|
|
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
|
|
|
* copies of the Software, and to permit persons to whom the Software is
|
|
|
|
* furnished to do so, subject to the following conditions:
|
|
|
|
*
|
|
|
|
* The above copyright notice and this permission notice shall be included in
|
|
|
|
* all copies or substantial portions of the Software.
|
|
|
|
*
|
|
|
|
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
|
|
|
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
|
|
|
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
|
|
|
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
|
|
|
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
|
|
|
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
|
|
|
|
* THE SOFTWARE.
|
|
|
|
*/
|
all: Unify header guard usage.
The code conventions suggest using header guards, but do not define how
those should look like and instead point to existing files. However, not
all existing files follow the same scheme, sometimes omitting header guards
altogether, sometimes using non-standard names, making it easy to
accidentally pick a "wrong" example.
This commit ensures that all header files of the MicroPython project (that
were not simply copied from somewhere else) follow the same pattern, that
was already present in the majority of files, especially in the py folder.
The rules are as follows.
Naming convention:
* start with the words MICROPY_INCLUDED
* contain the full path to the file
* replace special characters with _
In addition, there are no empty lines before #ifndef, between #ifndef and
one empty line before #endif. #endif is followed by a comment containing
the name of the guard macro.
py/grammar.h cannot use header guards by design, since it has to be
included multiple times in a single C file. Several other files also do not
need header guards as they are only used internally and guaranteed to be
included only once:
* MICROPY_MPHALPORT_H
* mpconfigboard.h
* mpconfigport.h
* mpthreadport.h
* pin_defs_*.h
* qstrdefs*.h
2017-06-29 22:14:58 +01:00
|
|
|
#ifndef MICROPY_INCLUDED_PY_ASMX64_H
|
|
|
|
#define MICROPY_INCLUDED_PY_ASMX64_H
|
2015-01-01 18:07:43 +00:00
|
|
|
|
|
|
|
#include "py/mpconfig.h"
|
|
|
|
#include "py/misc.h"
|
2016-11-27 22:24:50 +00:00
|
|
|
#include "py/asmbase.h"
|
2014-05-03 23:27:38 +01:00
|
|
|
|
2014-09-07 01:06:19 +01:00
|
|
|
// AMD64 calling convention is:
// - args pass in: RDI, RSI, RDX, RCX, R08, R09
// - return value in RAX
// - stack must be aligned on a 16-byte boundary before all calls
// - RAX, RCX, RDX, RSI, RDI, R08, R09, R10, R11 are caller-save
// - RBX, RBP, R12, R13, R14, R15 are callee-save

// In the functions below, argument order follows x86 docs and generally
// the destination is the first argument.
// NOTE: this is a change from the old convention used in this file and
// some functions still use the old (reverse) convention.

// x64 register numbers (0-15); the values follow the standard x86-64
// register encoding order, so they can be used directly when encoding
// instructions.
#define ASM_X64_REG_RAX (0)
#define ASM_X64_REG_RCX (1)
#define ASM_X64_REG_RDX (2)
#define ASM_X64_REG_RBX (3)
#define ASM_X64_REG_RSP (4)
#define ASM_X64_REG_RBP (5)
#define ASM_X64_REG_RSI (6)
#define ASM_X64_REG_RDI (7)
#define ASM_X64_REG_R08 (8)
#define ASM_X64_REG_R09 (9)
#define ASM_X64_REG_R10 (10)
#define ASM_X64_REG_R11 (11)
#define ASM_X64_REG_R12 (12)
#define ASM_X64_REG_R13 (13)
#define ASM_X64_REG_R14 (14)
#define ASM_X64_REG_R15 (15)
|
2013-10-04 19:53:11 +01:00
|
|
|
|
2013-12-30 19:03:41 +00:00
|
|
|
// condition codes, used for jcc and setcc (despite their j-name!)
#define ASM_X64_CC_JB (0x2) // below, unsigned
#define ASM_X64_CC_JAE (0x3) // above or equal, unsigned
#define ASM_X64_CC_JZ (0x4) // zero
#define ASM_X64_CC_JE (0x4) // equal (same encoding as JZ)
#define ASM_X64_CC_JNZ (0x5) // not zero
#define ASM_X64_CC_JNE (0x5) // not equal (same encoding as JNZ)
#define ASM_X64_CC_JBE (0x6) // below or equal, unsigned
#define ASM_X64_CC_JA (0x7) // above, unsigned
#define ASM_X64_CC_JL (0xc) // less, signed
#define ASM_X64_CC_JGE (0xd) // greater or equal, signed
#define ASM_X64_CC_JLE (0xe) // less or equal, signed
#define ASM_X64_CC_JG (0xf) // greater, signed
|
2013-10-04 19:53:11 +01:00
|
|
|
|
2016-11-27 22:24:50 +00:00
|
|
|
// State of the x64 assembler; one instance per function being assembled.
typedef struct _asm_x64_t {
    mp_asm_base_t base; // common assembler state shared by all architectures (see py/asmbase.h)
    int num_locals;     // number of local slots requested via asm_x64_entry()
} asm_x64_t;
|
2013-10-04 19:53:11 +01:00
|
|
|
|
2016-11-27 22:24:50 +00:00
|
|
|
// Called at the end of each assembler pass; the x64 backend has no
// per-pass finalisation to do, so this is a no-op.
static inline void asm_x64_end_pass(asm_x64_t *as) {
    (void)as; // unused
}
|
2015-04-06 22:38:53 +01:00
|
|
|
|
2013-10-04 19:53:11 +01:00
|
|
|
// Each function below emits one instruction (or a short fixed sequence)
// for the current pass.  Register arguments take ASM_X64_REG_* values.

void asm_x64_nop(asm_x64_t *as);
void asm_x64_push_r64(asm_x64_t *as, int src_r64);
void asm_x64_pop_r64(asm_x64_t *as, int dest_r64);

// Register/immediate moves.  The _optimised variant may pick a different
// encoding than the plain i64 one — see asm_x64.c for the exact rules.
void asm_x64_mov_r64_r64(asm_x64_t *as, int dest_r64, int src_r64);
size_t asm_x64_mov_i32_to_r64(asm_x64_t *as, int src_i32, int dest_r64); // return value: see implementation in asm_x64.c
void asm_x64_mov_i64_to_r64(asm_x64_t *as, int64_t src_i64, int dest_r64);
void asm_x64_mov_i64_to_r64_optimised(asm_x64_t *as, int64_t src_i64, int dest_r64);

// Stores: register -> memory at [dest_r64 + dest_disp], by width.
void asm_x64_mov_r8_to_mem8(asm_x64_t *as, int src_r64, int dest_r64, int dest_disp);
void asm_x64_mov_r16_to_mem16(asm_x64_t *as, int src_r64, int dest_r64, int dest_disp);
void asm_x64_mov_r32_to_mem32(asm_x64_t *as, int src_r64, int dest_r64, int dest_disp);
void asm_x64_mov_r64_to_mem64(asm_x64_t *as, int src_r64, int dest_r64, int dest_disp);

// Loads: memory at [src_r64 + src_disp] -> register; the zx variants
// zero-extend the narrow value into the 64-bit destination.
void asm_x64_mov_mem8_to_r64zx(asm_x64_t *as, int src_r64, int src_disp, int dest_r64);
void asm_x64_mov_mem16_to_r64zx(asm_x64_t *as, int src_r64, int src_disp, int dest_r64);
void asm_x64_mov_mem32_to_r64zx(asm_x64_t *as, int src_r64, int src_disp, int dest_r64);
void asm_x64_mov_mem64_to_r64(asm_x64_t *as, int src_r64, int src_disp, int dest_r64);

// Arithmetic/logic, destination first.  The _cl shift variants use the
// shift count held in CL (i.e. RCX).
void asm_x64_and_r64_r64(asm_x64_t *as, int dest_r64, int src_r64);
void asm_x64_or_r64_r64(asm_x64_t *as, int dest_r64, int src_r64);
void asm_x64_xor_r64_r64(asm_x64_t *as, int dest_r64, int src_r64);
void asm_x64_shl_r64_cl(asm_x64_t *as, int dest_r64);
void asm_x64_shr_r64_cl(asm_x64_t *as, int dest_r64);
void asm_x64_sar_r64_cl(asm_x64_t *as, int dest_r64);
void asm_x64_add_r64_r64(asm_x64_t *as, int dest_r64, int src_r64);
void asm_x64_sub_r64_r64(asm_x64_t *as, int dest_r64, int src_r64);
void asm_x64_mul_r64_r64(asm_x64_t *as, int dest_r64, int src_r64);

// Comparison/test, setting flags for a following jcc/setcc.
void asm_x64_cmp_r64_with_r64(asm_x64_t *as, int src_r64_a, int src_r64_b);
void asm_x64_test_r8_with_r8(asm_x64_t *as, int src_r64_a, int src_r64_b);
void asm_x64_test_r64_with_r64(asm_x64_t *as, int src_r64_a, int src_r64_b);
void asm_x64_setcc_r8(asm_x64_t *as, int jcc_type, int dest_r8); // jcc_type is one of ASM_X64_CC_*

// Control flow.
void asm_x64_jmp_reg(asm_x64_t *as, int src_r64);
void asm_x64_jmp_label(asm_x64_t *as, mp_uint_t label);
void asm_x64_jcc_label(asm_x64_t *as, int jcc_type, mp_uint_t label); // jcc_type is one of ASM_X64_CC_*

// Function prologue/epilogue and local-variable access.
void asm_x64_entry(asm_x64_t *as, int num_locals);
void asm_x64_exit(asm_x64_t *as);
void asm_x64_mov_local_to_r64(asm_x64_t *as, int src_local_num, int dest_r64);
void asm_x64_mov_r64_to_local(asm_x64_t *as, int src_r64, int dest_local_num);
void asm_x64_mov_local_addr_to_r64(asm_x64_t *as, int local_num, int dest_r64);
void asm_x64_mov_reg_pcrel(asm_x64_t *as, int dest_r64, mp_uint_t label);
void asm_x64_call_ind(asm_x64_t *as, size_t fun_id, int temp_r32);

// Holds a pointer to mp_fun_table
#define ASM_X64_REG_FUN_TABLE ASM_X64_REG_RBP
|
|
|
|
|
2016-12-08 06:47:17 +00:00
|
|
|
#if GENERIC_ASM_API

// The following macros provide a (mostly) arch-independent API to
// generate native code, and are used by the native emitter.

#define ASM_WORD_SIZE (8) // size of a machine word in bytes on x64

// Generic register roles mapped to x64 registers.  The argument registers
// follow the AMD64 calling convention documented at the top of this file.
#define REG_RET ASM_X64_REG_RAX
#define REG_ARG_1 ASM_X64_REG_RDI
#define REG_ARG_2 ASM_X64_REG_RSI
#define REG_ARG_3 ASM_X64_REG_RDX
#define REG_ARG_4 ASM_X64_REG_RCX
#define REG_ARG_5 ASM_X64_REG_R08

// caller-save
#define REG_TEMP0 ASM_X64_REG_RAX
#define REG_TEMP1 ASM_X64_REG_RDI
#define REG_TEMP2 ASM_X64_REG_RSI

// callee-save
#define REG_LOCAL_1 ASM_X64_REG_RBX
#define REG_LOCAL_2 ASM_X64_REG_R12
#define REG_LOCAL_3 ASM_X64_REG_R13
#define REG_LOCAL_NUM (3)

// Holds a pointer to mp_fun_table
#define REG_FUN_TABLE ASM_X64_REG_FUN_TABLE

// Arch-independent aliases for the assembler type and its entry points.
#define ASM_T asm_x64_t
#define ASM_END_PASS asm_x64_end_pass
#define ASM_ENTRY asm_x64_entry
#define ASM_EXIT asm_x64_exit

#define ASM_JUMP asm_x64_jmp_label
|
2018-08-04 13:03:49 +01:00
|
|
|
// Jump to label if reg is zero.  When bool_test is true only the low
// 8 bits of the register are tested (the value is a boolean); otherwise
// the full 64-bit register is tested.
// NOTE(review): parameters are now fully parenthesized (CERT PRE01-C),
// consistent with the other macros in this file; expansion is otherwise
// unchanged.
#define ASM_JUMP_IF_REG_ZERO(as, reg, label, bool_test) \
    do { \
        if (bool_test) { \
            asm_x64_test_r8_with_r8((as), (reg), (reg)); \
        } else { \
            asm_x64_test_r64_with_r64((as), (reg), (reg)); \
        } \
        asm_x64_jcc_label((as), ASM_X64_CC_JZ, (label)); \
    } while (0)

// Jump to label if reg is non-zero; bool_test as for ASM_JUMP_IF_REG_ZERO.
#define ASM_JUMP_IF_REG_NONZERO(as, reg, label, bool_test) \
    do { \
        if (bool_test) { \
            asm_x64_test_r8_with_r8((as), (reg), (reg)); \
        } else { \
            asm_x64_test_r64_with_r64((as), (reg), (reg)); \
        } \
        asm_x64_jcc_label((as), ASM_X64_CC_JNZ, (label)); \
    } while (0)

// Jump to label if reg1 == reg2 (full 64-bit comparison).
#define ASM_JUMP_IF_REG_EQ(as, reg1, reg2, label) \
    do { \
        asm_x64_cmp_r64_with_r64((as), (reg1), (reg2)); \
        asm_x64_jcc_label((as), ASM_X64_CC_JE, (label)); \
    } while (0)

// Indirect jump through a register.
#define ASM_JUMP_REG(as, reg) asm_x64_jmp_reg((as), (reg))

// Indirect call of function-table entry idx, using RAX as a temporary.
#define ASM_CALL_IND(as, idx) asm_x64_call_ind((as), (idx), ASM_X64_REG_RAX)
|
2016-12-08 06:47:17 +00:00
|
|
|
|
2017-11-15 00:46:49 +00:00
|
|
|
// Moves between registers, locals, immediates and pc-relative labels.
#define ASM_MOV_LOCAL_REG(as, local_num, reg_src) asm_x64_mov_r64_to_local((as), (reg_src), (local_num))
#define ASM_MOV_REG_IMM(as, reg_dest, imm) asm_x64_mov_i64_to_r64_optimised((as), (imm), (reg_dest))
#define ASM_MOV_REG_LOCAL(as, reg_dest, local_num) asm_x64_mov_local_to_r64((as), (local_num), (reg_dest))
#define ASM_MOV_REG_REG(as, reg_dest, reg_src) asm_x64_mov_r64_r64((as), (reg_dest), (reg_src))
#define ASM_MOV_REG_LOCAL_ADDR(as, reg_dest, local_num) asm_x64_mov_local_addr_to_r64((as), (local_num), (reg_dest))
#define ASM_MOV_REG_PCREL(as, reg_dest, label) asm_x64_mov_reg_pcrel((as), (reg_dest), (label))

// Shifts and arithmetic/logic; shift count comes from CL (RCX).
#define ASM_LSL_REG(as, reg) asm_x64_shl_r64_cl((as), (reg))
#define ASM_LSR_REG(as, reg) asm_x64_shr_r64_cl((as), (reg))
#define ASM_ASR_REG(as, reg) asm_x64_sar_r64_cl((as), (reg))
#define ASM_OR_REG_REG(as, reg_dest, reg_src) asm_x64_or_r64_r64((as), (reg_dest), (reg_src))
#define ASM_XOR_REG_REG(as, reg_dest, reg_src) asm_x64_xor_r64_r64((as), (reg_dest), (reg_src))
#define ASM_AND_REG_REG(as, reg_dest, reg_src) asm_x64_and_r64_r64((as), (reg_dest), (reg_src))
#define ASM_ADD_REG_REG(as, reg_dest, reg_src) asm_x64_add_r64_r64((as), (reg_dest), (reg_src))
#define ASM_SUB_REG_REG(as, reg_dest, reg_src) asm_x64_sub_r64_r64((as), (reg_dest), (reg_src))
#define ASM_MUL_REG_REG(as, reg_dest, reg_src) asm_x64_mul_r64_r64((as), (reg_dest), (reg_src))

// Loads from [reg_base + offset]; offsets are scaled by the element size.
// The 8/16/32-bit loads zero-extend into the 64-bit destination.
#define ASM_LOAD_REG_REG(as, reg_dest, reg_base) asm_x64_mov_mem64_to_r64((as), (reg_base), 0, (reg_dest))
#define ASM_LOAD_REG_REG_OFFSET(as, reg_dest, reg_base, word_offset) asm_x64_mov_mem64_to_r64((as), (reg_base), 8 * (word_offset), (reg_dest))
#define ASM_LOAD8_REG_REG(as, reg_dest, reg_base) asm_x64_mov_mem8_to_r64zx((as), (reg_base), 0, (reg_dest))
#define ASM_LOAD16_REG_REG(as, reg_dest, reg_base) asm_x64_mov_mem16_to_r64zx((as), (reg_base), 0, (reg_dest))
#define ASM_LOAD16_REG_REG_OFFSET(as, reg_dest, reg_base, uint16_offset) asm_x64_mov_mem16_to_r64zx((as), (reg_base), 2 * (uint16_offset), (reg_dest))
#define ASM_LOAD32_REG_REG(as, reg_dest, reg_base) asm_x64_mov_mem32_to_r64zx((as), (reg_base), 0, (reg_dest))

// Stores to [reg_base + offset]; offsets are scaled by the element size.
#define ASM_STORE_REG_REG(as, reg_src, reg_base) asm_x64_mov_r64_to_mem64((as), (reg_src), (reg_base), 0)
#define ASM_STORE_REG_REG_OFFSET(as, reg_src, reg_base, word_offset) asm_x64_mov_r64_to_mem64((as), (reg_src), (reg_base), 8 * (word_offset))
#define ASM_STORE8_REG_REG(as, reg_src, reg_base) asm_x64_mov_r8_to_mem8((as), (reg_src), (reg_base), 0)
#define ASM_STORE16_REG_REG(as, reg_src, reg_base) asm_x64_mov_r16_to_mem16((as), (reg_src), (reg_base), 0)
#define ASM_STORE32_REG_REG(as, reg_src, reg_base) asm_x64_mov_r32_to_mem32((as), (reg_src), (reg_base), 0)

#endif // GENERIC_ASM_API
|
|
|
|
|
all: Unify header guard usage.
The code conventions suggest using header guards, but do not define how
those should look like and instead point to existing files. However, not
all existing files follow the same scheme, sometimes omitting header guards
altogether, sometimes using non-standard names, making it easy to
accidentally pick a "wrong" example.
This commit ensures that all header files of the MicroPython project (that
were not simply copied from somewhere else) follow the same pattern, that
was already present in the majority of files, especially in the py folder.
The rules are as follows.
Naming convention:
* start with the words MICROPY_INCLUDED
* contain the full path to the file
* replace special characters with _
In addition, there are no empty lines before #ifndef, between #ifndef and
one empty line before #endif. #endif is followed by a comment containing
the name of the guard macro.
py/grammar.h cannot use header guards by design, since it has to be
included multiple times in a single C file. Several other files also do not
need header guards as they are only used internally and guaranteed to be
included only once:
* MICROPY_MPHALPORT_H
* mpconfigboard.h
* mpconfigport.h
* mpthreadport.h
* pin_defs_*.h
* qstrdefs*.h
2017-06-29 22:14:58 +01:00
|
|
|
#endif // MICROPY_INCLUDED_PY_ASMX64_H
|