diff --git a/.gitignore b/.gitignore index 7a55dead..50d2c52a 100644 --- a/.gitignore +++ b/.gitignore @@ -7,4 +7,4 @@ cmake-build-*/ prefix/ CMakeLists.txt.user CMakeUserPresets.json -maat_state_* +maat_state_* \ No newline at end of file diff --git a/CMakeLists.txt b/CMakeLists.txt index c7be0169..b16161ea 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -19,6 +19,7 @@ include(cmake/variables.cmake) add_library(maat_maat src/arch/arch_EVM.cpp + src/arch/arch_PPC64.cpp src/arch/arch_X86.cpp src/arch/lifter.cpp src/arch/register_aliases.cpp @@ -180,15 +181,31 @@ macro(maat_sleigh_compile ARCH_DIR ARCH) configure_file("${spec_dir}/${ARCH_DIR}/data/languages/${ARCH}.pspec" "${spec_out_dir}/${ARCH}.pspec") endmacro() +macro(maat_sleigh_compile_files ARCH_DIR ARCH SLASPEC PSPEC) + # ARCH_DIR is the directory that appears in Ghidra's source code hierarchy + # ARCH appears in the name of the '.slaspec' and '.pspec' file (they should be the same) + # Creates a target maat_sleigh_spec_${ARCH} + sleigh_compile( + TARGET maat_sleigh_spec_${ARCH} + COMPILER "${maat_SLEIGH_COMPILER}" + SLASPEC "${spec_dir}/${ARCH_DIR}/data/languages/${SLASPEC}.slaspec" + LOG_FILE "${PROJECT_BINARY_DIR}/sleigh-log/${ARCH}.log" + OUT_FILE "${spec_out_dir}/${SLASPEC}.sla" + ) + configure_file("${spec_dir}/${ARCH_DIR}/data/languages/${PSPEC}.pspec" "${spec_out_dir}/${PSPEC}.pspec") +endmacro() + maat_sleigh_compile(x86 x86-64) maat_sleigh_compile(x86 x86) maat_sleigh_compile(EVM EVM) +maat_sleigh_compile_files(PowerPC PowerPC ppc_64_isa_be ppc_64) # All of the sla spec targets are combined into this one add_custom_target(maat_all_sla_specs DEPENDS maat_sleigh_spec_x86-64 maat_sleigh_spec_x86 maat_sleigh_spec_EVM + maat_sleigh_spec_PowerPC ) # Add sla specs as dependencies to our targets diff --git a/bindings/python/py_arch.cpp b/bindings/python/py_arch.cpp index 6961fe14..9c7049e3 100644 --- a/bindings/python/py_arch.cpp +++ b/bindings/python/py_arch.cpp @@ -11,6 +11,7 @@ void init_arch(PyObject* module) PyDict_SetItemString(arch_enum, "X86", PyLong_FromLong((int)Arch::Type::X86)); PyDict_SetItemString(arch_enum, "X64", PyLong_FromLong((int)Arch::Type::X64)); PyDict_SetItemString(arch_enum, "EVM", PyLong_FromLong((int)Arch::Type::EVM)); + PyDict_SetItemString(arch_enum, "PPC64", PyLong_FromLong((int)Arch::Type::PPC64)); PyObject* arch_class = create_class(PyUnicode_FromString("ARCH"), PyTuple_New(0), arch_enum); PyModule_AddObject(module, "ARCH", arch_class); diff --git a/src/arch/arch_PPC64.cpp b/src/arch/arch_PPC64.cpp new file mode 100644 index 00000000..ec2ec18a --- /dev/null +++ b/src/arch/arch_PPC64.cpp @@ -0,0 +1,229 @@ +/* +Commonwealth of Australia represented by the Department of Defence + +Produced by Nathan Do, Student Intern at DSTG (Defence Science and Technology Group) +*/ + +#include "maat/arch.hpp" +#include "maat/exception.hpp" +#include "maat/cpu.hpp" + +namespace maat +{ + +namespace PPC64 +{ + ArchPPC64::ArchPPC64(): Arch(Arch::Type::PPC64, 64, PPC64::NB_REGS) + { + available_modes = {CPUMode::PPC64}; + reg_map = + { + {"r0", R0}, + {"r1", R1}, + {"r2", R2}, + {"r3", R3}, + {"r4", R4}, + {"r5", R5}, + {"r6", R6}, + {"r7", R7}, + {"r8", R8}, + {"r9", R9}, + {"r10", R10}, + {"r11", R11}, + {"r12", R12}, + {"r13", R13}, + {"r14", R14}, + {"r15", R15}, + {"r16", R16}, + {"r17", R17}, + {"r18", R18}, + {"r19", R19}, + {"r20", R20}, + {"r21", R21}, + {"r22", R22}, + {"r23", R23}, + {"r24", R24}, + {"r25", R25}, + {"r26", R26}, + {"r27", R27}, + {"r28", R28}, + {"r29", R29}, + {"r30", R30}, + {"r31", 
R31}, + {"f0", F0}, + {"f1", F1}, + {"f2", F2}, + {"f3", F3}, + {"f4", F4}, + {"f5", F5}, + {"f6", F6}, + {"f7", F7}, + {"f8", F8}, + {"f9", F9}, + {"f10", F10}, + {"f11", F11}, + {"f12", F12}, + {"f13", F13}, + {"f14", F14}, + {"f15", F15}, + {"f16", F16}, + {"f17", F17}, + {"f18", F18}, + {"f19", F19}, + {"f20", F20}, + {"f21", F21}, + {"f22", F22}, + {"f23", F23}, + {"f24", F24}, + {"f25", F25}, + {"f26", F26}, + {"f27", F27}, + {"f28", F28}, + {"f29", F29}, + {"f30", F30}, + {"f31", F31}, + {"pc", PC}, + {"sp", SP}, + {"cr", CR}, + {"lr", LR}, + {"ctr", CTR}, + {"xer", XER}, + {"cr0", CR0}, + {"cr1", CR1}, + {"cr2", CR2}, + {"cr3", CR3}, + {"cr4", CR4}, + {"cr5", CR5}, + {"cr6", CR6}, + {"cr7", CR7}, + {"xer_so", XER_SO}, + {"xer_ov", XER_OV}, + {"xer_ca", XER_CA}, + {"tbl", TBL}, + {"tbu", TBU}, + {"fpscr", FPSCR}, + {"msr", MSR}, + {"pvr", PVR}, + {"r2save", R2SAVE}, + {"reserve", RESERVE} + }; + } + + size_t ArchPPC64::reg_size(reg_t reg_num) const + { + switch (reg_num) + { + case R0: + case R1: + case R2: + case R3: + case R4: + case R5: + case R6: + case R7: + case R8: + case R9: + case R10: + case R11: + case R12: + case R13: + case R14: + case R15: + case R16: + case R17: + case R18: + case R19: + case R20: + case R21: + case R22: + case R23: + case R24: + case R25: + case R26: + case R27: + case R28: + case R29: + case R30: + case R31: + case F0: + case F1: + case F2: + case F3: + case F4: + case F5: + case F6: + case F7: + case F8: + case F9: + case F10: + case F11: + case F12: + case F13: + case F14: + case F15: + case F16: + case F17: + case F18: + case F19: + case F20: + case F21: + case F22: + case F23: + case F24: + case F25: + case F26: + case F27: + case F28: + case F29: + case F30: + case F31: + return 64; + case CR: + case XER: + return 32; + case PC: + case LR: + case MSR: + case CTR: + case R2SAVE: + return 64; + case CR0: + case CR1: + case CR2: + case CR3: + case CR4: + case CR5: + case CR6: + case CR7: + case XER_SO: + case XER_OV: + case XER_CA: + return 8; + case TBL: + case TBU: + case FPSCR: + case PVR: + case RESERVE: + return 32; + default: + throw runtime_exception("ArchPPC64::reg_size(): got unsupported reg_num"); + } + } + + reg_t ArchPPC64::sp() const + { + return PPC64::R1; + } + + reg_t ArchPPC64::pc() const + { + return PPC64::PC; + } + + reg_t ArchPPC64::tsc() const + { + throw runtime_exception("ArchPPC64::tsc(): method not available"); + } + +} // namespace PPC64 +} // namespace maat \ No newline at end of file diff --git a/src/arch/lifter.cpp b/src/arch/lifter.cpp index 2cb4cebe..5cdfe5aa 100644 --- a/src/arch/lifter.cpp +++ b/src/arch/lifter.cpp @@ -38,6 +38,12 @@ Lifter::Lifter(CPUMode m): mode(m) pspecfile = config.find_sleigh_file("EVM.pspec"); arch = Arch::Type::EVM; } + else if (mode == CPUMode::PPC64) + { + slafile = config.find_sleigh_file("ppc_64_isa_be.sla"); + pspecfile = config.find_sleigh_file("ppc_64.pspec"); + arch = Arch::Type::PPC64; + } else { throw lifter_exception("Lifter: this CPU mode is not supported"); diff --git a/src/arch/register_aliases.cpp b/src/arch/register_aliases.cpp index 607eb608..e56f4c79 100644 --- a/src/arch/register_aliases.cpp +++ b/src/arch/register_aliases.cpp @@ -136,6 +136,59 @@ Value x64_alias_getter(CPUContext& ctx, ir::reg_t reg) std::set x64_aliases{X64::RFLAGS}; +void PPC64_alias_setter(CPUContext& ctx, ir::reg_t reg, const Value& val) +{ + if (reg == PPC64::CR) + { + _set_flag_from_bit(ctx, PPC64::CR7, val, 0, 4); + _set_flag_from_bit(ctx, PPC64::CR6, val, 4, 4); + _set_flag_from_bit(ctx, 
PPC64::CR5, val, 8, 4); + _set_flag_from_bit(ctx, PPC64::CR4, val, 12, 4); + _set_flag_from_bit(ctx, PPC64::CR3, val, 16, 4); + _set_flag_from_bit(ctx, PPC64::CR2, val, 20, 4); + _set_flag_from_bit(ctx, PPC64::CR1, val, 24, 4); + _set_flag_from_bit(ctx, PPC64::CR0, val, 28, 4); + } + else if (reg == PPC64::XER) + { + _set_flag_from_bit(ctx, PPC64::XER_SO, val, 31); + _set_flag_from_bit(ctx, PPC64::XER_OV, val, 30); + _set_flag_from_bit(ctx, PPC64::XER_CA, val, 29); + } + else + throw runtime_exception("PPC64_alias_setter: got unsupported register"); +} + +Value PPC64_alias_getter(CPUContext& ctx, ir::reg_t reg) +{ + Value res; + + if (reg == PPC64::CR) + { + res = extract(ctx.get(PPC64::CR7),3,0); + res.set_concat(extract(ctx.get(PPC64::CR6),3,0), res); + res.set_concat(extract(ctx.get(PPC64::CR5),3,0), res); + res.set_concat(extract(ctx.get(PPC64::CR4),3,0), res); + res.set_concat(extract(ctx.get(PPC64::CR3),3,0), res); + res.set_concat(extract(ctx.get(PPC64::CR2),3,0), res); + res.set_concat(extract(ctx.get(PPC64::CR1),3,0), res); + res.set_concat(extract(ctx.get(PPC64::CR0),3,0), res); + } + else if (reg == PPC64::XER) + { + res = extract(Value(28, 0), 28, 0); + res.set_concat(extract(ctx.get(PPC64::XER_CA),0,0), res); + res.set_concat(extract(ctx.get(PPC64::XER_OV),0,0), res); + res.set_concat(extract(ctx.get(PPC64::XER_SO),0,0), res); + } + else + throw runtime_exception("PPC64_alias_getter: got unsupported register"); + + return res; +} + +std::set PPC64_aliases{PPC64::CR,PPC64::XER}; + void CPUContext::init_alias_getset(Arch::Type arch) { if (arch == Arch::Type::X86) @@ -150,7 +203,13 @@ void CPUContext::init_alias_getset(Arch::Type arch) alias_getter = x64_alias_getter; aliased_regs = x64_aliases; } + else if (arch == Arch::Type::PPC64) + { + alias_setter = PPC64_alias_setter; + alias_getter = PPC64_alias_getter; + aliased_regs = PPC64_aliases; + } } } // namespace ir -} // namespace maat \ No newline at end of file +} // namespace maat diff --git a/src/engine/callother.cpp b/src/engine/callother.cpp index f12b7b49..b4e7330a 100644 --- a/src/engine/callother.cpp +++ b/src/engine/callother.cpp @@ -25,6 +25,11 @@ Id mnemonic_to_id(const std::string& mnemonic, Arch::Type arch) if (mnemonic == "STACK_PUSH") return Id::EVM_STACK_PUSH; if (mnemonic == "STACK_POP") return Id::EVM_STACK_POP; break; + case Arch::Type::PPC64: + if (mnemonic == "cntlzw") return Id::PPC64_CNTLZW; + if (mnemonic == "cntlzw.") return Id::PPC64_CNTLZW; + if (mnemonic == "sc") return Id::PPC64_SC; + break; default: break; } @@ -1041,6 +1046,68 @@ void EVM_LOG_handler(MaatEngine& engine, const ir::Inst& inst, ir::ProcessedInst } } +// Function handles the countleadingzero (CNTLZW) instruction in PowerPC +void PPC64_CNTLZW_handler(MaatEngine& engine, const ir::Inst& inst, ir::ProcessedInst& pinst) +{ + Value program_counter = engine.cpu.ctx().get(engine.arch->pc()); + const Value& cnt = pinst.in1.value(); + + if (not cnt.is_concrete(*engine.vars)) + throw callother_exception("CNTLZW: got symbolic position"); + + ucst_t reg_value = pinst.in1.value().as_uint(); + uint32_t reg_value_word = (uint32_t)reg_value; // cast value to 32-bit value + + int count = 0; + while (reg_value_word != 0) + { + reg_value_word = reg_value_word >> 1; + count++; + } + + count = 32 - count; + pinst.res = Number(inst.out.size(), count); +} + +/* +System call for PowerPC. 
+The syscalls are untested and may not work.
+*/
+void PPC64_SC_handler(MaatEngine& engine, const ir::Inst& inst, ir::ProcessedInst& pinst)
+{
+    engine.log.warning("System Call is untested and might not work!!");
+    // Get syscall number
+    const Value& sys_num = engine.cpu.ctx().get(PPC64::R0);
+    if (sys_num.is_symbolic(*engine.vars))
+    {
+        throw callother_exception("SC 0x0: syscall number is symbolic!");
+    }
+    // Get function to emulate syscall
+    try
+    {
+        const env::Function& func = engine.env->get_syscall_func_by_num(
+            sys_num.as_uint(*engine.vars)
+        );
+
+        // Execute function callback
+        switch (func.callback().execute(engine, env::abi::PPC64_SC::instance()))
+        {
+            case env::Action::CONTINUE:
+                break;
+            case env::Action::ERROR:
+                throw callother_exception(
+                    "SC 0x0: Emulation callback signaled an error, SC is untested and might not work!!"
+                );
+        }
+    }
+    catch(const env_exception& e)
+    {
+        throw callother_exception(
+            Fmt() << "SC 0x0: " << e.what() >> Fmt::to_str
+        );
+    }
+}
+
 /// Return the default handler map for CALLOTHER occurrences
 HandlerMap default_handler_map()
 {
@@ -1083,6 +1150,9 @@ HandlerMap default_handler_map()
     h.set_handler(Id::EVM_SELFDESTRUCT, EVM_SELFDESTRUCT_handler);
     h.set_handler(Id::EVM_LOG, EVM_LOG_handler);
+    h.set_handler(Id::PPC64_CNTLZW, PPC64_CNTLZW_handler);
+    h.set_handler(Id::PPC64_SC, PPC64_SC_handler);
+
     return h;
 }
diff --git a/src/engine/engine.cpp b/src/engine/engine.cpp
index 36965036..cb855024 100644
--- a/src/engine/engine.cpp
+++ b/src/engine/engine.cpp
@@ -32,6 +32,12 @@ MaatEngine::MaatEngine(Arch::Type _arch, env::OS os): env(nullptr), _uid(++_uid_
             env = std::make_shared();
             endianness = Endian::BIG;
             break;
+        case Arch::Type::PPC64:
+            arch = std::make_shared<PPC64::ArchPPC64>();
+            lifters[CPUMode::PPC64] = std::make_shared<Lifter>(CPUMode::PPC64);
+            _current_cpu_mode = CPUMode::PPC64;
+            endianness = Endian::BIG;
+            break;
         case Arch::Type::NONE:
             arch = std::make_shared();
             _current_cpu_mode = CPUMode::NONE;
diff --git a/src/env/abi.cpp b/src/env/abi.cpp
index 142660d8..60c96354 100644
--- a/src/env/abi.cpp
+++ b/src/env/abi.cpp
@@ -402,6 +402,115 @@ void X86_LINUX_INT80::ret(MaatEngine& engine) const
     // Do nothing
 }
 
+/*============ LINUX PowerPC ABI ============*/
+PPC64ABI::PPC64ABI(): ABI(Type::PPC64ABI)
+{}
+
+ABI& PPC64ABI::instance()
+{
+    static PPC64ABI abi;
+    return abi;
+}
+
+void PPC64ABI::get_args(
+    MaatEngine& engine,
+    const args_spec_t& args_spec,
+    std::vector<Value>& args
+) const
+{
+    int i = 0;
+    for (auto arg : args_spec)
+        args.push_back(get_arg(engine, i++, arg));
+}
+
+Value PPC64ABI::get_arg(MaatEngine& engine, int n, size_t arg_size) const
+{
+    // Args on the stack, pushed right to left
+    arg_size = ABI::real_arg_size(engine, arg_size);
+    Value res = engine.mem->read(engine.cpu.ctx().get(PPC64::R1).as_uint() + 4 + 4*n, 4);
+    return _adjust_value_to_size(res, arg_size, engine);
+}
+
+void PPC64ABI::set_ret_value(
+    MaatEngine& engine,
+    const FunctionCallback::return_t& ret_val
+) const
+{
+    // Return value in R3
+    std::visit(maat::util::overloaded{
+        [](std::monostate arg){return;}, // no return value
+        [&engine](auto arg){engine.cpu.ctx().set(PPC64::R3, arg);}
+    }, ret_val);
+}
+
+void PPC64ABI::prepare_ret_address(MaatEngine& engine, addr_t ret_addr) const
+{
+    // Simply push the return address onto the stack
+    engine.cpu.ctx().set(PPC64::PC, engine.cpu.ctx().get(PPC64::PC).as_uint() - 4);
+    engine.mem->write(engine.cpu.ctx().get(PPC64::R1).as_uint(), ret_addr, 4);
+}
+
+void PPC64ABI::ret(MaatEngine& engine) const
+{
+    // Pop R1 (pop from the stack)
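+    // prepare_ret_address() above wrote the return address at the address
+    // held in R1 (the stack pointer), so returning means loading PC from
+    // [R1] and then releasing that 4-byte slot by incrementing R1.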
+    engine.cpu.ctx().set(PPC64::PC, engine.mem->read((engine.cpu.ctx().get(PPC64::R1).as_uint()), 4));
+    engine.cpu.ctx().set(PPC64::R1, engine.cpu.ctx().get(PPC64::R1).as_uint() + 4);
+}
+
+/* ============== ABI PowerPC SYSCALL LINUX ==============*/
+PPC64_SC::PPC64_SC(): ABI(Type::PPC64_SC)
+{}
+
+ABI& PPC64_SC::instance()
+{
+    static PPC64_SC abi;
+    return abi;
+}
+
+void PPC64_SC::get_args(
+    MaatEngine& engine,
+    const args_spec_t& args_spec,
+    std::vector<Value>& args
+) const
+{
+    int i = 0;
+    for (auto arg : args_spec)
+        args.push_back(get_arg(engine, i++, arg));
+}
+
+Value PPC64_SC::get_arg(MaatEngine& engine, int n, size_t arg_size) const
+{
+    std::vector<reg_t> arg_regs{PPC64::R3,PPC64::R4,PPC64::R5,PPC64::R6,PPC64::R7,PPC64::R8,PPC64::R9,PPC64::R10};
+    Value res;
+    arg_size = ABI::real_arg_size(engine, arg_size);
+    if (n >= arg_regs.size())
+    {
+        throw env_exception("get_arg(): Linux PPC64 SC ABI supports only up to 8 arguments");
+    }
+    else
+    {
+        res = engine.cpu.ctx().get(arg_regs[n]);
+    }
+    return _adjust_value_to_size(res, arg_size, engine);
+}
+
+void PPC64_SC::set_ret_value(
+    MaatEngine& engine,
+    const FunctionCallback::return_t& ret_val
+) const
+{
+    // Return value in R3
+    std::visit(maat::util::overloaded{
+        [](std::monostate arg){return;}, // no return value
+        [&engine](auto arg){engine.cpu.ctx().set(PPC64::R3, arg);}
+    }, ret_val);
+}
+
+void PPC64_SC::ret(MaatEngine& engine) const
+{
+    // Do nothing
+}
+
 } // namespace abi
 } // namespace env
 } // namespace maat
diff --git a/src/env/emulated_libs/libc.cpp b/src/env/emulated_libs/libc.cpp
index 1c38fc1b..dde612c1 100644
--- a/src/env/emulated_libs/libc.cpp
+++ b/src/env/emulated_libs/libc.cpp
@@ -659,6 +659,20 @@ Library linux_x64_libc()
     return lib;
 }
 
+// For PowerPC 64-bit
+Library linux_ppc64_libc()
+{
+    Library lib("libc", libc_common_functions, libc_common_data);
+    // Arch specific functions...
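+    // PPC64 reuses the generic __libc_start_main callbacks from the x64
+    // emulation; declaring all seven arguments as 8-byte values makes the
+    // shared callback fetch each of them at 64-bit width via the PPC64 ABI.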
+ lib.add_function(Function("__libc_start_main", + FunctionCallback({8,8,8,8,8,8,8}, linux_x64_libc_start_main_callback_part1) + )); + lib.add_function(Function("__libc_start_main_part2", + FunctionCallback({}, linux_x64_libc_start_main_callback_part2) + )); + return lib; +} + } // namespace emulated } // namespace env } // namespace maat diff --git a/src/env/emulated_syscalls/linux_syscalls.cpp b/src/env/emulated_syscalls/linux_syscalls.cpp index 23f912db..e72dd150 100644 --- a/src/env/emulated_syscalls/linux_syscalls.cpp +++ b/src/env/emulated_syscalls/linux_syscalls.cpp @@ -833,6 +833,32 @@ syscall_func_map_t linux_x64_syscall_map() return res; } +syscall_func_map_t linux_ppc64_syscall_map() + { + syscall_func_map_t res + { + {1, Function("sys_exit", FunctionCallback({4}, sys_linux_exit))}, + {3, Function("sys_read", FunctionCallback({4, env::abi::auto_argsize, 4}, sys_linux_read))}, + {4, Function("sys_write", FunctionCallback({4, env::abi::auto_argsize, 4}, sys_linux_write))}, + {5, Function("sys_open", FunctionCallback({env::abi::auto_argsize, 4, 4}, sys_linux_open))}, + {6, Function("sys_close", FunctionCallback({4}, sys_linux_close))}, + {33, Function("sys_access", FunctionCallback({env::abi::auto_argsize, 4}, sys_linux_access))}, + {45, Function("sys_brk", FunctionCallback({env::abi::auto_argsize}, sys_linux_brk))}, + {85, Function("sys_readlink", FunctionCallback({env::abi::auto_argsize, env::abi::auto_argsize, env::abi::auto_argsize}, sys_linux_readlink))}, + {90, Function("sys_mmap", FunctionCallback({env::abi::auto_argsize, 4, 4, 4, 4, 4}, sys_linux_mmap))}, + {91, Function("sys_munmap", FunctionCallback({env::abi::auto_argsize, env::abi::auto_argsize}, sys_linux_munmap))}, + {106, Function("sys_stat", FunctionCallback({env::abi::auto_argsize, env::abi::auto_argsize}, sys_linux_stat))}, + {108, Function("sys_fstat", FunctionCallback({4, env::abi::auto_argsize}, sys_linux_fstat))}, + {122, Function("sys_newuname", FunctionCallback({env::abi::auto_argsize}, sys_linux_newuname))}, + {125, Function("sys_mprotect", FunctionCallback({env::abi::auto_argsize, 4, 4}, sys_linux_mprotect))}, + {146, Function("sys_writev", FunctionCallback({4, env::abi::auto_argsize, env::abi::auto_argsize}, sys_linux_writev))}, + {234, Function("sys_exit_group", FunctionCallback({4}, sys_linux_exit))}, + {286, Function("sys_openat", FunctionCallback({4, env::abi::auto_argsize, 4, 4}, sys_linux_openat))}, + {291, Function("sys_newfstatat", FunctionCallback({4, env::abi::auto_argsize, env::abi::auto_argsize, 4}, sys_linux_fstatat))} + }; + return res; +} + } // namespace emulated } // namespace env } // namespace maat diff --git a/src/env/env.cpp b/src/env/env.cpp index 0e12cf7e..ea9fc949 100644 --- a/src/env/env.cpp +++ b/src/env/env.cpp @@ -15,6 +15,11 @@ abi::ABI* _get_default_abi(Arch::Type arch, OS os) if (os == OS::LINUX) return &abi::X64_SYSTEM_V::instance(); } + else if (arch == Arch::Type::PPC64) + { + if (os == OS::LINUX) + return &abi::PPC64ABI::instance(); + } return &abi::ABI_NONE::instance(); } @@ -25,6 +30,11 @@ abi::ABI* _get_syscall_abi(Arch::Type arch, OS os) if (os == OS::LINUX) return &abi::X64_LINUX_SYSCALL::instance(); } + else if (arch == Arch::Type::PPC64) + { + if (os == OS::LINUX) + return &abi::PPC64_SC::instance(); + } return &abi::ABI_NONE::instance(); } diff --git a/src/env/env_linux.cpp b/src/env/env_linux.cpp index a1f0d585..c3db4e55 100644 --- a/src/env/env_linux.cpp +++ b/src/env/env_linux.cpp @@ -23,6 +23,9 @@ void LinuxEmulator::_init(Arch::Type arch) 
_libraries.push_back(env::emulated::linux_x64_libc()); _syscall_func_map = env::emulated::linux_x64_syscall_map(); break; + case Arch::Type::PPC64: + _libraries.push_back(env::emulated::linux_ppc64_libc()); + _syscall_func_map = env::emulated::linux_ppc64_syscall_map(); case Arch::Type::NONE: default: break; diff --git a/src/include/maat/arch.hpp b/src/include/maat/arch.hpp index ddb96ef3..4ac61dff 100644 --- a/src/include/maat/arch.hpp +++ b/src/include/maat/arch.hpp @@ -36,6 +36,7 @@ enum class CPUMode T32, ///< ARM Thumb A64, ///< ARM 64-bits EVM, ///< Ethereum byte-code + PPC64, ///<< PowerPC 64-bit NONE }; @@ -57,6 +58,7 @@ class Arch: public serial::Serializable ARM32, // TODO ///< armv7 (32 bits) ARM64, // TODO ///< armv8 (64 bits) EVM, ///< Ethereum byte-code + PPC64, ///< PowerPC (64 bits) NONE }; @@ -422,7 +424,130 @@ namespace ARM64 } // namespace ARM64 -/** \} */ // Arch doxygen group +/* ================================================== + * PowerPC + * ================================================= */ + +/// Namespace for PowerPC 64 specific definitions and classes +namespace PPC64 +{ + /* Registers */ + static constexpr reg_t R0 = 0; ///< General Purpose Register + static constexpr reg_t R1 = 1; ///< General Purpose Register + static constexpr reg_t R2 = 2; ///< General Purpose Register + static constexpr reg_t R3 = 3; ///< General Purpose Register + static constexpr reg_t R4 = 4; ///< General Purpose Register + static constexpr reg_t R5 = 5; ///< General Purpose Register + static constexpr reg_t R6 = 6; ///< General Purpose Register + static constexpr reg_t R7 = 7; ///< General Purpose Register + static constexpr reg_t R8 = 8; ///< General Purpose Register + static constexpr reg_t R9 = 9; ///< General Purpose Register + static constexpr reg_t R10 = 10; ///< General Purpose Register + static constexpr reg_t R11 = 11; ///< General Purpose Register + static constexpr reg_t R12 = 12; ///< General Purpose Register + static constexpr reg_t R13 = 13; ///< General Purpose Register + static constexpr reg_t R14 = 14; ///< General Purpose Register + static constexpr reg_t R15 = 15; ///< General Purpose Register + static constexpr reg_t R16 = 16; ///< General Purpose Register + static constexpr reg_t R17 = 17; ///< General Purpose Register + static constexpr reg_t R18 = 18; ///< General Purpose Register + static constexpr reg_t R19 = 19; ///< General Purpose Register + static constexpr reg_t R20 = 20; ///< General Purpose Register + static constexpr reg_t R21 = 21; ///< General Purpose Register + static constexpr reg_t R22 = 22; ///< General Purpose Register + static constexpr reg_t R23 = 23; ///< General Purpose Register + static constexpr reg_t R24 = 24; ///< General Purpose Register + static constexpr reg_t R25 = 25; ///< General Purpose Register + static constexpr reg_t R26 = 26; ///< General Purpose Register + static constexpr reg_t R27 = 27; ///< General Purpose Register + static constexpr reg_t R28 = 28; ///< General Purpose Register + static constexpr reg_t R29 = 29; ///< General Purpose Register + static constexpr reg_t R30 = 30; ///< General Purpose Register + static constexpr reg_t R31 = 31; ///< General Purpose Register + /* Floating Point Registers */ + static constexpr reg_t F0 = 32; ///< Floating Point Registers + static constexpr reg_t F1 = 33; ///< Floating Point Registers + static constexpr reg_t F2 = 34; ///< Floating Point Registers + static constexpr reg_t F3 = 35; ///< Floating Point Registers + static constexpr reg_t F4 = 36; ///< Floating Point Registers + 
static constexpr reg_t F5 = 37; ///< Floating Point Registers
+    static constexpr reg_t F6 = 38; ///< Floating Point Registers
+    static constexpr reg_t F7 = 39; ///< Floating Point Registers
+    static constexpr reg_t F8 = 40; ///< Floating Point Registers
+    static constexpr reg_t F9 = 41; ///< Floating Point Registers
+    static constexpr reg_t F10 = 42; ///< Floating Point Registers
+    static constexpr reg_t F11 = 43; ///< Floating Point Registers
+    static constexpr reg_t F12 = 44; ///< Floating Point Registers
+    static constexpr reg_t F13 = 45; ///< Floating Point Registers
+    static constexpr reg_t F14 = 46; ///< Floating Point Registers
+    static constexpr reg_t F15 = 47; ///< Floating Point Registers
+    static constexpr reg_t F16 = 48; ///< Floating Point Registers
+    static constexpr reg_t F17 = 49; ///< Floating Point Registers
+    static constexpr reg_t F18 = 50; ///< Floating Point Registers
+    static constexpr reg_t F19 = 51; ///< Floating Point Registers
+    static constexpr reg_t F20 = 52; ///< Floating Point Registers
+    static constexpr reg_t F21 = 53; ///< Floating Point Registers
+    static constexpr reg_t F22 = 54; ///< Floating Point Registers
+    static constexpr reg_t F23 = 55; ///< Floating Point Registers
+    static constexpr reg_t F24 = 56; ///< Floating Point Registers
+    static constexpr reg_t F25 = 57; ///< Floating Point Registers
+    static constexpr reg_t F26 = 58; ///< Floating Point Registers
+    static constexpr reg_t F27 = 59; ///< Floating Point Registers
+    static constexpr reg_t F28 = 60; ///< Floating Point Registers
+    static constexpr reg_t F29 = 61; ///< Floating Point Registers
+    static constexpr reg_t F30 = 62; ///< Floating Point Registers
+    static constexpr reg_t F31 = 63; ///< Floating Point Registers
+    /* Special Registers */
+    static constexpr reg_t PC = 64; ///< Program Counter
+    static constexpr reg_t SP = 1; ///< Same As R1
+    static constexpr reg_t CR = 65; ///< Condition Register
+    static constexpr reg_t LR = 66; ///< Link Register
+    static constexpr reg_t CTR = 67; ///< Count Register
+    static constexpr reg_t XER = 68; ///< Fixed-point Exception Register (CA, OV, SO)
+    /* CR Flags */
+    static constexpr reg_t CR0 = 69; ///< Condition Register 0
+    static constexpr reg_t CR1 = 70; ///< Condition Register 1
+    static constexpr reg_t CR2 = 71; ///< Condition Register 2
+    static constexpr reg_t CR3 = 72; ///< Condition Register 3
+    static constexpr reg_t CR4 = 73; ///< Condition Register 4
+    static constexpr reg_t CR5 = 74; ///< Condition Register 5
+    static constexpr reg_t CR6 = 75; ///< Condition Register 6
+    static constexpr reg_t CR7 = 76; ///< Condition Register 7
+    /* XER Flags */
+    static constexpr reg_t XER_SO = 77; ///< Summary Overflow flag
+    static constexpr reg_t XER_OV = 78; ///< Overflow flag
+    static constexpr reg_t XER_CA = 79; ///< Carry flag
+    /* Time Base Register */
+    static constexpr reg_t TBL = 80;
+    static constexpr reg_t TBU = 81;
+    /* Floating Point Status and Control Registers */
+    static constexpr reg_t FPSCR = 82;
+    /* Machine State Register */
+    static constexpr reg_t MSR = 83;
+    /* PVR Special Purpose Register */
+    static constexpr reg_t PVR = 84;
+    /* Reserved Registers */
+    static constexpr reg_t R2SAVE = 85;
+    static constexpr reg_t RESERVE = 86;
+    static constexpr reg_t NB_REGS = 87;
+
+    /** \addtogroup arch
+     * \{ */
+
+    /// PowerPC 64 architecture
+    class ArchPPC64: public Arch
+    {
+    public:
+        ArchPPC64();
+        ~ArchPPC64() = default;
+        size_t reg_size(reg_t reg_num) const;
+        reg_t sp() const;
+        reg_t pc() const;
+        reg_t tsc() const;
+    };
+
+    /**
\} */ // Arch doxygen group +} // namespace PPC64 } // namespace maat diff --git a/src/include/maat/callother.hpp b/src/include/maat/callother.hpp index 0e3e34e3..2ce7ec40 100644 --- a/src/include/maat/callother.hpp +++ b/src/include/maat/callother.hpp @@ -65,6 +65,9 @@ enum class Id EVM_CREATE, EVM_SELFDESTRUCT, EVM_LOG, + //PPC 64 + PPC64_CNTLZW, ///< CNTLZW instruction + PPC64_SC, ///< System Call UNSUPPORTED }; diff --git a/src/include/maat/env/library.hpp b/src/include/maat/env/library.hpp index 914d747b..82ab6a17 100644 --- a/src/include/maat/env/library.hpp +++ b/src/include/maat/env/library.hpp @@ -103,6 +103,9 @@ enum class Type X64_LINUX_SYSCALL, /* ARM64 */ ARM64, + /* PowerPC */ + PPC64ABI, + PPC64_SC, /* Custom */ X86_LINUX_CUSTOM_SYSCALL, ///< Used internally X64_LINUX_CUSTOM_SYSCALL, ///< Used internally @@ -308,6 +311,61 @@ class X64_LINUX_SYSCALL : public ABI virtual void ret(MaatEngine& engine) const; }; +/// PowerPC Default ABI +class PPC64ABI : public ABI +{ +protected: + PPC64ABI(); +public: + /// ABI instance (singleton pattern) + static ABI& instance(); +public: + /// Get function arguments + virtual void get_args( + MaatEngine& engine, + const args_spec_t& args_spec, + std::vector& args + ) const; + /// Get function argument number 'n' (starting at 0) + virtual Value get_arg(MaatEngine& engine, int n, size_t arg_size) const; + /// Set a function's return value before it returns + virtual void set_ret_value( + MaatEngine& engine, + const FunctionCallback::return_t& ret_val + ) const; + /// Set the return address prior to call a function + virtual void prepare_ret_address(MaatEngine& engine, addr_t ret_addr) const; + /// Return from a function + virtual void ret(MaatEngine& engine) const; +}; + +/// PowerPC Linux SYSCALL ABI +class PPC64_SC : public ABI +{ +protected: + PPC64_SC(); +public: + /// ABI instance (singleton pattern) + static ABI& instance(); +public: + /// Get function arguments + virtual void get_args( + MaatEngine& engine, + const args_spec_t& args_spec, + std::vector& args + ) const; + /// Get function argument number 'n' (starting at 0) + virtual Value get_arg(MaatEngine& engine, int n, size_t arg_size) const; + /// Set a function's return value before it returns + virtual void set_ret_value( + MaatEngine& engine, + const FunctionCallback::return_t& ret_val + ) const; + /// Return from the syscall + virtual void ret(MaatEngine& engine) const; +}; + + /** \} */ // doxygen group env } // namespace ABI @@ -417,6 +475,8 @@ namespace emulated Library linux_x86_libc(); /// Return the emulated libc.so for Linux on X64 Library linux_x64_libc(); +/// Return the emulated libc.so for Linux on PPC64 +Library linux_ppc64_libc(); } diff --git a/src/include/maat/env/syscall.hpp b/src/include/maat/env/syscall.hpp index b6fd7bf1..028c1e86 100644 --- a/src/include/maat/env/syscall.hpp +++ b/src/include/maat/env/syscall.hpp @@ -21,6 +21,8 @@ namespace emulated syscall_func_map_t linux_x86_syscall_map(); /// Return the emulated syscalls for Linux on X64 syscall_func_map_t linux_x64_syscall_map(); +/// Return the emulated syscalls for Linux on PPC64 +syscall_func_map_t linux_ppc64_syscall_map(); } diff --git a/src/include/maat/loader.hpp b/src/include/maat/loader.hpp index 2f9fe3d8..ebadd180 100644 --- a/src/include/maat/loader.hpp +++ b/src/include/maat/loader.hpp @@ -144,7 +144,7 @@ class LoaderLIEF : public Loader private: void parse_binary(const std::string& binary, loader::Format type); void get_arch_special_registers( - const Arch& arch, reg_t& pc, reg_t& sp, reg_t& 
bp, reg_t& gs, reg_t& fs + const Arch& arch, std::optional& pc, std::optional& sp, std::optional& bp, std::optional& gs, std::optional& fs ); void map_elf_segments(MaatEngine*engine, addr_t base_address); void load_elf_dependencies( diff --git a/src/include/maat/sleigh_interface.hpp b/src/include/maat/sleigh_interface.hpp index d2884524..4ec1ae61 100644 --- a/src/include/maat/sleigh_interface.hpp +++ b/src/include/maat/sleigh_interface.hpp @@ -40,7 +40,8 @@ namespace maat inline maat::ir::Param sleigh_reg_translate_X86(const std::string& reg_name); inline maat::ir::Param sleigh_reg_translate_X64(const std::string& reg_name); inline maat::ir::Param sleigh_reg_translate_EVM(const std::string& reg_name); - + inline maat::ir::Param sleigh_reg_translate_PPC64(const std::string& reg_name); + inline maat::ir::Param sleigh_masked_reg_translate_PPC64(const std::string& reg_name, const int size); } #endif diff --git a/src/loader/loader_lief.cpp b/src/loader/loader_lief.cpp index 012eac5b..8cd48a6f 100644 --- a/src/loader/loader_lief.cpp +++ b/src/loader/loader_lief.cpp @@ -85,7 +85,7 @@ void LoaderLIEF::parse_binary(const std::string& binary, Format type) } void LoaderLIEF::get_arch_special_registers( - const Arch& arch, reg_t& pc, reg_t& sp, reg_t& bp, reg_t& gs, reg_t& fs + const Arch& arch, std::optional& pc, std::optional& sp, std::optional& bp, std::optional& gs, std::optional& fs ) { pc = arch.pc(); @@ -102,6 +102,8 @@ void LoaderLIEF::get_arch_special_registers( gs = X64::GS; fs = X64::FS; break; + case Arch::Type::PPC64: + break; default: throw loader_exception( Fmt() << "LoaderLIEF::get_arch_special_registers(): Unsupported architecture!" diff --git a/src/loader/loader_lief_elf.cpp b/src/loader/loader_lief_elf.cpp index 97d94e3e..dae2d566 100644 --- a/src/loader/loader_lief_elf.cpp +++ b/src/loader/loader_lief_elf.cpp @@ -212,11 +212,11 @@ void LoaderLIEF::load_elf_using_interpreter( const std::string& interp_path ) { - reg_t reg_sp = -1; - reg_t reg_bp = -1; - reg_t reg_gs = -1; - reg_t reg_fs = -1; - reg_t reg_pc = -1; + std::optional reg_sp = std::nullopt; + std::optional reg_bp = std::nullopt; + std::optional reg_gs = std::nullopt; + std::optional reg_fs = std::nullopt; + std::optional reg_pc = std::nullopt; addr_t stack_base, stack_size, stack_top; // Get particular registers @@ -242,8 +242,10 @@ void LoaderLIEF::load_elf_using_interpreter( stack_size = 0x00200000; stack_top = engine->arch->bits() == 32 ? 
0x0c000000 : 0x80000000000; stack_base = alloc_segment(engine, stack_top-stack_size, stack_size, maat::mem_flag_rw, "Stack"); - engine->cpu.ctx().set(reg_sp, stack_base+stack_size-0x400); // - 0x400 to leave some space in memory - engine->cpu.ctx().set(reg_bp, stack_base+stack_size-0x400); + engine->cpu.ctx().set(reg_sp.value(), stack_base+stack_size-0x400); // - 0x400 to leave some space in memory + // For x86 special register 'reg_bp', leave some space in memory + if (reg_bp) + engine->cpu.ctx().set(*reg_bp, stack_base+stack_size-0x400); // Load interpreter load_elf_interpreter(engine, interp_path, *this); @@ -255,7 +257,7 @@ void LoaderLIEF::load_elf_using_interpreter( add_elf_dependencies_to_emulated_fs(engine, libdirs, ignore_libs, virtual_fs); // Point PC to interpreter entrypoint - engine->cpu.ctx().set(reg_pc, interpreter_entry.value()); + engine->cpu.ctx().set(reg_pc.value(), interpreter_entry.value()); } void LoaderLIEF::load_elf_binary( @@ -272,11 +274,11 @@ void LoaderLIEF::load_elf_binary( addr_t stack_base, stack_top, stack_size, heap_base, heap_size; addr_t gs, fs; std::list loaded_libs; - reg_t reg_sp = -1; - reg_t reg_bp = -1; - reg_t reg_gs = -1; - reg_t reg_fs = -1; - reg_t reg_pc = -1; + std::optional reg_sp = std::nullopt; + std::optional reg_bp = std::nullopt; + std::optional reg_gs = std::nullopt; + std::optional reg_fs = std::nullopt; + std::optional reg_pc = std::nullopt; int arch_bytes = engine->arch->octets(); // Get particular registers @@ -292,8 +294,10 @@ void LoaderLIEF::load_elf_binary( stack_size = 0x00200000; stack_top = engine->arch->bits() == 32 ? 0x0c000000 : 0x80000000000; stack_base = alloc_segment(engine, stack_top-stack_size, stack_size, maat::mem_flag_rw, "Stack"); - engine->cpu.ctx().set(reg_sp, stack_base+stack_size-0x400); // - 0x400 to leave some space in memory - engine->cpu.ctx().set(reg_bp, stack_base+stack_size-0x400); + engine->cpu.ctx().set(reg_sp.value(), stack_base+stack_size-0x400); // - 0x400 to leave some space in memory + // For x86 special register 'reg_bp', leave some space in memory + if (reg_bp) + engine->cpu.ctx().set(*reg_bp, stack_base+stack_size-0x400); // Setup heap heap_base = end_of_segment(*engine->mem, binary_name); @@ -306,12 +310,12 @@ void LoaderLIEF::load_elf_binary( ); // Allocate some segments for GS and FS segment selectors (stack canary stuff) - if (reg_gs != -1) + if (reg_gs && reg_fs) { gs = alloc_segment(engine, 0x00aa0000, 0x1000, maat::mem_flag_rw, "Fake GS: segment"); fs = alloc_segment(engine, 0x00aa0000, 0x1000, maat::mem_flag_rw, "Fake FS: segment"); - engine->cpu.ctx().set(reg_gs, gs); - engine->cpu.ctx().set(reg_fs, fs); + engine->cpu.ctx().set(*reg_gs, gs); + engine->cpu.ctx().set(*reg_fs, fs); } // Preload emulated libraries. 
We do it before loading dependencies @@ -333,7 +337,7 @@ void LoaderLIEF::load_elf_binary( elf_setup_stack(engine, base, args, envp); // Point PC to program entrypoint - engine->cpu.ctx().set(reg_pc, _elf->entrypoint() + base); + engine->cpu.ctx().set(reg_pc.value(), _elf->entrypoint() + base); } void LoaderLIEF::force_relocation(MaatEngine* engine, addr_t base, const std::string& rel_name, addr_t value) @@ -728,6 +732,8 @@ std::vector> LoaderLIEF::generate_aux_vector( platform = "x86_64"; else if (engine->arch->type == Arch::Type::ARM64) platform = "arm64"; + else if (engine->arch->type == Arch::Type::PPC64) + platform = "PowerPC64"; else throw loader_exception("LIEFLoader::_generate_aux_vector(): got unsupported architecture"); diff --git a/src/third-party/sleigh/native/reg_translator.cpp b/src/third-party/sleigh/native/reg_translator.cpp index bc39c700..eecedf1f 100644 --- a/src/third-party/sleigh/native/reg_translator.cpp +++ b/src/third-party/sleigh/native/reg_translator.cpp @@ -1320,4 +1320,177 @@ maat::ir::Param sleigh_reg_translate_EVM(const std::string& reg_name) ); } +maat::ir::Param sleigh_reg_translate_PPC64(const std::string& reg_name) +{ + if (reg_name == "r0") return maat::ir::Reg(maat::PPC64::R0, 64); + if (reg_name == "r1") return maat::ir::Reg(maat::PPC64::R1, 64); + if (reg_name == "r2") return maat::ir::Reg(maat::PPC64::R2, 64); + if (reg_name == "r3") return maat::ir::Reg(maat::PPC64::R3, 64); + if (reg_name == "r4") return maat::ir::Reg(maat::PPC64::R4, 64); + if (reg_name == "r5") return maat::ir::Reg(maat::PPC64::R5, 64); + if (reg_name == "r6") return maat::ir::Reg(maat::PPC64::R6, 64); + if (reg_name == "r7") return maat::ir::Reg(maat::PPC64::R7, 64); + if (reg_name == "r8") return maat::ir::Reg(maat::PPC64::R8, 64); + if (reg_name == "r9") return maat::ir::Reg(maat::PPC64::R9, 64); + if (reg_name == "r10") return maat::ir::Reg(maat::PPC64::R10, 64); + if (reg_name == "r11") return maat::ir::Reg(maat::PPC64::R11, 64); + if (reg_name == "r12") return maat::ir::Reg(maat::PPC64::R12, 64); + if (reg_name == "r13") return maat::ir::Reg(maat::PPC64::R13, 64); + if (reg_name == "r14") return maat::ir::Reg(maat::PPC64::R14, 64); + if (reg_name == "r15") return maat::ir::Reg(maat::PPC64::R15, 64); + if (reg_name == "r16") return maat::ir::Reg(maat::PPC64::R16, 64); + if (reg_name == "r17") return maat::ir::Reg(maat::PPC64::R17, 64); + if (reg_name == "r18") return maat::ir::Reg(maat::PPC64::R18, 64); + if (reg_name == "r19") return maat::ir::Reg(maat::PPC64::R19, 64); + if (reg_name == "r20") return maat::ir::Reg(maat::PPC64::R20, 64); + if (reg_name == "r21") return maat::ir::Reg(maat::PPC64::R21, 64); + if (reg_name == "r22") return maat::ir::Reg(maat::PPC64::R22, 64); + if (reg_name == "r23") return maat::ir::Reg(maat::PPC64::R23, 64); + if (reg_name == "r24") return maat::ir::Reg(maat::PPC64::R24, 64); + if (reg_name == "r25") return maat::ir::Reg(maat::PPC64::R25, 64); + if (reg_name == "r26") return maat::ir::Reg(maat::PPC64::R26, 64); + if (reg_name == "r27") return maat::ir::Reg(maat::PPC64::R27, 64); + if (reg_name == "r28") return maat::ir::Reg(maat::PPC64::R28, 64); + if (reg_name == "r29") return maat::ir::Reg(maat::PPC64::R29, 64); + if (reg_name == "r30") return maat::ir::Reg(maat::PPC64::R30, 64); + if (reg_name == "r31") return maat::ir::Reg(maat::PPC64::R31, 64); + + if (reg_name == "f0") return maat::ir::Reg(maat::PPC64::F0, 64); + if (reg_name == "f1") return maat::ir::Reg(maat::PPC64::F1, 64); + if (reg_name == "f2") return 
maat::ir::Reg(maat::PPC64::F2, 64); + if (reg_name == "f3") return maat::ir::Reg(maat::PPC64::F3, 64); + if (reg_name == "f4") return maat::ir::Reg(maat::PPC64::F4, 64); + if (reg_name == "f5") return maat::ir::Reg(maat::PPC64::F5, 64); + if (reg_name == "f6") return maat::ir::Reg(maat::PPC64::F6, 64); + if (reg_name == "f7") return maat::ir::Reg(maat::PPC64::F7, 64); + if (reg_name == "f8") return maat::ir::Reg(maat::PPC64::F8, 64); + if (reg_name == "f9") return maat::ir::Reg(maat::PPC64::F9, 64); + if (reg_name == "f10") return maat::ir::Reg(maat::PPC64::F10, 64); + if (reg_name == "f11") return maat::ir::Reg(maat::PPC64::F11, 64); + if (reg_name == "f12") return maat::ir::Reg(maat::PPC64::F12, 64); + if (reg_name == "f13") return maat::ir::Reg(maat::PPC64::F13, 64); + if (reg_name == "f14") return maat::ir::Reg(maat::PPC64::F14, 64); + if (reg_name == "f15") return maat::ir::Reg(maat::PPC64::F15, 64); + if (reg_name == "f16") return maat::ir::Reg(maat::PPC64::F16, 64); + if (reg_name == "f17") return maat::ir::Reg(maat::PPC64::F17, 64); + if (reg_name == "f18") return maat::ir::Reg(maat::PPC64::F18, 64); + if (reg_name == "f19") return maat::ir::Reg(maat::PPC64::F19, 64); + if (reg_name == "f20") return maat::ir::Reg(maat::PPC64::F20, 64); + if (reg_name == "f21") return maat::ir::Reg(maat::PPC64::F21, 64); + if (reg_name == "f22") return maat::ir::Reg(maat::PPC64::F22, 64); + if (reg_name == "f23") return maat::ir::Reg(maat::PPC64::F23, 64); + if (reg_name == "f24") return maat::ir::Reg(maat::PPC64::F24, 64); + if (reg_name == "f25") return maat::ir::Reg(maat::PPC64::F25, 64); + if (reg_name == "f26") return maat::ir::Reg(maat::PPC64::F26, 64); + if (reg_name == "f27") return maat::ir::Reg(maat::PPC64::F27, 64); + if (reg_name == "f28") return maat::ir::Reg(maat::PPC64::F28, 64); + if (reg_name == "f29") return maat::ir::Reg(maat::PPC64::F29, 64); + if (reg_name == "f30") return maat::ir::Reg(maat::PPC64::F30, 64); + if (reg_name == "f31") return maat::ir::Reg(maat::PPC64::F31, 64); + + if (reg_name == "pc") return maat::ir::Reg(maat::PPC64::PC, 64); + if (reg_name == "LR") return maat::ir::Reg(maat::PPC64::LR, 64); + if (reg_name == "CTR") return maat::ir::Reg(maat::PPC64::CTR, 64); + if (reg_name == "xer") return maat::ir::Reg(maat::PPC64::XER, 32); + + if (reg_name == "xer_so") return maat::ir::Reg(maat::PPC64::XER_SO,8); + if (reg_name == "xer_ov") return maat::ir::Reg(maat::PPC64::XER_OV,8); + if (reg_name == "xer_ca") return maat::ir::Reg(maat::PPC64::XER_CA,8); + + if (reg_name == "cr0") return maat::ir::Reg(maat::PPC64::CR0,8); + if (reg_name == "cr1") return maat::ir::Reg(maat::PPC64::CR1,8); + if (reg_name == "cr2") return maat::ir::Reg(maat::PPC64::CR2,8); + if (reg_name == "cr3") return maat::ir::Reg(maat::PPC64::CR3,8); + if (reg_name == "cr4") return maat::ir::Reg(maat::PPC64::CR4,8); + if (reg_name == "cr5") return maat::ir::Reg(maat::PPC64::CR5,8); + if (reg_name == "cr6") return maat::ir::Reg(maat::PPC64::CR6,8); + if (reg_name == "cr7") return maat::ir::Reg(maat::PPC64::CR7,8); + + if (reg_name == "tblw") return maat::ir::Reg(maat::PPC64::TBL,32); + if (reg_name == "tbuw") return maat::ir::Reg(maat::PPC64::TBU,32); + if (reg_name == "tblr") return maat::ir::Reg(maat::PPC64::TBL,32); + if (reg_name == "tbur") return maat::ir::Reg(maat::PPC64::TBU,32); + + if (reg_name == "fpscr") return maat::ir::Reg(maat::PPC64::FPSCR,32); + if (reg_name == "fp_fx") return maat::ir::Reg(maat::PPC64::FPSCR,0,0); + if (reg_name == "fp_fex") return maat::ir::Reg(maat::PPC64::FPSCR,1,1); 
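+    // The three-argument Reg(reg, msb, lsb) form used for the FPSCR flags
+    // above and below selects a bit field within the single FPSCR register
+    // rather than the whole 32-bit register.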
+ if (reg_name == "fp_vx") return maat::ir::Reg(maat::PPC64::FPSCR,2,2); + if (reg_name == "fp_ox") return maat::ir::Reg(maat::PPC64::FPSCR,3,3); + if (reg_name == "fp_ux") return maat::ir::Reg(maat::PPC64::FPSCR,4,4); + if (reg_name == "fp_zx") return maat::ir::Reg(maat::PPC64::FPSCR,5,5); + if (reg_name == "fp_xx") return maat::ir::Reg(maat::PPC64::FPSCR,6,6); + if (reg_name == "fp_vxsnan") return maat::ir::Reg(maat::PPC64::FPSCR,7,7); + if (reg_name == "fp_vxisi") return maat::ir::Reg(maat::PPC64::FPSCR,8,8); + if (reg_name == "fp_vxidi") return maat::ir::Reg(maat::PPC64::FPSCR,9,9); + if (reg_name == "fp_vxzdz") return maat::ir::Reg(maat::PPC64::FPSCR,10,10); + if (reg_name == "fp_vximz") return maat::ir::Reg(maat::PPC64::FPSCR,11,11); + if (reg_name == "fp_vxvc") return maat::ir::Reg(maat::PPC64::FPSCR,12,12); + if (reg_name == "fp_fr") return maat::ir::Reg(maat::PPC64::FPSCR,13,13); + if (reg_name == "fp_fi") return maat::ir::Reg(maat::PPC64::FPSCR,14,14); + if (reg_name == "fp_fprf") return maat::ir::Reg(maat::PPC64::FPSCR,19,15); + if (reg_name == "fp_vxsoft") return maat::ir::Reg(maat::PPC64::FPSCR,21,21); + if (reg_name == "fp_vxsqrt") return maat::ir::Reg(maat::PPC64::FPSCR,22,22); + if (reg_name == "fp_vxcvi") return maat::ir::Reg(maat::PPC64::FPSCR,23,23); + if (reg_name == "fp_ve") return maat::ir::Reg(maat::PPC64::FPSCR,24,24); + if (reg_name == "fp_oe") return maat::ir::Reg(maat::PPC64::FPSCR,25,25); + if (reg_name == "fp_ue") return maat::ir::Reg(maat::PPC64::FPSCR,26,26); + if (reg_name == "fp_ze") return maat::ir::Reg(maat::PPC64::FPSCR,27,27); + if (reg_name == "fp_xe") return maat::ir::Reg(maat::PPC64::FPSCR,28,28); + if (reg_name == "fp_ni") return maat::ir::Reg(maat::PPC64::FPSCR,29,29); + if (reg_name == "fp_rn") return maat::ir::Reg(maat::PPC64::FPSCR,31,30); + + if (reg_name == "MSR") return maat::ir::Reg(maat::PPC64::MSR, 63, 0); + if (reg_name == "spr11f") return maat::ir::Reg(maat::PPC64::PVR,32); + if (reg_name == "r2Save") return maat::ir::Reg(maat::PPC64::R2SAVE, 63, 0); + if (reg_name == "RESERVE") return maat::ir::Reg(maat::PPC64::RESERVE,32); + + throw maat::runtime_exception(maat::Fmt() + << "PPC64: Register translation from SLEIGH to MAAT missing for register " + << reg_name + >> maat::Fmt::to_str + ); +} + +maat::ir::Param sleigh_masked_reg_translate_PPC64(const std::string& reg_name, const int size) +{ + if (reg_name == "r0") return maat::ir::Reg(maat::PPC64::R0, size); + if (reg_name == "r1") return maat::ir::Reg(maat::PPC64::R1, size); + if (reg_name == "r2") return maat::ir::Reg(maat::PPC64::R2, size); + if (reg_name == "r3") return maat::ir::Reg(maat::PPC64::R3, size); + if (reg_name == "r4") return maat::ir::Reg(maat::PPC64::R4, size); + if (reg_name == "r5") return maat::ir::Reg(maat::PPC64::R5, size); + if (reg_name == "r6") return maat::ir::Reg(maat::PPC64::R6, size); + if (reg_name == "r7") return maat::ir::Reg(maat::PPC64::R7, size); + if (reg_name == "r8") return maat::ir::Reg(maat::PPC64::R8, size); + if (reg_name == "r9") return maat::ir::Reg(maat::PPC64::R9, size); + if (reg_name == "r10") return maat::ir::Reg(maat::PPC64::R10, size); + if (reg_name == "r11") return maat::ir::Reg(maat::PPC64::R11, size); + if (reg_name == "r12") return maat::ir::Reg(maat::PPC64::R12, size); + if (reg_name == "r13") return maat::ir::Reg(maat::PPC64::R13, size); + if (reg_name == "r14") return maat::ir::Reg(maat::PPC64::R14, size); + if (reg_name == "r15") return maat::ir::Reg(maat::PPC64::R15, size); + if (reg_name == "r16") return 
maat::ir::Reg(maat::PPC64::R16, size); + if (reg_name == "r17") return maat::ir::Reg(maat::PPC64::R17, size); + if (reg_name == "r18") return maat::ir::Reg(maat::PPC64::R18, size); + if (reg_name == "r19") return maat::ir::Reg(maat::PPC64::R19, size); + if (reg_name == "r20") return maat::ir::Reg(maat::PPC64::R20, size); + if (reg_name == "r21") return maat::ir::Reg(maat::PPC64::R21, size); + if (reg_name == "r22") return maat::ir::Reg(maat::PPC64::R22, size); + if (reg_name == "r23") return maat::ir::Reg(maat::PPC64::R23, size); + if (reg_name == "r24") return maat::ir::Reg(maat::PPC64::R24, size); + if (reg_name == "r25") return maat::ir::Reg(maat::PPC64::R25, size); + if (reg_name == "r26") return maat::ir::Reg(maat::PPC64::R26, size); + if (reg_name == "r27") return maat::ir::Reg(maat::PPC64::R27, size); + if (reg_name == "r28") return maat::ir::Reg(maat::PPC64::R28, size); + if (reg_name == "r29") return maat::ir::Reg(maat::PPC64::R29, size); + if (reg_name == "r30") return maat::ir::Reg(maat::PPC64::R30, size); + if (reg_name == "r31") return maat::ir::Reg(maat::PPC64::R31, size); + + throw maat::runtime_exception(maat::Fmt() + << "PPC64: Register translation from SLEIGH to MAAT missing for register " + << reg_name + >> maat::Fmt::to_str + ); +} + } // namespace maat \ No newline at end of file diff --git a/src/third-party/sleigh/native/sleigh_interface.cpp b/src/third-party/sleigh/native/sleigh_interface.cpp index e31b5702..e429721f 100644 --- a/src/third-party/sleigh/native/sleigh_interface.cpp +++ b/src/third-party/sleigh/native/sleigh_interface.cpp @@ -399,6 +399,15 @@ class TranslationContext // TODO - is this useful ? will this hinder performance ? // Needs to be here apparently but maybe we could tweak setData so we don't need to reset... 
m_sleigh->reset(&m_loader, &m_context_internal); + + // If arch is PowerPC 64-bit then don't allow contextSet() + // this fixes instructions such as bgt and other instructions that use context switching + if (arch == Arch::Type::PPC64) + { + // Disable context setting for PowerPC 64-bit architecture + m_sleigh->allowContextSet(false); + } + m_sleigh->initialize(m_document_storage); // setData doesn't affect performance for a big num_bytes :) m_loader.setData(address, bytes, num_bytes); @@ -577,7 +586,7 @@ class TranslationContext }; // Translate a sleigh register name into a maat::ir::Param register -maat::ir::Param reg_name_to_maat_reg(maat::Arch::Type arch, const std::string& reg_name); +maat::ir::Param reg_name_to_maat_reg(maat::Arch::Type arch, const std::string& reg_name, const int reg_masked_bits); // Translate a pcode varnode into an parameter and add it to inst maat::ir::Param translate_pcode_param(TranslationContext* ctx, VarnodeData* v) { @@ -597,7 +606,7 @@ maat::ir::Param translate_pcode_param(TranslationContext* ctx, VarnodeData* v) if (addr_space_name == "register") { const std::string& reg_name = ctx->getRegisterName(v->space, v->offset, v->size); - return std::move(reg_name_to_maat_reg(ctx->arch, reg_name)); + return std::move(reg_name_to_maat_reg(ctx->arch, reg_name, v->size*8)); } else if (addr_space_name == "unique") { @@ -622,7 +631,7 @@ maat::ir::Param translate_pcode_param(TranslationContext* ctx, VarnodeData* v) return maat::ir::Param::None(); } -maat::ir::Param reg_name_to_maat_reg(maat::Arch::Type arch, const std::string& reg_name) +maat::ir::Param reg_name_to_maat_reg(maat::Arch::Type arch, const std::string& reg_name, const int reg_masked_bits) { if (arch == Arch::Type::X86) return sleigh_reg_translate_X86(reg_name); @@ -630,11 +639,19 @@ maat::ir::Param reg_name_to_maat_reg(maat::Arch::Type arch, const std::string& r return sleigh_reg_translate_X64(reg_name); else if (arch == Arch::Type::EVM) return sleigh_reg_translate_EVM(reg_name); + else if (arch == Arch::Type::PPC64){ + int reg_max_bits = sleigh_reg_translate_PPC64(reg_name).size(); + if(reg_masked_bits == reg_max_bits) + return sleigh_reg_translate_PPC64(reg_name); + else if(reg_masked_bits < reg_max_bits) + return sleigh_masked_reg_translate_PPC64(reg_name, reg_masked_bits); + else + throw maat::runtime_exception("Register translation from SLEIGH to MAAT, sleigh pcode register size is greater than maat register size!"); + } else throw maat::runtime_exception("Register translation from SLEIGH to MAAT not implemented for this architecture!"); } - std::shared_ptr new_sleigh_ctx( maat::Arch::Type arch, const std::string& slafile, diff --git a/src/third-party/sleigh/processors/PowerPC/data/languages/4xx.sinc b/src/third-party/sleigh/processors/PowerPC/data/languages/4xx.sinc new file mode 100644 index 00000000..392e96c7 --- /dev/null +++ b/src/third-party/sleigh/processors/PowerPC/data/languages/4xx.sinc @@ -0,0 +1,7 @@ +#dcread r0,0,r0 0x7c 00 03 cc +:dcread S,RA_OR_ZERO,B is OP=31 & S & B & (XOP_1_10=486 | XOP_1_10=326) & BIT_0=0 & RA_OR_ZERO +{ + # ea:$(REGISTER_SIZE) = RA_OR_ZERO + B; + S = dataCacheRead(RA_OR_ZERO,B); +} + diff --git a/src/third-party/sleigh/processors/PowerPC/data/languages/FPRC.sinc b/src/third-party/sleigh/processors/PowerPC/data/languages/FPRC.sinc new file mode 100644 index 00000000..e8b33dbe --- /dev/null +++ b/src/third-party/sleigh/processors/PowerPC/data/languages/FPRC.sinc @@ -0,0 +1,210 @@ + +period: "" is Rc=0 { setSummaryFPSCR(); } +period: "." 
is Rc=1 { setSummaryFPSCR(); cr1flags(); } + +# Floating Convert To Integer Doubleword Unsigned + +:fctidu^period fT,fB is $(NOTVLE) & OP=63 & fT & BITS_16_20=0 & fB & XOP_1_10=942 & period +{ + # src is rounded to integer + + fT = trunc(round(fB)); + + # if src is Nan, result is 0 and VXSNAN is set to 1 + + fT = fT * zext(nan(fB) == 0); + fp_vxsnan = fp_vxsnan | nan(fB); + + # if src > 2^64 - 1, result is 0xffff_ffff_ffff_ffff and VXCVI is set to 1 + + bigi:16 = 0xffffffffffffffff; + bigf:8 = int2float(bigi); + fT = fT - (0xffffffffffffffff + fT) * zext(fB f> bigf); + fp_vxcvi = fp_vxcvi | (fB f> bigf); + + # if rounded value < 0, result is 0 and VXCVI is set to 1 + + fp_vxcvi = fp_vxcvi | (fT s< 0); + fT = fT * zext(fT s> 0); + + build period; +} + +# Floating Convert To Integer Doubleword Unsigned with round toward Zero + +:fctiduz^period fT,fB is $(NOTVLE) & OP=63 & fT & BITS_16_20=0 & fB & XOP_1_10=943 & period +{ + # src is rounded to integer + + fT = trunc(fB); + + # if src is Nan, result is 0 and VXSNAN is set to 1 + + fT = fT * zext(nan(fB) == 0); + fp_vxsnan = fp_vxsnan | nan(fB); + + # if src > 2^64 - 1, result is 0xffff_ffff_ffff_ffff and VXCVI is set to 1 + + bigi:16 = 0xffffffffffffffff; + bigf:8 = int2float(bigi); + fT = fT - (0xffffffffffffffff + fT) * zext(fB f> bigf); + fp_vxcvi = fp_vxcvi | (fB f> bigf); + + # if rounded value < 0, result is 0 and VXCVI is set to 1 + + fp_vxcvi = fp_vxcvi | (fT s< 0); + fT = fT * zext(fT s> 0); + + build period; +} + +# Floating Convert To Integer Word Unsigned + +:fctiwu^period fT,fB is $(NOTVLE) & OP=63 & fT & BITS_16_20=0 & fB & XOP_1_10=142 & period +{ + # src is rounded to integer + + fT = trunc(round(fB)); + + # if src is NaN then result is 0 and VXSNAN is set to 1 + + fT = fT * zext(nan(fB) == 0); + fp_vxsnan = fp_vxsnan | nan(fB); + + # if src > 2^32 - 1, result is 0xffff_ffff and VXCVI is set to 1 + + bigi:16 = 0xffffffff; + bigf:8 = int2float(bigi); + fT = fT - (0xffffffff + fT) * zext(fB f> bigf); + fp_vxcvi = fp_vxcvi | (fB f> bigf); + + # if rounded value < 0, result is 0 and VXCVI is set to 1 + + fp_vxcvi = fp_vxcvi | (fT s< 0); + fT = fT * zext(fT s> 0); + + build period; +} + +# Floating Convert To Integer Word Unsigned with round toward Zero + +:fctiwuz^period fT,fB is $(NOTVLE) & OP=63 & fT & BITS_16_20=0 & fB & XOP_1_10=143 & period +{ + # src is rounded to integer + + fT = trunc(fB); + + # if src is NaN then result is 0 and VXNAN is set to 1 + + fT = fT * zext(nan(fB) == 0); + fp_vxsnan = fp_vxsnan | nan(fB); + + # if src > 2^32 - 1, result is 0xffff_ffff and VXCVI is set to 1 + + bigi:16 = 0xffffffff; + bigf:8 = int2float(bigi); + fT = fT - (0xffffffff + fT) * zext(fB f> bigf); + fp_vxcvi = fp_vxcvi | (fB f> bigf); + + # if rounded value < 0, result is 0 and VXCVI is set to 1 + + fp_vxcvi = fp_vxcvi | (fT s< 0); + fT = fT * zext(fT s> 0); + + build period; +} + +# Floating Convert From Integer Doubleword Unsigned X-form + +:fcfidu^period fT,fB is $(NOTVLE) & OP=63 & fT & BITS_16_20=0 & fB & XOP_1_10=974 & period +{ + # convert source to unsigned int by extension + + local tmpI:8 = zext(fB); + + # src is converted to floating point + + fT = int2float(tmpI); + + # FPSCR is class and sign of result + + setFPRF(fT); + + build period; +} + +# Floating Convert From Integer Doubleword Single X-form + +:fcfids^period fT,fB is $(NOTVLE) & OP=59 & fT & BITS_16_20=0 & fB & XOP_1_10=846 & period +{ + # src is converted to single-precision floating point + + local tmpF:4 = int2float(fB); + + # convert the result to 
double-precision + + fT = float2float(tmpF); + + # FPSCR is class and sign of result + + setFPRF(fT); + + build period; +} + +# fcfidus fT,fB +# Floating Convert From Integer Doubleword Unsigned Single + +:fcfidus^period fT,fB is $(NOTVLE) & OP=59 & fT & BITS_16_20=0 & fB & XOP_1_10=974 & period +{ + # convert source to unsigned int by extension + + local tmpI:8 = zext(fB); + + # src is converted to single-precision floating point + + local tmpF:4 = int2float(tmpI); + + # src is converted to double-precision + + fT = float2float(tmpF); + + # FPSCR is class and sign of result + + setFPRF(fT); + + build period; +} + +# Floating Test for software Divide + +:ftdiv CRFD,fA,fB is $(NOTVLE) & OP=63 & CRFD & BITS_21_22=0 & fA & fB & XOP_1_10=128 & BIT_0=0 +{ + zero:8 = int2float(0:1); + + # fe if fA or fB is Nan or infinity, or if fB is 0 + # and other conditions on the exponents + + fe_flag:1 = nan(fA) | nan(fB) | (fB f== zero); + + # fg if fA or fB are infinite, or fB is NaN or denomrmalized or zero + + fg_flag:1 = nan(fB) | (fB f== zero); + CRFD = (fg_flag << 2) | (fe_flag << 1); +} + +# Floating Test for software Square Root + +:ftsqrt CRFD,fB is $(NOTVLE) & OP=63 & CRFD & BITS_21_22=0 & BITS_16_20=0 & fB & XOP_1_10=160 & BIT_0=0 +{ + zero:8 = int2float(0:1); + + # fe if fB is zero, NAN, infinity, or negative + + fe_flag:1 = nan(fB) | (fB f< zero); + + # fg if fB is zero, infinity, or denormalized + + fg_flag:1 = nan(fB) | (fB f== zero); + CRFD = (fg_flag << 2) | (fe_flag << 1); +} diff --git a/src/third-party/sleigh/processors/PowerPC/data/languages/PowerPC.opinion b/src/third-party/sleigh/processors/PowerPC/data/languages/PowerPC.opinion new file mode 100644 index 00000000..dddc58e6 --- /dev/null +++ b/src/third-party/sleigh/processors/PowerPC/data/languages/PowerPC.opinion @@ -0,0 +1,21 @@ + + + + + + + + + + + + + + + + + + + + + diff --git a/src/third-party/sleigh/processors/PowerPC/data/languages/SPEF_SCR.sinc b/src/third-party/sleigh/processors/PowerPC/data/languages/SPEF_SCR.sinc new file mode 100644 index 00000000..11bfd1e4 --- /dev/null +++ b/src/third-party/sleigh/processors/PowerPC/data/languages/SPEF_SCR.sinc @@ -0,0 +1,130 @@ +# Based on "PowerISA Version 2.06 Revision B" document dated July 23, 2010 +# Category: SPE.Embedded Float Vector Instructions + +# version 1.0 + +define register offset=0x600 size=1 [ + spef_sovh spef_ovh spef_fgh spef_fxh spef_finvh spef_fdbzh spef_funfh spef_fovfh + spef_reserved1 spef_reserved2 + spef_finxs spef_finvs spef_fdbzs spef_funfs spef_fovfs + spef_reserved3 + spef_sov spef_ov spef_fg spef_fx spef_finv spef_fdbz spef_funf spef_fovf + spef_reserved4 + spef_finxe spef_finve spef_fdbze spef_funfe spef_fovfe spef_frmc0 spef_frmc1 +]; + + +macro setSPEFSCR_L(result) { + spef_finv = nan(result); + spef_finvs = spef_finvs | spef_finv; +} + + +macro setSPEFSCR_H(result) { + spef_finvh = nan(result); + spef_finvs = spef_finvs | spef_finvh; +} + + +macro setSummarySPEFSCR() { + spef_sov = spef_sov | spef_ov; + + spef_sovh = spef_sovh | spef_ovh; + + spef_finxs = spef_finxs | spef_fx | spef_fxh; + spef_finvs = spef_finvs | spef_finv | spef_finvh; + spef_fdbzs = spef_fdbzs | spef_fdbz | spef_fdbzh; + spef_funfs = spef_funfs | spef_funf | spef_funfh; + spef_fovfs = spef_fovfs | spef_fovf | spef_fovfh; +} + + +macro setSPEFSCRAddFlags_L(op1, op2, result) { + setSPEFSCR_L(result); + spef_fx = spef_fx | nan(op1) | nan(op2); + spef_finv = spef_fx; + setSummarySPEFSCR(); +} + + +macro setSPEFSCRAddFlags_H(op1, op2, result) { + setSPEFSCR_H(result); + spef_fxh = 
spef_fxh | nan(op1) | nan(op2); + spef_finvh = spef_fxh; + setSummarySPEFSCR(); +} + + +macro setSPEFSCRDivFlags_L(op1, op2, result) { + setSPEFSCR_L(result); + spef_fdbz = spef_fdbz | (op2 f== 0); + spef_fx = spef_fx | nan(op1) | nan(op2); + spef_finv = spef_fx; + setSummarySPEFSCR(); +} + + +macro setSPEFSCRDivFlags_H(op1, op2, result) { + setSPEFSCR_H(result); + spef_fdbzh = spef_fdbzh | (op2 f== 0); + spef_fxh = spef_fxh | nan(op1) | nan(op2); + spef_finvh = spef_fxh; + setSummarySPEFSCR(); +} + + +macro setSPEFSCRMulFlags_L(op1, op2, result) { + setSPEFSCR_L(result); + spef_fx = spef_fx | nan(op1) | nan(op2); + spef_finv = spef_fx; + setSummarySPEFSCR(); +} + + +macro setSPEFSCRMulFlags_H(op1, op2, result) { + setSPEFSCR_H(result); + spef_fxh = spef_fxh | nan(op1) | nan(op2); + spef_finvh = spef_fxh; + setSummarySPEFSCR(); +} + + +macro setSPEFSCRSubFlags_L(op1, op2, result) { + setSPEFSCR_L(result); + spef_fx = spef_fx | nan(op1) | nan(op2); + spef_finv = spef_fx; + setSummarySPEFSCR(); +} + + +macro setSPEFSCRSubFlags_H(op1, op2, result) { + setSPEFSCR_H(result); + spef_fxh = spef_fxh | nan(op1) | nan(op2); + spef_finvh = spef_fxh; + setSummarySPEFSCR(); +} + + +macro packSPEFSCR(tmp) { + packbits(tmp, + spef_sovh, spef_ovh, spef_fgh, spef_fxh, spef_finvh, spef_fdbzh, spef_funfh, spef_fovfh, + spef_reserved1, spef_reserved2, + spef_finxs, spef_finvs, spef_fdbzs, spef_funfs, spef_fovfs, + spef_reserved3, + spef_sov, spef_ov, spef_fg, spef_fx, spef_finv, spef_fdbz, spef_funf, spef_fovf, + spef_reserved4, + spef_finxe, spef_finve, spef_fdbze, spef_funfe, spef_fovfe, spef_frmc0, spef_frmc1 ); +} + + +macro unpackSPEFSCR(tmp) { + unpackbits(tmp, + spef_sovh, spef_ovh, spef_fgh, spef_fxh, spef_finvh, spef_fdbzh, spef_funfh, spef_fovfh, + spef_reserved1, spef_reserved2, + spef_finxs, spef_finvs, spef_fdbzs, spef_funfs, spef_fovfs, + spef_reserved3, + spef_sov, spef_ov, spef_fg, spef_fx, spef_finv, spef_fdbz, spef_funf, spef_fovf, + spef_reserved4, + spef_finxe, spef_finve, spef_fdbze, spef_funfe, spef_fovfe, spef_frmc0, spef_frmc1 ); +} + diff --git a/src/third-party/sleigh/processors/PowerPC/data/languages/SPE_APU.sinc b/src/third-party/sleigh/processors/PowerPC/data/languages/SPE_APU.sinc new file mode 100644 index 00000000..56536a05 --- /dev/null +++ b/src/third-party/sleigh/processors/PowerPC/data/languages/SPE_APU.sinc @@ -0,0 +1,3125 @@ +# Based on "EREF: A Reference for Motorola Book E and e500 Core" document version 01/2004 Rev2 +# Instructions that are specific to the (PowerPC) e500 core are implemented as auxiliary processing units (APUs) +# Signal Processing Engine APU (SPE APU) + +@ifdef BIT_64 +@define MEMMASK "0xFFFFFFFFFFFFFFFF" +@else +@define MEMMASK "0xFFFFFFFF" +@endif + + +# There are three versions of e500 core, namely e500v1, the e500v2, and the e500mc. +# A 64-bit evolution of the e500mc core is called e5500 core. +# All PowerQUICC 85xx devices are based on e500v1 or e500v2 cores. + +# The SPE, and embedded SPFP functionality is implemented in +# the MPC8540, the MPC8560 and in their derivatives (that is, in +# all PowerQUICC III devices). However, these instructions will +# not be supported in devices subsequent to PowerQUICC III. 
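+# Illustration (added note, not from the EREF text): the $(MEMMASK) macro
+# defined above is how every load/store in this file truncates its effective
+# address to the implementation's address width. A minimal sketch of the
+# idiom, assuming a 32-bit build (no BIT_64) where MEMMASK is 0xFFFFFFFF:
+#
+#   EA:$(REGISTER_SIZE) = A + B;       # full-width add; may carry past bit 31
+#   D = *:8 (EA & $(MEMMASK));         # the mask wraps EA modulo 2^32
+#
+# On a 64-bit build (BIT_64 defined) the mask is all ones and the AND is a
+# no-op, so the same constructor bodies serve both variants.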
+ +# version 1.0 + +# SPEFSCR.OVH Integer Overflow High bit 33 +# SPEFSCR.OV Integer Overflow bit 49 +# SPEFSCR.SOVH Summary Integer Overflow High bit 32 +# SPEFSCR.SOV Summary Integer Overflow bit 48 + +# SPEFSCR.SOVH = SPEFSCR.SOVH | ovh +# SPEFSCR.SOV = SPEFSCR.SOV | ovl + +# The SPE requires a GPR register file with thirty-two +# 64-bit registers. For 32-bit implementations, instructions +# that normally operate on a 32-bit register file +# access and change only the least significant 32-bits of +# the GPRs leaving the most significant 32-bits +# unchanged. For 64-bit implementations, operation of +# these instructions is unchanged, i.e. those instructions +# continue to operate on the 64-bit registers as they +# would if the SPE was not implemented. Most SPE +# instructions view the 64-bit register as being composed +# of a vector of two elements, each of which is 32 bits +# wide (some instructions read or write 16-bit elements). +# The most significant 32-bits are called the upper word, +# high word or even word. The least significant 32-bits +# are called the lower word, low word or odd word. +# Unless otherwise specified, SPE instructions write all +# 64-bits of the destination register. + +# Key to some symbols used in descriptions +# RT.l => low part of RT 0-31 bits +# RT.h => high part of RT 32-63 bits +# RT.t => total RT 0-63 bits +# temp.b31 => bit 31 of temp +# temp.B1 => byte 1 of temp +# temp.S0 => first 2 bytes +# ABS() +# EXTZ() => Result of extending x on the left with sign bits +# SATURATE() => +# ONESCOMP() => one's complement +# CR.bsub(..:..)=> bit range +# >u => unsigned greaterthan +# EQUIV => Equivalence logical operators = (a ^ (ONESCOMP(B))) +# *si Signed-integer multiplication +# *ui Unsigned-integer multiplication + +# *gsf +# Guarded signed fractional multiplication. +# Result of multiplying 2 signed fractional +# quantities having bit length 16 taking the +# least significant 31 bits of the sign +# extended product and concatenating a 0 +# to the least significant bit forming a +# guarded signed fractional result of 64 bits. +# Since guarded signed fractional multiplication +# produces a 64-bit result, fractional +# input quantities of -1 and -1 can produce +# +1 in the intermediate product. Two 16-bit +# fractional quantities, a and b are multiplied, +# as shown below: +# ea0:31 = EXTS(a) +# eb0:31 = EXTS(b) +# prod0:63 = ea X eb +# eprod0:63 = EXTS(prod32:63) +# result0:63 = eprod1:63 || 0b0 + +define pcodeop GuardedSignedFractionalMultiplication; + +# *sf +# Signed fractional multiplication. Result of +# multiplying 2 signed fractional quantities +# having bit length n taking the least significant +# 2n-1 bits of the sign extended product +# and concatenating a 0 to the least significant +# bit forming a signed fractional result +# of 2n bits. 
Two 16-bit signed fractional
+# quantities, a and b are multiplied, as
+# shown below:
+# ea0:31 = EXTS(a)
+# eb0:31 = EXTS(b)
+# prod0:63 = ea X eb
+# eprod0:63 = EXTS(prod32:63)
+# result0:31 = eprod33:63 || 0b0
+
+define pcodeop SignedFractionalMultiplication;
+
+# ==================================================================
+
+# =======================================================================
+# Page D-10
+
+# evabs RT,RA
+# ISA-cmt: Vector Absolute Value
+# evabs rD,rA 010 0000 1000 SPE_APU_Vector_Instructions
+:evabs D,A is OP=4 & D & A & XOP_0_10=0x208 & BITS_11_15=0 {
+    # RT.l = ABS(RA.l);
+    # RT.h = ABS(RA.h);
+
+    temp:8 = zext(A);
+    lo:8 = (( temp & (0x00000000FFFFFFFF) ) );
+    lo = zext( ((lo:4 + (lo:4 s>> 31)) ^ (lo:4 s>> 31)) );
+    hi:8 = (( temp & (0xFFFFFFFF00000000) ) >> 32);
+    hi = zext( ((hi:4 + (hi:4 s>> 31)) ^ (hi:4 s>> 31)) );
+    D = (( zext(hi) << 32) | zext(lo) );
+}
+
+# evaddiw RT,RB,UI
+# ISA-cmt: Vector Add Immediate Word
+# evaddiw rD,BU_UIMM,rB 010 0000 0010 SPE_APU_Vector_Instructions
+:evaddiw D,B,BU_UIMM is OP=4 & D & BU_UIMM & B & XOP_0_10=0x202 {
+    # RT.l = RB.l + EXTZ(UI);
+    # RT.h = RB.h + EXTZ(UI);
+
+    tmp:8 = BU_UIMM;
+    lo:8 = (( B & (0x00000000FFFFFFFF) ) ) + (tmp & 0xFFFF);
+    hi:8 = (( B & (0xFFFFFFFF00000000) ) >> 32) + (tmp & 0xFFFF);
+    D = (( zext(hi) << 32) | zext(lo:4) );
+}
+
+# evaddsmiaaw RT,RA
+# ISA-cmt: Vector Add Signed, Modulo, Integer to Accumulator Word
+# evaddsmiaaw rD,rA 100 1100 1001 SPE_APU_Vector_Instructions
+:evaddsmiaaw D,A is OP=4 & D & A & XOP_0_10=0x4C9 & BITS_11_15=0 {
+    # RT.l = ACC.l + RA.l;
+    # RT.h = ACC.h + RA.h;
+    # ACC.t = RT.t;
+
+    lo:8 = (( ACC & (0x00000000FFFFFFFF) ) ) + (( A & (0x00000000FFFFFFFF) ) );
+    hi:8 = (( ACC & (0xFFFFFFFF00000000) ) >> 32) + (( A & (0xFFFFFFFF00000000) ) >> 32);
+    D = (( zext(hi) << 32) | zext(lo:4) );
+    ACC = D;
+}
+
+# macro SATURATE(ov, carry, sat_ovn, sat_ov, val) {
+#    sat = (ov * carry)*sat_ovn + (ov * !carry)*sat_ov + (!ov) * val;
+# }
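+# The SATURATE() above appears only in comments; the saturating constructors
+# below fall back to opaque pcodeops instead. A minimal sketch of how it
+# could be written as a real SLEIGH macro (hypothetical, with an explicit
+# output parameter and 4-byte operands as used in this file):
+#
+# macro SATURATE(ov, carry, sat_ovn, sat_ov, val, sat) {
+#    take_ovn:4 = zext(ov & carry);    # overflow with carry set   -> sat_ovn
+#    take_ov:4 = zext(ov & !carry);    # overflow with carry clear -> sat_ov
+#    keep:4 = zext(!ov);               # no overflow               -> val
+#    sat = take_ovn*sat_ovn + take_ov*sat_ov + keep*val;
+# }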
+
+# evaddssiaaw RT,RA
+# ISA-cmt: Vector Add Signed, Saturate, Integer to Accumulator Word
+# evaddssiaaw rD,rA 100 1100 0001 SPE_APU_Vector_Instructions
+define pcodeop VectorAddSignedSaturateIntegerToAccumulatorWord1;
+define pcodeop VectorAddSignedSaturateIntegerToAccumulatorWord2;
+:evaddssiaaw D,A is OP=4 & D & A & XOP_0_10=0x4C1 & BITS_11_15=0 {
+    # TODO definition complicated SATURATE()
+    # temp.t = EXTS(ACC.l) + EXTS(RA.l);
+    # ovh = temp.b31 ^ temp.b32;
+    # RT.l = SATURATE(ovh, temp.b31, 0x8000_0000, 0x7FFF_FFFF, temp.h);
+    # temp.t = EXTS(ACC.h) + EXTS(RA.h);
+    # ovl = temp.b31 ^ temp.b32;
+    # RT.h = SATURATE(ovl, temp.b31, 0x8000_0000, 0x7FFF_FFFF, temp.h);
+    # ACC.t = RT.t;
+    # SPEFSCR.ovh = ovh;
+    # SPEFSCR.ov = ovl;
+    # SPEFSCR.sovh = SPEFSCR.sovh | ovh;
+    # SPEFSCR.sov = SPEFSCR.sov | ovl;
+
+#    temp:8 = sext( extrBytes(ACC,8,4,0) ) + sext( extrBytes(A,8,4,0) );
+#    ovh = getBits(temp,31,31,8) ^ getBits(temp,32,32,8);
+#    SATURATE(ovh, getBits(temp,31,31,8), 0x80000000,0x7FFFFFFF, temp);
+#    lo = sat;
+
+    D = VectorAddSignedSaturateIntegerToAccumulatorWord1(ACC, A);
+    spr200 = VectorAddSignedSaturateIntegerToAccumulatorWord2(ACC, A);
+}
+
+# evaddumiaaw RT,RA
+# ISA-cmt: Vector Add Unsigned, Modulo, Integer to Accumulator Word
+# evaddumiaaw rD,rA 100 1100 1000 SPE_APU_Vector_Instructions
+:evaddumiaaw D,A is OP=4 & D & A & XOP_0_10=0x4C8 & BITS_11_15=0 {
+    # RT.l = ACC.l + RA.l;
+    # RT.h = ACC.h + RA.h;
+    # ACC.t = RT.t;
+
+    lo:8 = (( ACC & (0x00000000FFFFFFFF) ) ) + (( A & (0x00000000FFFFFFFF) ) );
+    hi:8 = (( ACC & (0xFFFFFFFF00000000) ) >> 32) + (( A & (0xFFFFFFFF00000000) ) >> 32);
+    D = (( zext(hi) << 32) | zext(lo:4) );
+    ACC = D;
+}
+
+# evaddusiaaw RT,RA
+# ISA-cmt: Vector Add Unsigned, Saturate, Integer to Accumulator Word
+# evaddusiaaw rD,rA 100 1100 0000
+define pcodeop VectorAddUnsignedSaturateIntegerToAccumulatorWord1;
+define pcodeop VectorAddUnsignedSaturateIntegerToAccumulatorWord2;
+:evaddusiaaw D,A is OP=4 & D & A & XOP_0_10=0x4C0 & BITS_11_15=0 {
+    # TODO definition complicated SATURATE()
+    # temp.t = EXTZ(ACC.l) + EXTZ(RA.l);
+    # ovh = temp.b31;
+    # RT.l = SATURATE(ovh, temp.b31, 0xFFFF_FFFF, 0xFFFF_FFFF, temp.h);
+    # ovl = temp.b31;
+    # RT.h = SATURATE(ovl, temp.b31, 0xFFFF_FFFF, 0xFFFF_FFFF, temp.h);
+    # ACC.t = RT.t;
+    # SPEFSCR.ovh = ovh;
+    # SPEFSCR.ov = ovl;
+    # SPEFSCR.sovh = SPEFSCR.sovh | ovh;
+    # SPEFSCR.sov = SPEFSCR.sov | ovl;
+
+    D = VectorAddUnsignedSaturateIntegerToAccumulatorWord1(ACC, A);
+    spr200 = VectorAddUnsignedSaturateIntegerToAccumulatorWord2(ACC, A);
+}
+
+# evaddw RT,RA,RB
+# ISA-cmt: Vector Add Word
+# evaddw rD,rA,rB 010 0000 0000
+:evaddw D,A,B is OP=4 & D & A & B & XOP_0_10=0x200 {
+    # RT.l = RA.l + RB.l;
+    # RT.h = RA.h + RB.h;
+
+    lo:8 = (( A & (0x00000000FFFFFFFF) ) ) + (( B & (0x00000000FFFFFFFF) ) );
+    hi:8 = (( A & (0xFFFFFFFF00000000) ) >> 32) + (( B & (0xFFFFFFFF00000000) ) >> 32);
+    D = (( zext(hi) << 32) | zext(lo:4) );
+}
+
+# evand RT,RA,RB
+# ISA-cmt: Vector AND
+# evand rD,rA,rB 010 0001 0001
+:evand D,A,B is OP=4 & D & A & B & XOP_0_10=0x211 {
+    # RT.l = RA.l & RB.l;
+    # RT.h = RA.h & RB.h;
+
+    lo:8 = (( A & (0x00000000FFFFFFFF) ) ) & (( B & (0x00000000FFFFFFFF) ) );
+    hi:8 = (( A & (0xFFFFFFFF00000000) ) >> 32) & (( B & (0xFFFFFFFF00000000) ) >> 32);
+    D = (( zext(hi) << 32) | zext(lo) );
+}
+
+# evandc RT,RA,RB
+# ISA-cmt: Vector AND with Complement
+# evandc rD,rA,rB 010 0001 0010
+:evandc D,A,B is OP=4 & D & A & B & XOP_0_10=0x212 {
+    # RT.l = RA.l & (ONESCOMP(RB.l));
+    # RT.h = RA.h & (ONESCOMP(RB.h));
+
+    lo:8 = (( A & (0x00000000FFFFFFFF) ) ) & (~ (( B & (0x00000000FFFFFFFF) ) ));
+    hi:8 = (( A & (0xFFFFFFFF00000000) ) >> 32) & (~ (( B & (0xFFFFFFFF00000000) ) >> 32));
+    D = (( zext(hi) << 32) | zext(lo) );
+}
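+
+# Worked example of the split-word pattern used by the ev* arithmetic above
+# (an added illustration): evaddw on RA = 0x11111111FFFFFFFF and
+# RB = 0x0000000100000001 must produce RT = 0x1111111200000000, because each
+# 32-bit element wraps independently:
+#
+#   lo:8 = 0xFFFFFFFF + 0x00000001;    # = 0x100000000 in the 8-byte temp
+#   hi:8 = 0x11111111 + 0x00000001;    # = 0x11111112
+#
+# which is why the bodies compute in 8-byte temporaries and truncate the low
+# word (zext(lo:4)) when reassembling, so a carry out of bit 31 cannot leak
+# into the high word.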
+
+# evcmpeq BF,RA,RB
+# ISA-cmt: Vector Compare Equal
+# evcmpeq crfD,rA,rB 010 0011 0100
+:evcmpeq crfD,A,B is OP=4 & crfD & A & B & XOP_0_10=0x234 & BITS_21_22=0 {
+    # ah = RA.l;
+    # al = RA.h;
+    # bh = RB.l;
+    # bl = RB.h;
+    # if (ah == bh) {
+    #     ch = 1;
+    # } else {
+    #     ch = 0;
+    # }
+    # if (al == bl) {
+    #     cl = 1;
+    # } else {
+    #     cl = 0;
+    # }
+    # CR.bsub(4xBF+32:4xBF+35) = ch || cl || (ch | cl) || (ch & cl);
+
+    lo:$(REGISTER_SIZE) = (A & 0x00000000FFFFFFFF);
+    hi:$(REGISTER_SIZE) = ((A & 0xFFFFFFFF00000000) >> 32);
+    b_lo:$(REGISTER_SIZE) = (B & 0x00000000FFFFFFFF);
+    b_hi:$(REGISTER_SIZE) = ((B & 0xFFFFFFFF00000000) >> 32);
+
+    ch:1 = 0;
+    cl:1 = 0;
+    if (hi != b_hi) goto <hi_done>;
+    ch = 1;
+    <hi_done>
+    if (lo != b_lo) goto <lo_done>;
+    cl = 1;
+    <lo_done>
+    crfD = (ch | (cl << 1) | ((ch|cl) << 2) | ((ch&cl) << 3));
+}
+
+# evcmpgts BF,RA,RB
+# ISA-cmt: Vector Compare Greater Than Signed
+# evcmpgts crfD,rA,rB 010 0011 0001
+:evcmpgts crfD,A,B is OP=4 & crfD & A & B & XOP_0_10=0x231 & BITS_21_22=0 {
+    # ah = RA.l;
+    # al = RA.h;
+    # bh = RB.l;
+    # bl = RB.h;
+    # if (ah > bh) {
+    #     ch = 1;
+    # } else {
+    #     ch = 0;
+    # }
+    # if (al > bl) {
+    #     cl = 1;
+    # } else {
+    #     cl = 0;
+    # }
+    # CR.bsub(4xBF+32:4xBF+35) = ch || cl || (ch | cl) || (ch & cl);
+
+    lo:$(REGISTER_SIZE) = (A & 0x00000000FFFFFFFF);
+    hi:$(REGISTER_SIZE) = ((A & 0xFFFFFFFF00000000) >> 32);
+    b_lo:$(REGISTER_SIZE) = (B & 0x00000000FFFFFFFF);
+    b_hi:$(REGISTER_SIZE) = ((B & 0xFFFFFFFF00000000) >> 32);
+
+    ch:1 = 0;
+    cl:1 = 0;
+    if (hi s<= b_hi) goto <hi_done>;
+    ch = 1;
+    <hi_done>
+    if (lo s<= b_lo) goto <lo_done>;
+    cl = 1;
+    <lo_done>
+    crfD = (ch | (cl << 1) | ((ch|cl) << 2) | ((ch&cl) << 3));
+}
+
+# evcmpgtu BF,RA,RB
+# ISA-cmt: Vector Compare Greater Than Unsigned
+# evcmpgtu crfD,rA,rB 010 0011 0000
+:evcmpgtu crfD,A,B is OP=4 & crfD & A & B & XOP_0_10=0x230 & BITS_21_22=0 {
+    # ah = RA.l;
+    # al = RA.h;
+    # bh = RB.l;
+    # bl = RB.h;
+    # if (ah >u bh) {
+    #     ch = 1;
+    # } else {
+    #     ch = 0;
+    # }
+    # if (al >u bl) {
+    #     cl = 1;
+    # } else {
+    #     cl = 0;
+    # }
+    # CR.bsub(4xBF+32:4xBF+35) = ch || cl || (ch | cl) || (ch & cl);
+
+    lo:$(REGISTER_SIZE) = (A & 0x00000000FFFFFFFF);
+    hi:$(REGISTER_SIZE) = ((A & 0xFFFFFFFF00000000) >> 32);
+    b_lo:$(REGISTER_SIZE) = (B & 0x00000000FFFFFFFF);
+    b_hi:$(REGISTER_SIZE) = ((B & 0xFFFFFFFF00000000) >> 32);
+
+    ch:1 = 0;
+    cl:1 = 0;
+    if (hi <= b_hi) goto <hi_done>;
+    ch = 1;
+    <hi_done>
+    if (lo <= b_lo) goto <lo_done>;
+    cl = 1;
+    <lo_done>
+    crfD = (ch | (cl << 1) | ((ch|cl) << 2) | ((ch&cl) << 3));
+}
+
+# evcmplts BF,RA,RB
+# ISA-cmt: Vector Compare Less Than Signed
+# evcmplts crfD,rA,rB 010 0011 0011
+:evcmplts crfD,A,B is OP=4 & crfD & A & B & XOP_0_10=0x233 & BITS_21_22=0 {
+    # ah = RA.l;
+    # al = RA.h;
+    # bh = RB.l;
+    # bl = RB.h;
+    # if (ah < bh) {
+    #     ch = 1;
+    # } else {
+    #     ch = 0;
+    # }
+    # if (al < bl) {
+    #     cl = 1;
+    # } else {
+    #     cl = 0;
+    # }
+    # CR.bsub(4xBF+32:4xBF+35) = ch || cl || (ch | cl) || (ch & cl);
+
+    lo:$(REGISTER_SIZE) = (A & 0x00000000FFFFFFFF);
+    hi:$(REGISTER_SIZE) = ((A & 0xFFFFFFFF00000000) >> 32);
+    b_lo:$(REGISTER_SIZE) = (B & 0x00000000FFFFFFFF);
+    b_hi:$(REGISTER_SIZE) = ((B & 0xFFFFFFFF00000000) >> 32);
+
+    ch:1 = 0;
+    cl:1 = 0;
+    if (hi s>= b_hi) goto <hi_done>;
+    ch = 1;
+    <hi_done>
+    if (lo s>= b_lo) goto <lo_done>;
+    cl = 1;
+    <lo_done>
+    crfD = (ch | (cl << 1) | ((ch|cl) << 2) | ((ch&cl) << 3));
+}
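+
+# The ev* compare constructors above all pack their result the same way
+# (an added illustration of the encoding): with ch/cl the high/low element
+# test results,
+#
+#   crfD = ch | (cl << 1) | ((ch|cl) << 2) | ((ch&cl) << 3)
+#
+# so e.g. ch=1, cl=0 yields 0b0101: high test true, low test false,
+# "either element" true, "both elements" false.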
+
+# evcmpltu BF,RA,RB
+# ISA-cmt: Vector Compare Less Than Unsigned
+# evcmpltu crfD,rA,rB 010 0011 0010
+:evcmpltu crfD,A,B is OP=4 & crfD & A & B & XOP_0_10=0x232 & BITS_21_22=0 {
+    # ah = RA.l;
+    # al = RA.h;
+    # bh = RB.l;
+    # bl = RB.h;
+    # if (ah <u bh) {
+    #     ch = 1;
+    # } else {
+    #     ch = 0;
+    # }
+    # if (al <u bl) {
+    #     cl = 1;
+    # } else {
+    #     cl = 0;
+    # }
+    # CR.bsub(4xBF+32:4xBF+35) = ch || cl || (ch | cl) || (ch & cl);
+
+    lo:$(REGISTER_SIZE) = (A & 0x00000000FFFFFFFF);
+    hi:$(REGISTER_SIZE) = ((A & 0xFFFFFFFF00000000) >> 32);
+    b_lo:$(REGISTER_SIZE) = (B & 0x00000000FFFFFFFF);
+    b_hi:$(REGISTER_SIZE) = ((B & 0xFFFFFFFF00000000) >> 32);
+
+    ch:1 = 0;
+    cl:1 = 0;
+    if (hi >= b_hi) goto <hi_done>;
+    ch = 1;
+    <hi_done>
+    if (lo >= b_lo) goto <lo_done>;
+    cl = 1;
+    <lo_done>
+    crfD = (ch | (cl << 1) | ((ch|cl) << 2) | ((ch&cl) << 3));
+}
+
+# evcntlsw RT,RA
+# ISA-cmt: Vector Count Leading Signed Bits Word
+# evcntlsw rD,rA 010 0000 1110
+define pcodeop VectorCountLeadingSignBitsWord;
+:evcntlsw D,A is OP=4 & D & A & XOP_0_10=0x20E & BITS_11_15=0 {
+    # TODO definition complicated
+    # n = 0;
+    # s = RA.b(n);
+    # do while (n < 32) {
+    #     if (RA.b(n) != s) {
+    #         leave;
+    #     } else {
+    #         n = n + 1;
+    #     }
+    # }
+    # RT.l = n;
+    # n = 0;
+    # s = RA.b(n+32);
+    # do while (n < 32) {
+    #     if (RA.b(n+32) != s) {
+    #         leave;
+    #     }
+    #     n = n + 1;
+    # }
+    # RT.h = n;
+
+    D = VectorCountLeadingSignBitsWord(A);
+}
+
+# evcntlzw RT,RA
+# ISA-cmt: Vector Count Leading Zeros Word
+# evcntlzw rD,rA 010 0000 1101
+define pcodeop VectorCountLeadingZerosWord;
+:evcntlzw D,A is OP=4 & D & A & XOP_0_10=0x20D & BITS_11_15=0 {
+    # TODO definition
+    # n = 0;
+    # do while (n < 32) {
+    #     if (RA.b(n) == 1) {
+    #         leave;
+    #     } else {
+    #         n = n + 1;
+    #     }
+    # }
+    # RT.l = n;
+    # n = 0;
+    # do while (n < 32) {
+    #     if (RA.b(n+32) == 1) {
+    #         leave;
+    #     } else {
+    #         n = n + 1;
+    #     }
+    # }
+    # RT.h = n;
+
+    D = VectorCountLeadingZerosWord(A);
+}
+
+# evdivws RT,RA,RB
+# ISA-cmt: Vector Divide Word Signed
+# evdivws rD,rA,rB 100 1100 0110
+define pcodeop VectorDivideWordSigned1;
+define pcodeop VectorDivideWordSigned2;
+:evdivws D,A,B is OP=4 & D & A & B & XOP_0_10=0x4C6 {
+    # TODO definition complicated
+#    SPEFSCR.OVH = ovh;
+#    SPEFSCR.OV = ovl;
+#    SPEFSCR.SOVH = SPEFSCR.SOVH | ovh;
+#    SPEFSCR.SOV = SPEFSCR.SOV | ovl;
+
+    D = VectorDivideWordSigned1(A,B);
+    flags:8 = VectorDivideWordSigned2(A,B);
+
+    spr200 = (spr200 & (~ (0x200000000)) ) | (flags & (0x200000000));
+    spr200 = (spr200 & (~ (0x2000000000000)) ) | (flags & (0x2000000000000));
+    spr200 = spr200 | (flags & (0x100000000));
+    spr200 = spr200 | (flags & (0x1000000000000));
+}
+
+# evdivwu RT,RA,RB
+# ISA-cmt: Vector Divide Word Unsigned
+# evdivwu rD,rA,rB 100 1100 0111
+define pcodeop VectorDivideWordUnsigned1;
+define pcodeop VectorDivideWordUnsigned2;
+:evdivwu D,A,B is OP=4 & D & A & B & XOP_0_10=0x4C7 {
+    # TODO definition complicated
+#    SPEFSCR.OV = ovl;
+#    SPEFSCR.SOVH = SPEFSCR.SOVH | ovh;
+#    SPEFSCR.SOV = SPEFSCR.SOV | ovl;
+
+    D = VectorDivideWordUnsigned1(A,B);
+    flags:8 = VectorDivideWordUnsigned2(A,B);
+
+    spr200 = (spr200 & (~ (0x2000000000000)) ) | (flags & (0x2000000000000));
+    spr200 = spr200 | (flags & (0x100000000));
+    spr200 = spr200 | (flags & (0x1000000000000));
+}
+
+
+# eveqv RT,RA,RB
+# ISA-cmt: Vector Equivalent
+# eveqv rD,rA,rB 010 0001 1001
+:eveqv D,A,B is OP=4 & D & A & B & XOP_0_10=0x219 {
+    # RT.l = EQUIV(RA.l, RB.l);
+    # RT.h = EQUIV(RA.h, RB.h);
+
+    lo:$(REGISTER_SIZE) = (A & 0x00000000FFFFFFFF);
+    hi:$(REGISTER_SIZE) = ((A & 0xFFFFFFFF00000000) >> 32);
+    b_lo:$(REGISTER_SIZE) = (B & 0x00000000FFFFFFFF);
+    b_hi:$(REGISTER_SIZE) = ((B & 0xFFFFFFFF00000000) >> 32);
+
+    lo = ~(lo ^ b_lo) & 0x00000000FFFFFFFF;
+    hi = ~(hi ^ b_hi) & 0x00000000FFFFFFFF;
+
+    D = ((hi << 32) | lo);
+}
+
+# evextsb RT,RA
+# ISA-cmt: Vector Extend Sign Byte
+# evextsb rD,rA 010 0000 1010
+:evextsb D,A is OP=4 & D & A & XOP_0_10=0x20A & BITS_11_15=0 {
+    # RT.l = EXTS(RA.B3);
+    #
RT.h = EXTS(RA.B7); + + lo:$(REGISTER_SIZE) = (( A & (0x00000000FF000000) ) >> 24); + hi:$(REGISTER_SIZE) = (( A & (0xFF00000000000000) ) >> 56); + lo = sext(lo:1); + hi = sext(hi:1); + D = (( zext(hi) << 32) | zext(lo) ); +} + +# evextsh RT,RA +# ISA-cmt: Vector Extend Sign Halfword +# evextsh rD,rA, 010 0000 1011 +:evextsh D,A is OP=4 & D & A & XOP_0_10=0x20B & BITS_11_15=0 { + # RT.l = EXTS(RA.S1); + # RT.h = EXTS(RA.S3); + + lo:$(REGISTER_SIZE) = (( A & (0x00000000FFFF0000) ) >> 16); + hi:$(REGISTER_SIZE) = (( A & (0xFFFF000000000000) ) >> 48); + lo = sext(lo:2); + hi = sext(hi:2); + D = (( zext(hi) << 32) | zext(lo) ); +} + +# ======================================================================= +# Page D-11 + +# evldd RT,D(RA) +# ISA-cmt: Vector Load Double Word into Double Word +# evldd rD,d(rA) +define pcodeop VectorLoadDoubleWordIntoDoubleWord; +# TODO: defined in evx.sinc +# if (RA == 0) { +# b = 0; +# } else { +# b = RA; +# } +# EA = b + EXTZ(UI*8); +# RT = MEM(EA,8); + +#:evldd D,A is OP=4 & D & A & XOP_0_10=769 & BITS_11_15=0 { + # TODO: defined in evx.sinc +# VectorLoadDoubleWordIntoDoubleWord(D,A); +#} + +# evlddx RT,RA,RB +# ISA-cmt: Vector Load Double Word into Double Word Indexed +# evlddx +# define pcodeop vectorLoadDoubleWordIntoDoubleWordIndexed1; +# TODO: defined in evx.sinc +# if (RA == 0) { +# b = 0; +# } else { +# b = RA; +# } +# EA = b + RB; +# RT = MEM(EA, 8); + +# :evlddx S,A,B is OP=4 & S & A & B & XOP_0_10=768 { +# TODO: defined in evx.sinc +# vectorLoadDoubleWordIntoDoubleWordIndexed1(S,A,B); +#} + +# evldh RT,D(RA) +# ISA-cmt: Vector Load Double into Four Halfwords +# evldh rD,rA 011 0000 0101 + + +:evldh RT,EVUIMM_8_RAt is OP=4 & A & D & RT & EVUIMM_8 & EVUIMM_8_RAt & XOP_0_10=0x305 { +# if (RA == 0) { +# b = 0; +# } else { +# b = RA; +# } +# EA = b + EXTZ(UI*8); +# RT.S0 = MEM(EA, 2); +# RT.S1 = MEM(EA+2, 2); +# RT.S2 = MEM(EA+4, 2); +# RT.S3 = MEM(EA+6, 2); + + EA:8 = A + zext(EVUIMM_8_RAt); + *:2 (RT) = *:2 ((EA) & $(MEMMASK)); + *:2 (RT+2) = *:2 ((EA+2) & $(MEMMASK)); + *:2 (RT+4) = *:2 ((EA+4) & $(MEMMASK)); + *:2 (RT+6) = *:2 ((EA+6) & $(MEMMASK)); +} + +# evldhx RT,RA,RB +# ISA-cmt: Vector Load Double into Four Halfwords Indexed +# evldhx rD,rA,rB 011 0000 0100 + +:evldhx D,A,B is OP=4 & A & D & B & XOP_0_10=0x304 { +# if (RA == 0) { +# b = 0; +# } else { +# b = RA; +# } +# EA = b + RB; +# RT.S0 = MEM(EA, 2); +# RT.S1 = MEM(EA+2, 2); +# RT.S2 = MEM(EA+4, 2); +# RT.S3 = MEM(EA+6, 2); + + EA:8 = A + B; + *:2 (D) = *:2 ((EA) & $(MEMMASK)); + *:2 (D+2) = *:2 ((EA+2) & $(MEMMASK)); + *:2 (D+4) = *:2 ((EA+4) & $(MEMMASK)); + *:2 (D+6) = *:2 ((EA+6) & $(MEMMASK)); +} + +# evldw RT,D(RA) +# ISA-cmt: Vector Load Double into Two Words +# evldw rD,rA 011 0000 0011 +:evldw RT,EVUIMM_8_RAt is OP=4 & A & D & RT & EVUIMM_8 & EVUIMM_8_RAt & XOP_0_10=0x303 { +# if (RA == 0) { +# b = 0; +# } else { +# b = RA; +# } +# EA = b + EXTZ(UI*8); +# RT.l = MEM(EA, 4); +# RT.h = MEM(EA+4, 4); + + EA:$(REGISTER_SIZE) = A + zext(EVUIMM_8_RAt); + *:4 (RT) = *:4 ((EA) & $(MEMMASK)); + *:4 (RT+4) = *:4 ((EA+4) & $(MEMMASK)); +} + +# evldwx RT,RA,RB +# ISA-cmt: Vector Load Double into Two Words Indexed +# evldwx rD,rA,rB 011 0000 0010 +:evldwx D,A,B is OP=4 & A & B & D & XOP_0_10=0x302 { +# if (RA == 0) { +# b = 0; +# } else { +# b = RA; +# } +# EA = b + RB; +# RT.l = MEM(EA, 4); +# RT.h = MEM(EA+4, 4); + + EA:$(REGISTER_SIZE) = A + B; + *:4 (D) = *:4 ((EA) & $(MEMMASK)); + *:4 (D+4) = *:4 ((EA+4) & $(MEMMASK)); +} + +# evlhhesplat RT,D(RA) +# ISA-cmt: Vector Load Halfword into 
Halfwords Even and Splat
+# evlhhesplat rD,rA 011 0000 1001
+:evlhhesplat RT,EVUIMM_2_RAt is OP=4 & A & RT & EVUIMM_2_RAt & D & XOP_0_10=0x309 {
+#    if (RA == 0) {
+#        b = 0;
+#    } else {
+#        b = RA;
+#    }
+#    EA = b + EXTZ(UI*2);
+#    RT.S0 = MEM(EA,2);
+#    RT.S1 = 0x0000;
+#    RT.S2 = MEM(EA,2);
+#    RT.S3 = 0x0000;
+
+    EA:$(REGISTER_SIZE) = A + zext(EVUIMM_2_RAt);
+    *:2 (RT) = *:2 ((EA) & $(MEMMASK));
+    *:2 (RT+2) = 0x0000;
+    *:2 (RT+4) = *:2 ((EA) & $(MEMMASK));
+    *:2 (RT+6) = 0x0000;
+}
+
+# evlhhesplatx RT,RA,RB
+# ISA-cmt: Vector Load Halfword into Halfwords Even and Splat Indexed
+# evlhhesplatx rD,rA,rB 011 0000 1000
+:evlhhesplatx D,A,B is OP=4 & A & B & D & XOP_0_10=0x308 {
+#    if (RA == 0) {
+#        b = 0;
+#    } else {
+#        b = RA;
+#    }
+#    EA = b + RB;
+#    RT.S0 = MEM(EA, 2);
+#    RT.S1 = 0x0000;
+#    RT.S2 = MEM(EA, 2);
+#    RT.S3 = 0x0000;
+
+    EA:$(REGISTER_SIZE) = A + B;
+    *:2 (D) = *:2 ((EA) & $(MEMMASK));
+    *:2 (D+2) = 0x0000;
+    *:2 (D+4) = *:2 ((EA) & $(MEMMASK));
+    *:2 (D+6) = 0x0000;
+}
+
+# evlhhossplat RT,D(RA)
+# ISA-cmt: Vector Load Halfword into Halfword Odd Signed and Splat
+# evlhhossplat rD,rA 011 0000 1111
+:evlhhossplat RT,EVUIMM_2_RAt is OP=4 & A & RT & EVUIMM_2_RAt & D & XOP_0_10=0x30F {
+#    if (RA == 0) {
+#        b = 0;
+#    } else {
+#        b = RA;
+#    }
+#    EA = b + EXTZ(UI*2);
+#    RT.l = EXTS(MEM(EA, 2));
+#    RT.h = EXTS(MEM(EA, 2));
+
+    EA:$(REGISTER_SIZE) = A + zext(EVUIMM_2_RAt);
+    *:4 (RT) = sext( *:2 (((EA) & $(MEMMASK))));
+    *:4 (RT+4) = sext( *:2 (((EA) & $(MEMMASK))));
+}
+
+# evlhhossplatx RT,RA,RB
+# ISA-cmt: Vector Load Halfword into Halfword Odd Signed and Splat Indexed
+# evlhhossplatx rD,rA,rB 011 0000 1110
+:evlhhossplatx D,A,B is OP=4 & A & B & D & XOP_0_10=0x30E {
+#    if (RA == 0) {
+#        b = 0;
+#    } else {
+#        b = RA;
+#    }
+#    EA = b + RB;
+#    RT.l = EXTS(MEM(EA, 2));
+#    RT.h = EXTS(MEM(EA, 2));
+
+    EA:$(REGISTER_SIZE) = A + B;
+    *:4 (D) = sext( *:2 (((EA) & $(MEMMASK))));
+    *:4 (D+4) = sext( *:2 (((EA) & $(MEMMASK))));
+}
+
+# evlhhousplat RT,D(RA)
+# ISA-cmt: Vector Load Halfword into Halfword Odd Unsigned and Splat
+# evlhhousplat rD,rA 011 0000 1101
+:evlhhousplat RT,EVUIMM_2_RAt is OP=4 & A & RT & EVUIMM_2_RAt & D & XOP_0_10=0x30D {
+#    if (RA == 0) {
+#        b = 0;
+#    } else {
+#        b = RA;
+#    }
+#    EA = b + EXTZ(UI*2);
+#    RT.l = EXTZ(MEM(EA, 2));
+#    RT.h = EXTZ(MEM(EA, 2));
+
+    EA:$(REGISTER_SIZE) = A + zext(EVUIMM_2_RAt);
+    *:4 (RT) = zext( *:2 (((EA) & $(MEMMASK))));
+    *:4 (RT+4) = zext( *:2 (((EA) & $(MEMMASK))));
+}
+
+# evlhhousplatx RT,RA,RB
+# ISA-cmt: Vector Load Halfword into Halfword Odd Unsigned and Splat Indexed
+# evlhhousplatx rD,rA,rB 011 0000 1100
+:evlhhousplatx D,A,B is OP=4 & A & B & D & XOP_0_10=0x30C {
+#    if (RA == 0) {
+#        b = 0;
+#    } else {
+#        b = RA;
+#    }
+#    EA = b + RB;
+#    RT.l = EXTZ(MEM(EA, 2));
+#    RT.h = EXTZ(MEM(EA, 2));
+
+    EA:$(REGISTER_SIZE) = A + B;
+    *:4 (D) = zext( *:2 (((EA) & $(MEMMASK))));
+    *:4 (D+4) = zext( *:2 (((EA) & $(MEMMASK))));
+}
+
+# evlwhe RT,D(RA)
+# ISA-cmt: Vector Load Word into Two Halfwords Even
+# evlwhe rD,rA 011 0001 0001
+# evlwhe conflicts with mullhwu.
+# define pcodeop VectorLoadWordIntoTwoHalfWordsEven; +# :evlwhe D,A is OP=4 & A & EVUIMM_4 & D & XOP_0_10=0x311 { +# if (RA == 0) { +# b = 0; +# } else { +# b = RA; +# } +# EA = b + EXTZ(UI*4); +# RT.S0 = MEM(EA, 2); +# RT.S1 = 0x0000; +# RT.S2 = MEM(EA+2, 2); +# RT.S3 = 0x0000; + +# VectorLoadWordIntoTwoHalfWordsEven(D,A); +# } + +# ================================================================= +# Page D-12 + +# evlwhex RT,RA,RB +# ISA-cmt: Vector Load Word into Two Halfwords Even Indexed +# evlwhex rD,rA 011 0001 0000 +# evlwhex confict with mullhwu +# define pcodeop VectorLoadWordIntoTwoHalfWordsEvenIndexed; +# :evlwhex D,A is OP=4 & B & A & D & XOP_0_10=0x310 { +# if (RA == 0) { +# b = 0; +# } else { +# b = RA; +# } +# EA = b+ RB; +# RT.S0 = MEM(EA, 2); +# RT.S1 = 0x0000; +# RT.S2 = MEM(EA + 2, 2); +# RT.S3 = 0x0000; + +# VectorLoadWordIntoTwoHalfWordsEvenIndexed(D,A); +# } + +# evlwhos RT,D(RA) +# ISA-cmt: Vector Load Word into Two Halfwords Odd Signed (with sign extension) +# evlwhos rD,rA 011 0001 0111 +:evlwhos RT,EVUIMM_4_RAt is OP=4 & A & EVUIMM_4_RAt & RT & D & XOP_0_10=0x317 { +# if (RA == 0) { +# b = 0; +# } else { +# b = RA; +# } +# EA = b + EXTZ(UI*4); +# RT.l = EXTS(MEM(EA, 2)); +# RT.h = EXTS(MEM(EA+2, 2)); + + EA:$(REGISTER_SIZE) = A + zext(EVUIMM_4_RAt); + *:4 (RT) = sext( *:2 (((EA) & $(MEMMASK)))); + *:4 (RT+4) = sext( *:2 (((EA+2) & $(MEMMASK)))); +} + +# evlwhosx RT,RA,RB +# ISA-cmt: Vector Load Word into Two Halfwords Odd Signed Indexed (with sign extension) +# evlwhosx rD,rA,rB 011 0001 0110 +:evlwhosx D,A,B is OP=4 & A & B & D & XOP_0_10=0x316 { +# if (RA == 0) { +# b = 0; +# } else { +# b = RA; +# } +# EA = b + RB; +# RT.l = EXTS(MEM(EA,2)); +# RT.h = EXTS(MEM(EA+2, 2)); + + EA:$(REGISTER_SIZE) = A + B; + *:4 (D) = sext( *:2 (((EA) & $(MEMMASK)))); + *:4 (D+4) = sext( *:2 (((EA+2) & $(MEMMASK)))); +} + +# evlwhou RT,D(RA) +# ISA-cmt: Vector Load Word into Two Halfwords Odd Unsigned (zero-extended) +# evlwhou rD,rA 011 0001 0101 +:evlwhou RT,EVUIMM_4_RAt is OP=4 & A & EVUIMM_4_RAt & RT & D & XOP_0_10=0x315 { +# if (RA == 0) { +# b = 0; +# } else { +# b = RA; +# } +# EA = b + EXTZ(UI*4); +# RT.l = EXTZ(MEM(EA, 2)); +# RT.h = EXTZ(MEM(EA+2, 2)); + + EA:$(REGISTER_SIZE) = A + zext(EVUIMM_4_RAt); + *:4 (RT) = zext( *:2 (((EA) & $(MEMMASK)))); + *:4 (RT+4) = zext( *:2 (((EA+2) & $(MEMMASK)))); +} + +# evlwhoux RT,RA,RB +# ISA-cmt: Vector Load Word into Two Halfwords Odd Unsigned Indexed (zero-extended) +# evlwhoux rD,rA,rB 011 0001 0100 +:evlwhoux D,A,B is OP=4 & A & B & D & XOP_0_10=0x314 { +# if (RA == 0) { +# b = 0; +# } else { +# b = RA; +# } +# EA = b + RB; +# RT.l = EXTZ(MEM(EA,2)); +# RT.h = EXTZ(MEM(EA+2,2)); + + EA:$(REGISTER_SIZE) = A + B; + *:4 (D) = zext( *:2 (((EA) & $(MEMMASK)))); + *:4 (D+4) = zext( *:2 (((EA+2) & $(MEMMASK)))); +} + +# evlwhsplat RT,D(RA) +# ISA-cmt: Vector Load Word into Two Halfwords and Splat +# evlwhsplat rD,rA 011 0001 1101 +:evlwhsplat RS,EVUIMM_4_RAt is OP=4 & A & B & D & XOP_0_10=0x31D & EVUIMM_4_RAt & RS { +# if (RA == 0) { +# b = 0; +# } else { +# b = RA; +# } +# EA = b + EXTZ(UI*4); +# RT.S0 = MEM(EA,2); +# RT.S1 = MEM(EA,2); +# RT.S2 = MEM(EA+2,2); +# RT.S3 = MEM(EA+2,2); + + EA:$(REGISTER_SIZE) = A + zext(EVUIMM_4_RAt); + *:2 (RS) = *:2 ((EA) & $(MEMMASK)); + *:2 (RS+2) = *:2 ((EA) & $(MEMMASK)); + *:2 (RS+4) = *:2 ((EA+2) & $(MEMMASK)); + *:2 (RS+6) = *:2 ((EA+2) & $(MEMMASK)); +} + +# evlwhsplatx RT,RA,RB +# ISA-cmt: Vector Load Word into Two Halfwords and Splat Indexed +# evlwhsplatx rD,rA,rB 011 0001 1100 
+:evlwhsplatx D,A,B is OP=4 & A & B & D & XOP_0_10=0x31C { +# if (RA == 0) { +# b = 0; +# } else { +# b = RA; +# } +# EA = b + RB; +# RT.S0 = MEM(EA,2); +# RT.S1 = MEM(EA,2); +# RT.S2 = MEM(EA+2,2); +# RT.S3 = MEM(EA+2,2); + + EA:$(REGISTER_SIZE) = A + B; + *:2 (D) = *:2 ((EA) & $(MEMMASK)); + *:2 (D+2) = *:2 ((EA) & $(MEMMASK)); + *:2 (D+4) = *:2 ((EA+2) & $(MEMMASK)); + *:2 (D+6) = *:2 ((EA+2) & $(MEMMASK)); +} + +# evlwwsplat RT,D(RA) +# ISA-cmt: Vector Load Word into Word and Splat +# evlwwsplat rD,rA 011 0001 1001 +# define pcodeop VectorLoadWordIntoWordAndSplat; +# evlwwsplat conficts with maclhwu. +# :evlwwsplat RT,EVUIMM_4_RAt is OP=4 & A & D & EVUIMM_4_RAt & RT & XOP_0_10=0x319 { +# if (RA == 0) { +# b = 0; +# } else { +# b = RA; +# } +# EA = b + EXTZ(UI*4); +# RT.l = MEM(EA,4); +# RT.h = MEM(EA,4); + +# VectorLoadWordIntoWordAndSplat(D,A); +# } + +# evlwwsplatx RT,RA,RB +# ISA-cmt: Vector Load Word into Word and Splat Indexed +# evlwwsplatx rD,rA,rB 011 0001 1000 +# define pcodeop VectorLoadWordIntoWordAndSplatIndexed; +# evlwwsplatx conficts with maclhwu +# :evlwwsplatx D,A,B is OP=4 & A & B & D & XOP_0_10=0x318 { +# if (RA == 0) { +# b = 0; +# } else { +# b = RA; +# } +# EA = b + RB; +# RT.l = MEM(EA,4); +# RT.h = MEM(EA,4); + +# VectorLoadWordIntoWordAndSplatIndexed(D,A,B); +# } + +# evmergehi RT,RA,RB +# ISA-cmt: Vector Merge High +# evmergehi rD,rA,rB 010 0010 1100 +# defined evx.sinc XXX +# define pcodeop VectorMergeHigh; +# :evmergehi D,A,B is OP=4 & A & B & D & XOP_0_10=0x22C { +# RT.l = RA.l; +# RT.h = RB.h; + +# VectorMergeHigh(D,A,B); +# } + +# evmergehilo RT,RA,RB +# ISA-cmt: Vector Merge High/Low +# evmergehilo rD,rA,rB 010 0010 1110 +#define pcodeop VectorMergeHighLow; +#:evmergehilo D,A,B is OP=4 & A & B & D & XOP_0_10=0x22E { +# RT.l = RA.l; +# RT.h = RA.h; + +# lo = (A & 0x00000000FFFFFFFF); +# hi = ((A & 0xFFFFFFFF00000000) >> 32); +# b_lo = (B & 0x00000000FFFFFFFF); +# b_hi = ((B & 0xFFFFFFFF00000000) >> 32); +# +# lo = lo; +# hi = b_hi; +# +# D = ((hi << 32) | lo); +#} + +# evmergelo RT,RA,RB +# ISA-cmt: Vector Merge Low +# evmergelo rD,rA,rB 010 0010 1101 +# defined evx.sinc XXX +# define pcodeop VectorMergeLow; +# :evmergelo D,A,B is OP=4 & A & B & D & XOP_0_10=0x22D { +# RT.l = RA.h; +# RT.h = RA.l; + +# VectorMergeLow(D,A,B); +# } + +# evmergelohi RT,RA,RB +# ISA-cmt: Vector Merge Low/High +# evmergelohi rD,rA,rB 010 0010 1111 +#:evmergelohi D,A,B is OP=4 & D & A & B & XOP_0_10=0x22F { +# RT.l = RA.h; +# RT.h = RA.l; + +# lo = (A & 0x00000000FFFFFFFF); +# hi = ((A & 0xFFFFFFFF00000000) >> 32); +# b_lo = (B & 0x00000000FFFFFFFF); +# b_hi = ((B & 0xFFFFFFFF00000000) >> 32); +# +# lo = lo; +# hi = b_lo; +# +# D = ((hi << 32) | lo); +#} + + +# evmhegsmfaa RT,RA,RB +# ISA-cmt: Vector Multiply Halfwords, Even, Guarded, Signed, Modulo, Fractional and Accumulate +# evmhegsmfaa rD,rA,rB 101 0010 1011 +:evmhegsmfaa D,A,B is OP=4 & A & B & D & XOP_0_10=0x52B { +# u64 temp; +# temp = RA.S2 *gsf RB.S2; +# RT = ACC + temp; +# ACC = RT; + + D = ACC + GuardedSignedFractionalMultiplication( (( A & (0x0000FFFFFFFF0000) ) >> 16) , (( B & (0x0000FFFFFFFF0000) ) >> 16) ); + ACC = D; +} + +# evmhegsmfan RT,RA,RB +# ISA-cmt: Vector Multiply Halfwords, Even, Guarded, Signed, Modulo, Fractional and Accumulate Negative +# evmhegsmfan rD,rA,rB 101 1010 1011 +:evmhegsmfan D,A,B is OP=4 & A & B & D & XOP_0_10=0x5AB { +# u64 temp; +# temp = RA.S2 *gsf RB.S2; +# RT = ACC - temp; +# ACC = RT; + + D = ACC - GuardedSignedFractionalMultiplication( (( A & (0x0000FFFFFFFF0000) ) >> 16) , (( B 
& (0x0000FFFFFFFF0000) ) >> 16) ); + ACC = D; +} + +# evmhegsmiaa RT,RA,RB +# ISA-cmt: Vector Multiply Halfwords, Even, Guarded, Signed, Modulo, Integer and Accumulate +# evmhegsmiaa rD,rA,rB 101 0010 1001 +:evmhegsmiaa D,A,B is OP=4 & A & B & D & XOP_0_10=0x529 { +# u64 temp; +# temp.l = RA.l2 *si RB.l2; +# temp.h = EXTS(temp.l); +# RT = ACC + temp; +# ACC = RT; + + lo:8 = (( A & (0x0000FFFF00000000) ) >> 32) * (( B & (0x0000FFFF00000000) ) >> 32); + hi:8 = sext(lo:2); + lo = (( zext(hi) << 32) | zext(lo) ); + D = ACC + lo; + ACC = D; +} + + +# evmhegsmian RT,RA,RB +# ISA-cmt: Vector Multiply Halfwords, Even, Guarded, Signed, Modulo, Integer and Accumulate Negative +# evmhegsmian rD,rA,rB 101 1010 1001 +:evmhegsmian D,A,B is OP=4 & A & B & D & XOP_0_10=0x5A9 { +# u64 temp; +# temp.l = RA.S2 *si RB.S2; +# temp = EXTS(temp.l); +# RT = ACC - temp; +# ACC = RT; + + lo:8 = (( A & (0x0000FFFF00000000) ) >> 32) * (( B & (0x0000FFFF00000000) ) >> 32); + hi:8 = sext(lo:2); + lo = (( zext(hi) << 32) | zext(lo) ); + D = ACC - lo; + ACC = D; +} + +# evmhegumiaa RT,RA,RB +# ISA-cmt: Vector Multiply Halfwords, Even, Guarded, Unsigned, Modulo, Integer and Accumulate +# evmhegumiaa rD,rA,rB 101 0010 1000 +:evmhegumiaa D,A,B is OP=4 & A & B & D & XOP_0_10=0x528 { +# u64 temp; +# temp.l = RA.S2 *ui RB.S2; +# temp = EXTZ(temp.l); +# RT = ACC + temp; +# ACC = RT; + + temp:$(REGISTER_SIZE) = (( A & (0x0000FFFF00000000) ) >> 32) * (( B & (0x0000FFFF00000000) ) >> 32); + temp = zext(temp:4); + D = ACC + temp; + ACC = D; +} + +# evmhegumian RT,RA,RB +# ISA-cmt: Vector Multiply Halfwords, Even, Guarded, Unsigned, Modulo, Integer and Accumulate Negative +# evmhegumian rD,rA,rB 101 1010 1000 +:evmhegumian D,A,B is OP=4 & A & B & D & XOP_0_10=0x5A8 { +# u64 temp; +# temp.l = RA.S2 *ui RB.S2; +# temp = EXTZ(temp); +# RT = ACC - temp; +# ACC = RT; + + temp:$(REGISTER_SIZE) = (( A & (0x0000FFFF00000000) ) >> 32) * (( B & (0x0000FFFF00000000) ) >> 32); + temp = zext(temp:4); + D = ACC - temp; + ACC = D; +} + +# evmhesmf RT,RA,RB +# ISA-cmt: Vector Multiply Halfwords, Even, Signed, Modulo, Fractional +# evmhesmf rD,rA,rB 100 0000 1011 +:evmhesmf D,A,B is OP=4 & A & B & D & XOP_0_10=0x40B { +# RT = RA.S0 *sf RB.S0; +# RT.S2 = RA.S2 *sf RB.S2; + + D = SignedFractionalMultiplication( (( A & (0x0000000000000000) ) >> 16) , (( B & (0x0000000000000000) ) >> 16) ); + D = (D & 0xFFFF) | ( (SignedFractionalMultiplication((( A & (0x0000FFFFFFFF0000) ) >> 16),(( B & (0x0000FFFFFFFF0000) ) >> 16)) ) << 16); +} + +# evmhesmfa RT,RA,RB +# ISA-cmt: Vector Multiply Halfwords, Even, Signed, Modulo, Fractional to Accumulator +# evmhesmfa rD,rA,rB 100 0010 1011 +:evmhesmfa D,A,B is OP=4 & A & B & D & XOP_0_10=0x42B { +# RT.l = RA.S0 *sf RB.S0; +# RT.h = RA.S2 *sf RB.S2; +# ACC = RT; + + D = SignedFractionalMultiplication( (( A & (0x0000000000000000) ) >> 16) , (( B & (0x0000000000000000) ) >> 16) ); + D = (D & 0xFFFF) | ( (SignedFractionalMultiplication((( A & (0x0000FFFFFFFF0000) ) >> 16),(( B & (0x0000FFFFFFFF0000) ) >> 16)) ) << 16); + ACC = D; +} + +# evmhesmfaaw RT,RA,RB +# ISA-cmt: Vector Multiply Halfwords, Even, Signed, Modulo, Fractional and Accumulate into Words +# evmhesmfaaw rD,rA,rB 101 0000 1011 +:evmhesmfaaw D,A,B is OP=4 & A & B & D & XOP_0_10=0x50B { +# u64 temp; +# temp = RA.S0 *sf RB.S0; +# RT.l = ACC.l + temp.l; +# temp.l = RA.S2 *sf RB.S2; +# RT = ACC.h + temp.l; +# ACC = RT; + + lo:$(REGISTER_SIZE) = (( ACC & (0x0000000000000000) ) >> 32) + SignedFractionalMultiplication( (( A & (0x0000000000000000) ) >> 16) , (( B & 
(0x0000000000000000) ) >> 16) ); + hi:$(REGISTER_SIZE) = (( ACC & (0xFFFFFFFF00000000) ) >> 32) + SignedFractionalMultiplication( (( A & (0x0000FFFFFFFF0000) ) >> 16) , (( B & (0x0000FFFFFFFF0000) ) >> 16) ); + D = (( zext(hi) << 32) | zext(lo) ); + ACC = D; +} + +# evmhesmfanw RT,RA,RB +# ISA-cmt: Vector Multiply Halfwords, Even, Signed, Modulo, Fractional and Accumulate Negative into Words +# evmhesmfanw rD,rA,rB 101 1000 1011 +:evmhesmfanw D,A,B is OP=4 & A & B & D & XOP_0_10=0x58B { +# u64 temp; +# temp.l = RA.S0 *sf RB.S0; +# RT.l = ACC.l - temp.l; +# temp.l = RA.S2 *sf RB.S2; +# RT.h = ACC.h - temp; +# ACC = RT; + + lo:$(REGISTER_SIZE) = (( ACC & (0x0000000000000000) ) >> 32) - SignedFractionalMultiplication( (( A & (0x0000000000000000) ) >> 16) , (( B & (0x0000000000000000) ) >> 16) ); + hi:$(REGISTER_SIZE) = (( ACC & (0xFFFFFFFF00000000) ) >> 32) - SignedFractionalMultiplication( (( A & (0x0000FFFFFFFF0000) ) >> 16) , (( B & (0x0000FFFFFFFF0000) ) >> 16) ); + D = (( zext(hi) << 32) | zext(lo) ); + ACC = D; +} + +# evmhesmi RT,RA,RB +# ISA-cmt: Vector Multiply Halfwords, Even, Signed, Modulo, Integer +# evmhesmi rD,rA,rB 100 0000 1001 +:evmhesmi D,A,B is OP=4 & A & B & D & XOP_0_10=0x409 { +# RT.l = RA.S0 *si RB.S0; +# RT.h = RA.S2 *si RB.S2; + + lo:$(REGISTER_SIZE) = (( A & (0x000000000000FFFF) ) ) * (( B & (0x000000000000FFFF) ) ); + hi:$(REGISTER_SIZE) = (( A & (0x0000FFFF00000000) ) >> 32) * (( B & (0x0000FFFF00000000) ) >> 32); + D = (( zext(hi) << 32) | zext(lo) ); +} + +# evmhesmia RT,RA,RB +# ISA-cmt: Vector Multiply Halfwords, Even, Signed, Modulo, Integer to Accumulator +# evmhesmia rD,rA,rB 100 0010 1001 +:evmhesmia D,A,B is OP=4 & A & B & D & XOP_0_10=0x429 { +# RT.l = RA.S0 *si RB.S0; +# RT.h = RA.S2 *si RB.s2; +# ACC = RT; + + lo:$(REGISTER_SIZE) = (( A & (0x000000000000FFFF) ) ) * (( B & (0x000000000000FFFF) ) ); + hi:$(REGISTER_SIZE) = (( A & (0x0000FFFF00000000) ) >> 32) * (( B & (0x0000FFFF00000000) ) >> 32); + D = (( zext(hi) << 32) | zext(lo) ); + ACC = D; +} + +# evmhesmiaaw rD,rA,rB 101 0000 1001 +# ISA-cmt: Vector Multiply Halfwords, Even, Signed, Modulo, Integer and Accumulate into Words +:evmhesmiaaw D,A,B is OP=4 & A & B & D & XOP_0_10=0x509 { +# u64 temp; +# temp.l = RA.S0 *si RB.S0; +# RT.l = ACC.l + temp.l; +# temp.l = RA.S2 *si RB.S2; +# RT.h = ACC.h + temp.l; +# ACC = RT; + + lo:$(REGISTER_SIZE) = (( ACC & (0x00000000FFFFFFFF) ) ) + ((( A & (0x000000000000FFFF) ) ) * (( B & (0x000000000000FFFF) ) )); + hi:$(REGISTER_SIZE) = (( ACC & (0xFFFFFFFF00000000) ) >> 32) + ((( A & (0x0000FFFF00000000) ) >> 32) * (( B & (0x0000FFFF00000000) ) >> 32)); + D = (( zext(hi) << 32) | zext(lo) ); + ACC = D; +} + +# evmhesmianw RT,RA,RB +# ISA-cmt: Vector Multiply Halfwords, Even, Signed, Modulo, Integer and Accumulate Negative into Words +# evmhesmianw rD,rA,rB 101 1000 1001 +:evmhesmianw D,A,B is OP=4 & A & B & D & XOP_0_10=0x589 { +# u64 temp; +# temp.l = RA.S0 *si RB.S0; +# RT.l = ACC.l - temp.l; +# temp.l = RA.S2 *si RB.S2; +# RT.S2 = ACC.S2 - temp.l; +# ACC = RT; + + lo:$(REGISTER_SIZE) = (( ACC & (0x00000000FFFFFFFF) ) ) - ((( A & (0x000000000000FFFF) ) ) * (( B & (0x000000000000FFFF) ) )); + hi:$(REGISTER_SIZE) = (( ACC & (0xFFFFFFFF00000000) ) >> 32) - ((( A & (0x0000FFFF00000000) ) >> 32) * (( B & (0x0000FFFF00000000) ) >> 32)); + D = (( zext(hi) << 32) | zext(lo) ); + ACC = D; +} + +# evmhessf RT,RA,RB +# ISA-cmt: Vector Multiply Halfwords, Even, Signed, Saturate, Fractional +# evmhessf rD,rA,rB 100 0000 0011 +define pcodeop 
VectorMultiplyHalfWordsEvenSignedSaturateFractional1; +define pcodeop VectorMultiplyHalfWordsEvenSignedSaturateFractional2; +:evmhessf D,A,B is OP=4 & A & B & D & XOP_0_10=0x403 { + # TODO definition complicated +# SPEFSCR.OVH = movh; +# SPEFSCR.OV = movl; +# SPEFSCR.SOVH = SPEFSCR.SOVH | movh; +# SPEFSCR.SOV = SPEFSCR.SOV | movl; + + D = VectorMultiplyHalfWordsEvenSignedSaturateFractional1(A,B); + flags:8 = VectorMultiplyHalfWordsEvenSignedSaturateFractional2(A,B); + + spr200 = (spr200 & (~ (0x200000000)) ) | (flags & (0x200000000)); + spr200 = (spr200 & (~ (0x2000000000000)) ) | (flags & (0x2000000000000)); + spr200 = spr200 | (flags & (0x100000000)); + spr200 = spr200 | (flags & (0x1000000000000)); +} + +# evmhessfa RT,RA,RB +# ISA-cmt: Vector Multiply Halfwords, Even, Signed, Saturate, Fractional to Accumulator +# evmhessfa rD,rA,rB 100 0010 0011 +define pcodeop VectorMultiplyHalfWordsEvenSignedSaturateFractionalAccumulate1; +define pcodeop VectorMultiplyHalfWordsEvenSignedSaturateFractionalAccumulate2; +:evmhessfa D,A,B is OP=4 & A & B & D & XOP_0_10=0x423 { +# SPEFSCR.OVH = movh; +# SPEFSCR.OV = movl; +# SPEFSCR.SOVH = SPEFSCR.SOVH | movh; +# SPEFSCR.SOV = SPEFSCR.SOV | movl; + # TODO definition complicated + D = VectorMultiplyHalfWordsEvenSignedSaturateFractionalAccumulate1(A,B); + ACC = D; + flags:8 = VectorMultiplyHalfWordsEvenSignedSaturateFractionalAccumulate2(A,B); + + spr200 = (spr200 & (~ (0x200000000)) ) | (flags & (0x200000000)); + spr200 = (spr200 & (~ (0x2000000000000)) ) | (flags & (0x2000000000000)); + spr200 = spr200 | (flags & (0x100000000)); + spr200 = spr200 | (flags & (0x1000000000000)); +} + +# evmhessfaaw RT,RA,RB +# ISA-cmt: Vector Multiply Halfwords, Even, Signed, Saturate, Fractional and Accumulate into Words +# evmhessfaaw rD,rA,rB 101 0000 0011 +define pcodeop VectorMultiplyHalfWordsEvenSignedSaturateFractionalAndAccumulateIntoWords1; +define pcodeop VectorMultiplyHalfWordsEvenSignedSaturateFractionalAndAccumulateIntoWords2; +:evmhessfaaw D,A,B is OP=4 & A & B & D & XOP_0_10=0x503 { +# SPEFSCR.OVH = ovh | movh +# SPEFSCR.OV = ovl| movl +# SPEFSCR.SOVH = SPEFSCR.SOVH | ovh | movh +# SPEFSCR.SOV = SPEFSCR.SOV | ovl| movl + # TODO definition complicated + D = VectorMultiplyHalfWordsEvenSignedSaturateFractionalAndAccumulateIntoWords1(A,B,ACC,spr200); + flags:8 = VectorMultiplyHalfWordsEvenSignedSaturateFractionalAndAccumulateIntoWords2(A,B,ACC,spr200); + ACC = D; + + spr200 = (spr200 & (~ (0x200000000)) ) | (flags & (0x200000000)); + spr200 = (spr200 & (~ (0x2000000000000)) ) | (flags & (0x2000000000000)); + spr200 = spr200 | (flags & (0x100000000)); + spr200 = spr200 | (flags & (0x1000000000000)); +} + +# evmhessfanw RT,RA,RB +# ISA-cmt: Vector Multiply Halfwords, Even, Signed, Saturate, Fractional and Accumulate Negative into Words +# evmhessfanw rD,rA,rB 101 1000 0011 +define pcodeop VectorMultiplyHalfWordsEvenSignedSaturateFractionalAndAccumulateNegativeIntoWords1; +define pcodeop VectorMultiplyHalfWordsEvenSignedSaturateFractionalAndAccumulateNegativeIntoWords2; +:evmhessfanw D,A,B is OP=4 & A & B & D & XOP_0_10=0x583 { +# SPEFSCR.OVH = ovh | movh; +# SPEFSCR.OV = ovl| movl; +# SPEFSCR.SOVH = SPEFSCR.SOVH | ovh | movh; +# SPEFSCR.SOV = SPEFSCR.SOV | ovl| movl; + # TODO definition complicated + D = VectorMultiplyHalfWordsEvenSignedSaturateFractionalAndAccumulateNegativeIntoWords1(A,B,ACC,spr200); + flags:8 = VectorMultiplyHalfWordsEvenSignedSaturateFractionalAndAccumulateNegativeIntoWords2(A,B,ACC,spr200); + ACC = D; + + spr200 = (spr200 & (~ (0x200000000)) 
) | (flags & (0x200000000)); + spr200 = (spr200 & (~ (0x2000000000000)) ) | (flags & (0x2000000000000)); + spr200 = spr200 | (flags & (0x100000000)); + spr200 = spr200 | (flags & (0x1000000000000)); +} + +# evmhessiaaw RT,RA,RB +# ISA-cmt: Vector Multiply Halfwords, Even, Signed, Saturate, Integer and Accumulate into Words +# evmhessiaaw rD,rA,rB 101 0000 0001 +define pcodeop VectorMultiplyHalfWordsEvenSignedSaturateIntegerAndAccumulateIntoWords1; +define pcodeop VectorMultiplyHalfWordsEvenSignedSaturateIntegerAndAccumulateIntoWords2; +:evmhessiaaw D,A,B is OP=4 & A & B & D & XOP_0_10=0x501 { +# SPEFSCR.OVH = ovh; +# SPEFSCR.OV = ovl; +# SPEFSCR.SOVH = SPEFSCR.SOVH | ovh; +# SPEFSCR.SOV = SPEFSCR.SOV | ovl; + # TODO definition complicated + D = VectorMultiplyHalfWordsEvenSignedSaturateIntegerAndAccumulateIntoWords1(A,B,ACC,spr200); + flags:8 = VectorMultiplyHalfWordsEvenSignedSaturateIntegerAndAccumulateIntoWords2(A,B,ACC,spr200); + ACC = D; + + spr200 = (spr200 & (~ (0x200000000)) ) | (flags & (0x200000000)); + spr200 = (spr200 & (~ (0x2000000000000)) ) | (flags & (0x2000000000000)); + spr200 = spr200 | (flags & (0x100000000)); + spr200 = spr200 | (flags & (0x1000000000000)); +} + +# evmhessianw RT,RA,RB +# ISA-cmt: Vector Multiply Halfwords, Even, Signed, Saturate, Integer and Accumulate Negative into Words +# evmhessianw rD,rA,rB 101 1000 0001 +define pcodeop VectorMultiplyHalfWordsEvenSignedSaturateIntegerAndAccumulateNegativeIntoWords1; +define pcodeop VectorMultiplyHalfWordsEvenSignedSaturateIntegerAndAccumulateNegativeIntoWords2; +:evmhessianw D,A,B is OP=4 & A & B & D & XOP_0_10=0x581 { +# SPEFSCR.OVH = ovh; +# SPEFSCR.OV = ovl; +# SPEFSCR.SOVH = SPEFSCR.SOVH | ovh; +# SPEFSCR.SOV = SPEFSCR.SOV | ovl; +# TODO definition complicated + D = VectorMultiplyHalfWordsEvenSignedSaturateIntegerAndAccumulateNegativeIntoWords1(A,B,ACC,spr200); + flags:8 = VectorMultiplyHalfWordsEvenSignedSaturateIntegerAndAccumulateNegativeIntoWords2(A,B,ACC,spr200); + ACC = D; + + spr200 = (spr200 & (~ (0x200000000)) ) | (flags & (0x200000000)); + spr200 = (spr200 & (~ (0x2000000000000)) ) | (flags & (0x2000000000000)); + spr200 = spr200 | (flags & (0x100000000)); + spr200 = spr200 | (flags & (0x1000000000000)); +} + +# ================================================================= +# Page D-13 + +# evmheumi RT,RA,RB +# ISA-cmt: Vector Multiply Halfwords, Even, Unsigned, Modulo, Integer +# evmheumi rD,rA,rB 100 0000 1000 +:evmheumi D,A,B is OP=4 & A & B & D & XOP_0_10=0x408 { +# RT.l = RA.S0 *ui RB.S0; +# RT.h = RA.S2 *ui RB.S2; + + lo:$(REGISTER_SIZE) = (( A & (0x0000000000000000) ) >> 16) * (( B & (0x0000000000000000) ) >> 16); + hi:$(REGISTER_SIZE) = (( A & (0x0000FFFFFFFF0000) ) >> 16) * (( B & (0x0000FFFFFFFF0000) ) >> 16); + D = (( zext(hi) << 32) | zext(lo) ); +} + +# evmheumia RT,RA,RB +# ISA-cmt: Vector Multiply Halfwords, Even, Unsigned, Modulo, Integer to Accumulator +# evmheumia rD,rA,rB 100 0010 1000 +:evmheumia D,A,B is OP=4 & A & B & D & XOP_0_10=0x428 { +# RT.l = RA.S0 *ui RB.S0; +# RT.h = RA.S2 *ui RB.S2; +# ACC = RT; + + lo:$(REGISTER_SIZE) = (( A & (0x0000000000000000) ) >> 16) * (( B & (0x0000000000000000) ) >> 16); + hi:$(REGISTER_SIZE) = (( A & (0x0000FFFFFFFF0000) ) >> 16) * (( B & (0x0000FFFFFFFF0000) ) >> 16); + D = (( zext(hi) << 32) | zext(lo) ); + ACC = D; +} + +# evmheumiaaw RT,RA,RB +# ISA-cmt: Vector Multiply Halfwords, Even, Unsigned, Modulo, Integer and Accumulate into Words +# evmheumiaaw rD,rA,rB 101 0000 1000 +:evmheumiaaw D,A,B is OP=4 & A & B & D & XOP_0_10=0x508 { +# 
u64 temp; +# temp.l = RA.S0 *ui RB.S0; +# RT.l = ACC.l + temp.l; +# temp.l = RA.S2 *ui RB.S2; +# RT.h = ACC.h + temp.l; +# ACC = RT; + + lo:$(REGISTER_SIZE) = (( ACC & (0x0000000000000000) ) >> 32) + (( A & (0x0000000000000000) ) >> 16) * (( B & (0x0000000000000000) ) >> 16); + hi:$(REGISTER_SIZE) = (( ACC & (0xFFFFFFFF00000000) ) >> 32) + (( A & (0x0000FFFFFFFF0000) ) >> 16) * (( B & (0x0000FFFFFFFF0000) ) >> 16); + D = (( zext(hi) << 32) | zext(lo) ); + ACC = D; +} + +# evmheumianw RT,RA,RB +# ISA-cmt: Vector Multiply Halfwords, Even, Unsigned, Modulo, Integer and Accumulate Negative into Words +# evmheumianw rD,rA,rB 101 1000 1000 +:evmheumianw D,A,B is OP=4 & A & B & D & XOP_0_10=0x588 { +# u64 temp; +# temp.l = RA.S0 *ui RB.S0; +# RT.l = ACC.l - temp.l; +# temp.l = RA.S2 *ui RB.S2; +# RT.h = ACC.h - temp.l; +# ACC = RT; + + lo:$(REGISTER_SIZE) = (( ACC & (0x0000000000000000) ) >> 32) - (( A & (0x0000000000000000) ) >> 16) * (( B & (0x0000000000000000) ) >> 16); + hi:$(REGISTER_SIZE) = (( ACC & (0xFFFFFFFF00000000) ) >> 32) - (( A & (0x0000FFFFFFFF0000) ) >> 16) * (( B & (0x0000FFFFFFFF0000) ) >> 16); + D = (( zext(hi) << 32) | zext(lo) ); + ACC = D; +} + +# evmheusiaaw RT,RA,RB +# ISA-cmt: Vector Multiply Halfwords, Even, Unsigned, Saturate, Integer and Accumulate into Words +# evmheusiaaw rD,rA,rB 101 0000 0000 +define pcodeop VectorMultiplyHalfWordsEvenUnsignedSaturateIntegerAndAccumulateIntoWords1; +define pcodeop VectorMultiplyHalfWordsEvenUnsignedSaturateIntegerAndAccumulateIntoWords2; +:evmheusiaaw D,A,B is OP=4 & A & B & D & XOP_0_10=0x500 { +# SPEFSCR.OVH = ovh; +# SPEFSCR.OV = ovl; +# SPEFSCR.SOVH = SPEFSCR.SOVH | ovh; +# SPEFSCR.SOV = SPEFSCR.SOV | ovl; + # TODO definition complicated + D = VectorMultiplyHalfWordsEvenUnsignedSaturateIntegerAndAccumulateIntoWords1(A,B,ACC,spr200); + flags:8 = VectorMultiplyHalfWordsEvenUnsignedSaturateIntegerAndAccumulateIntoWords2(A,B,ACC,spr200); + ACC = D; + + spr200 = (spr200 & (~ (0x200000000)) ) | (flags & (0x200000000)); + spr200 = (spr200 & (~ (0x2000000000000)) ) | (flags & (0x2000000000000)); + spr200 = spr200 | (flags & (0x100000000)); + spr200 = spr200 | (flags & (0x1000000000000)); +} + +# evmheusianw RT,RA,RB +# ISA-cmt: Vector Multiply Halfwords, Even, Unsigned, Saturate, Integer and Accumulate Negative into Words +# evmheusianw rD,rA,rB 101 1000 0000 +define pcodeop evmheusianwOP1; +define pcodeop evmheusianwOP2; +:evmheusianw D,A,B is OP=4 & A & B & D & XOP_0_10=0x580 { +# SPEFSCR.OVH = ovh; +# SPEFSCR.OV = ovl; +# SPEFSCR.SOVH = SPEFSCR.SOVH | ovh; +# SPEFSCR.SOV = SPEFSCR.SOV | ovl; + # TODO definition complicated + D = evmheusianwOP1(A,B,ACC,spr200); + flags:8 = evmheusianwOP2(A,B,ACC,spr200); + ACC = D; + + spr200 = (spr200 & (~ (0x200000000)) ) | (flags & (0x200000000)); + spr200 = (spr200 & (~ (0x2000000000000)) ) | (flags & (0x2000000000000)); + spr200 = spr200 | (flags & (0x100000000)); + spr200 = spr200 | (flags & (0x1000000000000)); +} + +# evmhogsmfaa RT,RA,RB +# ISA-cmt: Vector Multiply Halfwords, Odd, Guarded, Signed, Modulo, Fractional and Accumulate +# evmhogsmfaa rD,rA,rB 101 0010 1111 +:evmhogsmfaa D,A,B is OP=4 & A & B & D & XOP_0_10=0x52F { +# u64 temp; +# temp = RA.S3 *gsf RB.S3; +# RT = ACC + temp; +# ACC = RT; + + D = ACC + GuardedSignedFractionalMultiplication( (( A & (0xFFFFFFFFFFFF0000) ) >> 16) , (( B & (0xFFFFFFFFFFFF0000) ) >> 16) ); + ACC = D; +} + +# evmhogsmfan RT,RA,RB +# ISA-cmt: Vector Multiply Halfwords, Odd, Guarded, Signed, Modulo, Fractional and Accumulate Negative +# evmhogsmfan rD,rA,rB 
101 1010 1111 +:evmhogsmfan D,A,B is OP=4 & A & B & D & XOP_0_10=0x5AF { +# u64 temp; +# temp = RA.S3 *gsf RB.S3; +# RT = ACC - temp; +# ACC = RT; + + D = ACC - GuardedSignedFractionalMultiplication( (( A & (0xFFFFFFFFFFFF0000) ) >> 16) , (( B & (0xFFFFFFFFFFFF0000) ) >> 16) ); + ACC = D; +} + +# evmhogsmiaa RT,RA,RB +# ISA-cmt: Vector Multiply Halfwords, Odd, Guarded, Signed, Modulo, Integer and Accumulate +# evmhogsmiaa rD,rA,rB 101 0010 1101 +:evmhogsmiaa D,A,B is OP=4 & A & B & D & XOP_0_10=0x52D { +# u64 temp; +# temp.l = RA.S3 *si RB.S3; +# temp = EXTS(temp.l); +# RT = ACC + temp; +# ACC = RT; + + lo:$(REGISTER_SIZE) = (( A & (0xFFFF000000000000) ) >> 48); + lo = sext(lo:2); + D = ACC + lo; + ACC = D; +} + +# evmhogsmian RT,RA,RB +# ISA-cmt: Vector Multiply Halfwords, Odd, Guarded, Signed, Modulo, Integer and Accumulate Negative +# evmhogsmian rD,rA,rB 101 1010 1101 +:evmhogsmian D,A,B is OP=4 & A & B & D & XOP_0_10=0x5AD { +# u64 temp; +# temp.l = RA.S3 *si RB.S3; +# temp = EXTS(temp); +# RT = ACC - temp; +# ACC = RT; + + lo:$(REGISTER_SIZE) = (( A & (0xFFFF000000000000) ) >> 48); + lo = sext(lo:2); + D = ACC - lo; + ACC = D; +} + +# evmhogumiaa RT,RA,RB +# ISA-cmt: Vector Multiply Halfwords, Odd, Guarded, Unsigned, Modulo, Integer and Accumulate +# evmhogumiaa rD,rA,rB 101 0010 1100 +:evmhogumiaa D,A,B is OP=4 & A & B & D & XOP_0_10=0x52C { +# u64 temp; +# tempo.l = RA.S3 *ui RB.S3; +# temp = EXTZ(temp.l); +# RT = ACC + temp; +# ACC = RT; + + temp:8 = (( A & (0xFFFFFFFFFFFF0000) ) >> 16) * (( B & (0xFFFFFFFFFFFF0000) ) >> 16); + temp = zext( (( temp & (0x0000000000000000) ) >> 32) ); + D = ACC + temp; + ACC = D; +} + +# evmhogumian RT,RA,RB +# ISA-cmt: Vector Multiply Halfwords, Odd, Guarded, Unsigned, Modulo, Integer and Accumulate Negative +# evmhogumian rD,rA,rB 101 1010 1100 +:evmhogumian D,A,B is OP=4 & A & B & D & XOP_0_10=0x5AC { +# u64 temp; +# temp.l = RA.S3 *ui RB.S3; +# temp = EXTZ(temp.l); +# RT = ACC - temp; +# ACC = RT; + + temp:8 = (( A & (0xFFFFFFFFFFFF0000) ) >> 16) * (( B & (0xFFFFFFFFFFFF0000) ) >> 16); + temp = zext( (( temp & (0x0000000000000000) ) >> 32) ); + D = ACC - temp; + ACC = D; +} + +# evmhosmf RT,RA,RB +# ISA-cmt: Vector Multiply Halfwords, Odd, Signed, Modulo, Fractional +# evmhosmf rD,rA,rB 100 0000 1111 +:evmhosmf D,A,B is OP=4 & A & B & D & XOP_0_10=0x40F { +# RT.l = RA.S1 *sf RB.S1; +# RT.h = RA.S3 *sf RB.S3; + + lo:8 = SignedFractionalMultiplication( (( A & (0x00000000FFFF0000) ) >> 16) , (( B & (0x00000000FFFF0000) ) >> 16) ); + hi:8 = SignedFractionalMultiplication( (( A & (0xFFFFFFFFFFFF0000) ) >> 16) , (( B & (0xFFFFFFFFFFFF0000) ) >> 16) ); + D = (( zext(hi) << 32) | zext(lo) ); +} + +# evmhosmfa RT,RA,RB +# ISA-cmt: Vector Multiply Halfwords, Odd, Signed, Modulo, Fractional to Accumulator +# evmhosmfa rD,rA,rB 100 0010 1111 +:evmhosmfa D,A,B is OP=4 & A & B & D & XOP_0_10=0x42F { +# RT.l = RA.S1 *sf RB.S1; +# RT.h = RA.S3 *sf RB.S3; +# ACC = RT; + + lo:8 = SignedFractionalMultiplication( (( A & (0x00000000FFFF0000) ) >> 16) , (( B & (0x00000000FFFF0000) ) >> 16) ); + hi:8 = SignedFractionalMultiplication( (( A & (0xFFFFFFFFFFFF0000) ) >> 16) , (( B & (0xFFFFFFFFFFFF0000) ) >> 16) ); + D = (( zext(hi) << 32) | zext(lo) ); + ACC = D; +} + +# evmhosmfaaw RT,RA,RB +# ISA-cmt: Vector Multiply Halfwords, Odd, Signed, Modulo, Fractional and Accumulate into Words +# evmhosmfaaw rD,rA,rB 101 0000 1111 +:evmhosmfaaw D,A,B is OP=4 & A & B & D & XOP_0_10=0x50F { +# u64 temp; +# temp.l = RA.S1 *sf RB.S1; +# RT.l = ACC.l + temp.l; +# temp.l = RA.S3 *sf 
RB.S3; +# RT.h = ACC.h + temp.l; +# ACC = RT; + + lo:$(REGISTER_SIZE) = (( ACC & (0x0000000000000000) ) >> 32) + SignedFractionalMultiplication( (( A & (0x00000000FFFF0000) ) >> 16) , (( B & (0x00000000FFFF0000) ) >> 16) ); + hi:$(REGISTER_SIZE) = (( ACC & (0xFFFFFFFF00000000) ) >> 32) + SignedFractionalMultiplication( (( A & (0xFFFFFFFFFFFF0000) ) >> 16) , (( B & (0xFFFFFFFFFFFF0000) ) >> 16) ); + D = (( zext(hi) << 32) | zext(lo) ); + ACC = D; +} + +# evmhosmfanw RT,RA,RB +# ISA-cmt: Vector Multiply Halfwords, Odd, Signed, Modulo, Fractional and Accumulate Negative into Words +# evmhosmfanw rD,rA,rB 101 1000 1111 +:evmhosmfanw D,A,B is OP=4 & A & B & D & XOP_0_10=0x58F { +# u64 temp; +# temp.l = RA.S1 *sf RB.S1; +# RT.l = ACC.l - temp.l; +# temp.l = RA.S3 *sf RB.S3; +# RT.h = ACC.h - temp.l; +# ACC = RT; + + lo:$(REGISTER_SIZE) = (( ACC & (0x0000000000000000) ) >> 32) - SignedFractionalMultiplication( (( A & (0x00000000FFFF0000) ) >> 16) , (( B & (0x00000000FFFF0000) ) >> 16) ); + hi:$(REGISTER_SIZE) = (( ACC & (0xFFFFFFFF00000000) ) >> 32) - SignedFractionalMultiplication( (( A & (0xFFFFFFFFFFFF0000) ) >> 16) , (( B & (0xFFFFFFFFFFFF0000) ) >> 16) ); + D = (( zext(hi) << 32) | zext(lo) ); + ACC = D; +} + +# evmhosmi RT,RA,RB +# ISA-cmt: Vector Multiply Halfwords, Odd, Signed, Modulo, Integer +# evmhosmi rD,rA,rB 100 0000 1101 +:evmhosmi D,A,B is OP=4 & A & B & D & XOP_0_10=0x40D { +# RT.l = RA.S1 *si RB.S1; +# RT.h = RA.S3 *si RB.S3; + + lo:$(REGISTER_SIZE) = (( A & (0x00000000FFFF0000) ) >> 16) * (( B & (0x00000000FFFF0000) ) >> 16); + hi:$(REGISTER_SIZE) = (( A & (0xFFFF000000000000) ) >> 48) * (( B & (0xFFFF000000000000) ) >> 48); + D = (( zext(hi) << 32) | zext(lo) ); +} + +# evmhosmia RT,RA,RB +# ISA-cmt: Vector Multiply Halfwords, Odd, Signed, Modulo, Integer to Accumulator +# evmhosmia rD,rA,rB 100 0010 1101 +:evmhosmia D,A,B is OP=4 & A & B & D & XOP_0_10=0x42D { +# RT.l = RA.S1 *si RB.S1; +# RT.h = RA.S3 *si RB.S3; +# ACC = RT; + + lo:$(REGISTER_SIZE) = (( A & (0x00000000FFFF0000) ) >> 16) * (( B & (0x00000000FFFF0000) ) >> 16); + hi:$(REGISTER_SIZE) = (( A & (0xFFFF000000000000) ) >> 48) * (( B & (0xFFFF000000000000) ) >> 48); + D = (( zext(hi) << 32) | zext(lo) ); + ACC = D; +} + +# evmhosmiaaw RT,RA,RB +# ISA-cmt: Vector Multiply Halfwords, Odd, Signed, Modulo, Integer and Accumulate into Words +# evmhosmiaaw rD,rA,rB 101 0000 1101 +:evmhosmiaaw D,A,B is OP=4 & A & B & D & XOP_0_10=0x50D { +# u64 temp; +# temp.l = RA.S1 *si RB.S1; +# RT.l = ACC.l + temp.l; +# temp.l = RA.S3 *si RB.S3; +# RT.h = ACC.h + temp.l; +# ACC = RT; + + lo:$(REGISTER_SIZE) = (( ACC & (0x00000000FFFFFFFF) ) ) + ((( A & (0x00000000FFFF0000) ) >> 16) * (( B & (0x00000000FFFF0000) ) >> 16)); + hi:$(REGISTER_SIZE) = (( ACC & (0xFFFFFFFF00000000) ) >> 32) + ((( A & (0xFFFF000000000000) ) >> 48) * (( B & (0xFFFF000000000000) ) >> 48)); + D = (( zext(hi) << 32) | zext(lo) ); +} + +# evmhosmianw RT,RA,RB +# ISA-cmt: Vector Multiply Halfwords, Odd, Signed, Modulo, Integer and Accumulate Negative into Words +# evmhosmianw rD,rA,rB 101 1000 1101 +:evmhosmianw D,A,B is OP=4 & A & B & D & XOP_0_10=0x58D { +# u64 temp; +# temp.l = RA.S1 *si RB.S1; +# RT.l = ACC.l - temp.l; +# temp.l = RA.S3 *si RB.SI; +# RT.h = ACC.h - temp.l; +# ACC = RT; + + lo:$(REGISTER_SIZE) = (( ACC & (0x00000000FFFFFFFF) ) ) - ((( A & (0x00000000FFFF0000) ) >> 16) * (( B & (0x00000000FFFF0000) ) >> 16)); + hi:$(REGISTER_SIZE) = (( ACC & (0xFFFFFFFF00000000) ) >> 32) - ((( A & (0xFFFF000000000000) ) >> 48) * (( B & (0xFFFF000000000000) ) >> 
48)); + D = (( zext(hi) << 32) | zext(lo) ); +} + +# evmhossf RT,RA,RB +# ISA-cmt: Vector Multiply Halfwords, Odd, Signed, Saturate, Fractional +# evmhossf rD,rA,rB 100 0000 0111 +define pcodeop VectorMultiplyHalfWordsOddSignedSaturateFractionalToAccumulator1; +define pcodeop VectorMultiplyHalfWordsOddSignedSaturateFractionalToAccumulator2; +:evmhossf D,A,B is OP=4 & A & B & D & XOP_0_10=0x407 { +# SPEFSCR.OVH = movh; +# SPEFSCR.OV = movl; +# SPEFSCR.SOVH = SPEFSCR.SOVH | movh; +# SPEFSCR.SOV = SPEFSCR.SOV | movl; + # TODO definition complicated + D = VectorMultiplyHalfWordsOddSignedSaturateFractionalToAccumulator1(A,B); + flags:8 = VectorMultiplyHalfWordsOddSignedSaturateFractionalToAccumulator2(A,B); + + spr200 = (spr200 & (~ (0x200000000)) ) | (flags & (0x200000000)); + spr200 = (spr200 & (~ (0x2000000000000)) ) | (flags & (0x2000000000000)); + spr200 = spr200 | (flags & (0x100000000)); + spr200 = spr200 | (flags & (0x1000000000000)); +} + +# evmhossfa rD,rA,rB 100 0010 0111 +# ISA-cmt: Vector Multiply Halfwords, Odd, Signed, Saturate, Fractional to Accumulator +define pcodeop VectorMultiplyHalfWordsOddSignedSaturateFractionalToAccumulator2a; +define pcodeop VectorMultiplyHalfWordsOddSignedSaturateFractionalToAccumulator2b; +:evmhossfa D,A,B is OP=4 & A & B & D & XOP_0_10=0x427 { +# SPEFSCR.OVH = movh; +# SPEFSCR.OV = movl; +# SPEFSCR.SOVH = SPEFSCR.SOVH | movh; +# SPEFSCR.SOV = SPEFSCR.SOV | movl; + # TODO definition complicated + D = VectorMultiplyHalfWordsOddSignedSaturateFractionalToAccumulator2a(A,B); + flags:8 = VectorMultiplyHalfWordsOddSignedSaturateFractionalToAccumulator2b(A,B); + + spr200 = (spr200 & (~ (0x200000000)) ) | (flags & (0x200000000)); + spr200 = (spr200 & (~ (0x2000000000000)) ) | (flags & (0x2000000000000)); + spr200 = spr200 | (flags & (0x100000000)); + spr200 = spr200 | (flags & (0x1000000000000)); +} + +# evmhossfaaw RT,RA,RB +# ISA-cmt: Vector Multiply Halfwords, Odd, Signed, Saturate, Fractional and Accumulate into Words +# evmhossfaaw rD,rA,rB 101 0000 0111 +define pcodeop VectorMultiplyHalfWordsOddSignedSaturateFractionalAndAccumulateIntoWords1; +define pcodeop VectorMultiplyHalfWordsOddSignedSaturateFractionalAndAccumulateIntoWords2; +:evmhossfaaw D,A,B is OP=4 & A & B & D & XOP_0_10=0x507 { +# SPEFSCR.OVH = ovh | movh; +# SPEFSCR.OV = ovl| movl; +# SPEFSCR.SOVH = SPEFSCR.SOVH | ovh | movh; +# SPEFSCR.SOV = SPEFSCR.SOV | ovl| movl; + # TODO definition complicated + D = VectorMultiplyHalfWordsOddSignedSaturateFractionalAndAccumulateIntoWords1(A,B,ACC,spr200); + flags:8 = VectorMultiplyHalfWordsOddSignedSaturateFractionalAndAccumulateIntoWords2(A,B,ACC,spr200); + ACC = D; + + spr200 = (spr200 & (~ (0x200000000)) ) | (flags & (0x200000000)); + spr200 = (spr200 & (~ (0x2000000000000)) ) | (flags & (0x2000000000000)); + spr200 = spr200 | (flags & (0x100000000)); + spr200 = spr200 | (flags & (0x1000000000000)); +} + +# evmhossfanw RT,RA,RB +# ISA-cmt: Vector Multiply Halfwords, Odd, Signed, Saturate, Fractional and Accumulate Negative into Words +# evmhossfanw rD,rA,rB 101 1000 0111 +define pcodeop VectorMultiplyHalfWordsOddSignedSaturateFractionalAndAccumulateNegativeIntoWords1; +define pcodeop VectorMultiplyHalfWordsOddSignedSaturateFractionalAndAccumulateNegativeIntoWords2; +:evmhossfanw D,A,B is OP=4 & A & B & D & XOP_0_10=0x587 { +# SPEFSCR.OVH = ovh | movh; +# SPEFSCR.OV = ovl| movl; +# SPEFSCR.SOVH = SPEFSCR.SOVH | ovh | movh; +# SPEFSCR.SOV = SPEFSCR.SOV | ovl| movl; + # TODO definition complicated + D = 
VectorMultiplyHalfWordsOddSignedSaturateFractionalAndAccumulateNegativeIntoWords1(A,B,ACC,spr200); + flags:8 = VectorMultiplyHalfWordsOddSignedSaturateFractionalAndAccumulateNegativeIntoWords2(A,B,ACC,spr200); + ACC = D; + + spr200 = (spr200 & (~ (0x200000000)) ) | (flags & (0x200000000)); + spr200 = (spr200 & (~ (0x2000000000000)) ) | (flags & (0x2000000000000)); + spr200 = spr200 | (flags & (0x100000000)); + spr200 = spr200 | (flags & (0x1000000000000)); +} + +# evmhossiaaw RT,RA,RB +# ISA-cmt: Vector Multiply Halfwords, Odd, Signed, Saturate, Integer and Accumulate into Words +# evmhossiaaw rD,rA,rB 101 0000 0101 +define pcodeop VectorMultiplyHalfWordsOddSignedSaturateIntegerAndAccumulateIntoWords1; +define pcodeop VectorMultiplyHalfWordsOddSignedSaturateIntegerAndAccumulateIntoWords2; +:evmhossiaaw D,A,B is OP=4 & A & B & D & XOP_0_10=0x505 { +# SPEFSCR.OVH = ovh; +# SPEFSCR.OV = ovl; +# SPEFSCR.SOVH = SPEFSCR.SOVH | ovh; +# SPEFSCR.SOV = SPEFSCR.SOV | ovl; + # TODO definition complicated + D = VectorMultiplyHalfWordsOddSignedSaturateIntegerAndAccumulateIntoWords1(A,B,ACC,spr200); + flags:8 = VectorMultiplyHalfWordsOddSignedSaturateIntegerAndAccumulateIntoWords2(A,B,ACC,spr200); + ACC = D; + + spr200 = (spr200 & (~ (0x200000000)) ) | (flags & (0x200000000)); + spr200 = (spr200 & (~ (0x2000000000000)) ) | (flags & (0x2000000000000)); + spr200 = spr200 | (flags & (0x100000000)); + spr200 = spr200 | (flags & (0x1000000000000)); +} + +# evmhossianw RT,RA,RB +# ISA-cmt: Vector Multiply Halfwords, Odd, Signed, Saturate, Integer and Accumulate Negative into Words +# evmhossianw rD,rA,rB 101 1000 0101 +define pcodeop VectorMultiplyHalfWordsOddSignedSaturateIntegerAndAccumulateNegativeIntoWords1; +define pcodeop VectorMultiplyHalfWordsOddSignedSaturateIntegerAndAccumulateNegativeIntoWords2; +:evmhossianw D,A,B is OP=4 & A & B & D & XOP_0_10=0x585 { +# SPEFSCR.OVH = ovh; +# SPEFSCR.OV = ovl; +# SPEFSCR.SOVH = SPEFSCR.SOVH | ovh; +# SPEFSCR.SOV = SPEFSCR.SOV | ovl; + # TODO definition complicated + D = VectorMultiplyHalfWordsOddSignedSaturateIntegerAndAccumulateNegativeIntoWords1(A,B,ACC,spr200); + ACC = D; + flags:8 = VectorMultiplyHalfWordsOddSignedSaturateIntegerAndAccumulateNegativeIntoWords2(A,B,ACC,spr200); + + spr200 = (spr200 & (~ (0x200000000)) ) | (flags & (0x200000000)); + spr200 = (spr200 & (~ (0x2000000000000)) ) | (flags & (0x2000000000000)); + spr200 = spr200 | (flags & (0x100000000)); + spr200 = spr200 | (flags & (0x1000000000000)); +} + +# evmhoumi RT,RA,RB +# ISA-cmt: Vector Multiply Halfwords, Odd, Unsigned, Modulo, Integer +# evmhoumi rD,rA,rB 100 0000 1100 +:evmhoumi D,A,B is OP=4 & A & B & D & XOP_0_10=0x40C { +# RT.l = RA.S1 *ui RB.S1; +# RT.h = RA.S3 *ui RB.S3; + + lo:$(REGISTER_SIZE) = (( A & (0x00000000FFFF0000) ) >> 16) * (( B & (0x00000000FFFF0000) ) >> 16); + hi:$(REGISTER_SIZE) = (( A & (0xFFFFFFFFFFFF0000) ) >> 16) * (( B & (0xFFFFFFFFFFFF0000) ) >> 16); + D = (( zext(hi) << 32) | zext(lo) ); +} + +# evmhoumia RT,RA,RB +# ISA-cmt: Vector Multiply Halfwords, Odd, Unsigned, Modulo, Integer to Accumulator +# evmhoumia rD,rA,rB 100 0010 1100 +:evmhoumia D,A,B is OP=4 & A & B & D & XOP_0_10=0x42C { +# RT.l = RA.S1 *ui RB.S1; +# RT.h = RA.S3 *ui RB.S3; +# ACC = RT; + + lo:$(REGISTER_SIZE) = (( A & (0x00000000FFFF0000) ) >> 16) * (( B & (0x00000000FFFF0000) ) >> 16); + hi:$(REGISTER_SIZE) = (( A & (0xFFFFFFFFFFFF0000) ) >> 16) * (( B & (0xFFFFFFFFFFFF0000) ) >> 16); + D = (( zext(hi) << 32) | zext(lo) ); + ACC = D; +} + +# evmhoumiaaw RT,RA,RB +# ISA-cmt: Vector Multiply 
Halfwords, Odd, Unsigned, Modulo, Integer and Accumulate into Words +# evmhoumiaaw rD,rA,rB 101 0000 1100 +:evmhoumiaaw D,A,B is OP=4 & A & B & D & XOP_0_10=0x50C { +# u64 temp; +# temp = RA.S1 *ui RB.S1; +# RT.l = ACC.l + temp.l; +# temp.l = RA.S3 *ui RB.S3; +# RT.h = ACC.h + temp.l; +# ACC = RT; + + lo:$(REGISTER_SIZE) = (( ACC & (0x0000000000000000) ) >> 32) + ((( A & (0x00000000FFFF0000) ) >> 16) * (( B & (0x00000000FFFF0000) ) >> 16)); + hi:$(REGISTER_SIZE) = (( ACC & (0xFFFFFFFF00000000) ) >> 32) + ((( A & (0xFFFFFFFFFFFF0000) ) >> 16) * (( B & (0xFFFFFFFFFFFF0000) ) >> 16)); + D = (( zext(hi) << 32) | zext(lo) ); + ACC = D; +} + +# evmhoumianw RT,RA,RB +# ISA-cmt: Vector Multiply Halfwords, Odd, Unsigned, Modulo, Integer and Accumulate Negative into Words +# evmhoumianw rD,rA,rB 101 1000 1100 +:evmhoumianw D,A,B is OP=4 & A & B & D & XOP_0_10=0x58C { +# u64 temp; +# temp = RA.S1 *ui RB.S1; +# RT.l = ACC.l - temp.l; +# temp.l = RA.S3 *ui RB.S3; +# RT.h = ACC.h - temp.l; +# ACC = RT; + + lo:$(REGISTER_SIZE) = (( ACC & (0x0000000000000000) ) >> 32) - ((( A & (0x00000000FFFF0000) ) >> 16) * (( B & (0x00000000FFFF0000) ) >> 16)); + hi:$(REGISTER_SIZE) = (( ACC & (0xFFFFFFFF00000000) ) >> 32) - ((( A & (0xFFFFFFFFFFFF0000) ) >> 16) * (( B & (0xFFFFFFFFFFFF0000) ) >> 16)); + D = (( zext(hi) << 32) | zext(lo) ); + ACC = D; +} + +# evmhousiaaw RT,RA,RB +# ISA-cmt: Vector Multiply Halfwords, Odd, Unsigned, Saturate, Integer and Accumulate into Words +# evmhousiaaw rD,rA,rB 101 0000 0100 +define pcodeop VectorMultiplyHalfWordsOddUnsignedSaturateIntegerAndAccumulateIntoWords1; +define pcodeop VectorMultiplyHalfWordsOddUnsignedSaturateIntegerAndAccumulateIntoWords2; +:evmhousiaaw D,A,B is OP=4 & A & B & D & XOP_0_10=0x504 { +# SPEFSCR.OVH = ovh; +# SPEFSCR.OV = ovl; +# SPEFSCR.SOVH = SPEFSCR.SOVH | ovh; +# SPEFSCR.SOV = SPEFSCR.SOV | ovl; + # TODO definition complicated + D = VectorMultiplyHalfWordsOddUnsignedSaturateIntegerAndAccumulateIntoWords1(A,B,ACC,spr200); + ACC = D; + flags:8 = VectorMultiplyHalfWordsOddUnsignedSaturateIntegerAndAccumulateIntoWords2(A,B,ACC,spr200); + + spr200 = (spr200 & (~ (0x200000000)) ) | (flags & (0x200000000)); + spr200 = (spr200 & (~ (0x2000000000000)) ) | (flags & (0x2000000000000)); + spr200 = spr200 | (flags & (0x100000000)); + spr200 = spr200 | (flags & (0x1000000000000)); +} + +# evmhousianw RT,RA,RB +# ISA-cmt: Vector Multiply Halfwords, Odd, Unsigned, Saturate, Integer and Accumulate Negative into Words +# evmhousianw rD,rA,rB 101 1000 0100 +define pcodeop VectorMultiplyHalfWordsOddUnsignedSaturateIntegerAndAccumulateNegativeIntoWords1; +define pcodeop VectorMultiplyHalfWordsOddUnsignedSaturateIntegerAndAccumulateNegativeIntoWords2; +:evmhousianw D,A,B is OP=4 & A & B & D & XOP_0_10=0x584 { +# SPEFSCR.OVH = ovh; +# SPEFSCR.OV = ovl; +# SPEFSCR.SOVH = SPEFSCR.SOVH | ovh; +# SPEFSCR.SOV = SPEFSCR.SOV | ovl; + # TODO definition complicated + D = VectorMultiplyHalfWordsOddUnsignedSaturateIntegerAndAccumulateNegativeIntoWords1(A,B,ACC,spr200); + ACC = D; + flags:8 = VectorMultiplyHalfWordsOddUnsignedSaturateIntegerAndAccumulateNegativeIntoWords2(A,B,ACC,spr200); + + spr200 = (spr200 & (~ (0x200000000)) ) | (flags & (0x200000000)); + spr200 = (spr200 & (~ (0x2000000000000)) ) | (flags & (0x2000000000000)); + spr200 = spr200 | (flags & (0x100000000)); + spr200 = spr200 | (flags & (0x1000000000000)); +} + +# ================================================================= +# Page D-14 + +# evmra RT,RA +# ISA-cmt: Initialize Accumulator +# evmra rD,rA 100 1100 
0100 +# defined evx.sinc +# define pcodeop InitializeAccumulator; +# :evmra D,A is OP=4 & A & D & XOP_0_10=0x4C4 { +# ACC = RA; +# RT = RA; + +# ACC = A; +# D = A; + +# InitializeAccumulator(D,A); +# } + +# evmwhsmf RT,RA,RB +# ISA-cmt: Vector Multiply Word High Signed, Modulo, Fractional +# evmwhsmf rD,rA,rB 100 0100 1111 +:evmwhsmf D,A,B is OP=4 & A & B & D & XOP_0_10=0x44F { +# u64 temp; +# temp = RA.l *sf RB.l; +# RT.l = temp.l; +# temp = RA.h *sf RB.h; +# RT.h = temp.l; + + lo:8 = SignedFractionalMultiplication( (( A & (0x0000000000000000) ) >> 32) , (( B & (0x0000000000000000) ) >> 32) ); + hi:8 = SignedFractionalMultiplication( (( A & (0xFFFFFFFF00000000) ) >> 32) , (( B & (0xFFFFFFFF00000000) ) >> 32) ); + D = (( zext(hi) << 32) | zext(lo) ); +} + +# evmwhsmfa RT,RA,RB +# ISA-cmt: Vector Multiply Word High Signed, Modulo, Fractional to Accumulator +# evmwhsmfa rD,rA,rB 100 0110 1111 +:evmwhsmfa D,A,B is OP=4 & A & B & D & XOP_0_10=0x46F { +# u64 temp; +# temp = RA.l *sf RB.l; +# RT.l = temp.l; +# temp = RA.h *sf RB.h; +# RT.h = temp.l; +# ACC = RT; + + lo:8 = SignedFractionalMultiplication( (( A & (0x0000000000000000) ) >> 32) , (( B & (0x0000000000000000) ) >> 32) ); + hi:8 = SignedFractionalMultiplication( (( A & (0xFFFFFFFF00000000) ) >> 32) , (( B & (0xFFFFFFFF00000000) ) >> 32) ); + D = (( zext(hi) << 32) | zext(lo) ); + ACC = D; +} + +# evmwhsmi RT,RA,RB +# ISA-cmt: Vector Multiply Word High Signed, Modulo, Integer +# evmwhsmi rD,rA,rB 100 0100 1101 +:evmwhsmi D,A,B is OP=4 & A & B & D & XOP_0_10=0x44D { +# u64 temp; +# temp = RA.l *si RB.l; +# RT.l = temp.l; +# temp = RA.h *si RB.h; +# RT.h = temp.l; + + lo:$(REGISTER_SIZE) = ((( A & (0x00000000FFFFFFFF) ) ) * (( B & (0x00000000FFFFFFFF) ) )) & 0xFFFFFFFF; + hi:$(REGISTER_SIZE) = ((( A & (0xFFFFFFFF00000000) ) >> 32) * (( B & (0xFFFFFFFF00000000) ) >> 32)) & 0xFFFFFFFF; + D = (( zext(hi) << 32) | zext(lo) ); +} + +# evmwhsmia RT,RA,RB +# ISA-cmt: Vector Multiply Word High Signed, Modulo, Integer to Accumulator +# evmwhsmia rD,rA,rB 100 0110 1101 +:evmwhsmia D,A,B is OP=4 & A & B & D & XOP_0_10=0x46D { +# u64 temp; +# temp = RA.l *si RB.l; +# RT.l = temp.l; +# temp = RA.h *si RB.h; +# RT.h = temp.l; +# ACC = RT; + + lo:$(REGISTER_SIZE) = ((( A & (0x00000000FFFFFFFF) ) ) * (( B & (0x00000000FFFFFFFF) ) )) & 0xFFFFFFFF; + hi:$(REGISTER_SIZE) = ((( A & (0xFFFFFFFF00000000) ) >> 32) * (( B & (0xFFFFFFFF00000000) ) >> 32)) & 0xFFFFFFFF; + D = (( zext(hi) << 32) | zext(lo) ); + ACC = D; +} + +# evmwhssf RT,RA,RB +# ISA-cmt: Vector Multiply Word High Signed, Saturate, Fractional +# evmwhssf rD,rA,rB 100 0100 0111 +define pcodeop VectorMultiplyWordHighSignedSaturateFractional1; +define pcodeop VectorMultiplyWordHighSignedSaturateFractional2; +:evmwhssf D,A,B is OP=4 & A & B & D & XOP_0_10=0x447 { +# SPEFSCR.OVH = movh; +# SPEFSCR.OV = movl; +# SPEFSCR.SOVH = SPEFSCR.SOVH | movh; +# SPEFSCR.SOV = SPEFSCR.SOV | movl; + # TODO definition complicated + D = VectorMultiplyWordHighSignedSaturateFractional1(A,B); + flags:8 = VectorMultiplyWordHighSignedSaturateFractional2(A,B); + spr200 = (spr200 & (~ (0x200000000)) ) | (flags & (0x200000000)); + spr200 = (spr200 & (~ (0x2000000000000)) ) | (flags & (0x2000000000000)); + spr200 = spr200 | (flags & (0x100000000)); + spr200 = spr200 | (flags & (0x1000000000000)); +} + +# evmwhssfa RT,RA,RB +# ISA-cmt: Vector Multiply Word High Signed, Saturate, Fractional to Accumulator +# evmwhssfa rD,rA,rB 100 0110 0111 +define pcodeop VectorMultiplyWordHighSignedSaturateFractionalToAccumulator1; +define 
pcodeop VectorMultiplyWordHighSignedSaturateFractionalToAccumulator2; +:evmwhssfa D,A,B is OP=4 & A & B & D & XOP_0_10=0x467 { +# SPEFSCR.OVH = movh; +# SPEFSCR.OV = movl; +# SPEFSCR.SOVH = SPEFSCR.SOVH | movh; +# SPEFSCR.SOV = SPEFSCR.SOV | movl; + # TODO definition complicated + D = VectorMultiplyWordHighSignedSaturateFractionalToAccumulator1(A,B); + ACC = D; + flags:8 = VectorMultiplyWordHighSignedSaturateFractionalToAccumulator2(A,B); + spr200 = (spr200 & (~ (0x200000000)) ) | (flags & (0x200000000)); + spr200 = (spr200 & (~ (0x2000000000000)) ) | (flags & (0x2000000000000)); + spr200 = spr200 | (flags & (0x100000000)); + spr200 = spr200 | (flags & (0x1000000000000)); +} + +# evmwhumi RT,RA,RB +# ISA-cmt: Vector Multiply Word High Unsigned, Modulo, Integer +# evmwhumi rD,rA,rB 100 0100 1100 +:evmwhumi D,A,B is OP=4 & A & B & D & XOP_0_10=0x44C { +# u64 temp; +# temp = RA.l *ui RB.l; +# RT.l = temp.l; +# temp = RA.h *ui RB.h; +# RT.h = temp.l; + + lo:$(REGISTER_SIZE) = (( A & (0x0000000000000000) ) >> 32) * (( B & (0x0000000000000000) ) >> 32); + lo = lo:4; + hi:$(REGISTER_SIZE) = (( A & (0xFFFFFFFF00000000) ) >> 32) * (( B & (0xFFFFFFFF00000000) ) >> 32); + hi = hi:4; + D = (( zext(hi) << 32) | zext(lo) ); +} + +# evmwhumia RT,RA,RB +# ISA-cmt: Vector Multiply Word High Unsigned, Modulo, Integer to Accumulator +# evmwhumia rD,rA,rB 100 0110 1100 +:evmwhumia D,A,B is OP=4 & A & B & D & XOP_0_10=0x46C { +# u64 temp; +# temp = RA.l *ui RB.l; +# RT.l = temp.l; +# temp = RA.h *ui RB.h; +# RT.h = temp.l; +# ACC = RT; + + lo:$(REGISTER_SIZE) = (( A & (0x0000000000000000) ) >> 32) * (( B & (0x0000000000000000) ) >> 32); + lo = lo:4; + hi:$(REGISTER_SIZE) = (( A & (0xFFFFFFFF00000000) ) >> 32) * (( B & (0xFFFFFFFF00000000) ) >> 32); + hi = hi:4; + D = (( zext(hi) << 32) | zext(lo) ); + ACC = D; +} + +# evmwlsmi rD,rA,rB +# ISA-cmt: Vector Multiply Word Low Signed, Modulo, Integer and Accumulate into Words +# define VectorMultiplyWordLowUnsigned,ModuloInteger; +# YYY No definition in manual + +# evmwhusiaaw rD,rA,rB 101 0100 0100 +# TODO Not in PowerISA Version 2.06 manual? +define pcodeop evmwhusiaawOP; +:evmwhusiaaw D,A,B is OP=4 & A & B & D & XOP_0_10=0x544 { evmwhusiaawOP(D,A,B); } + +# evmwhusianw rD,rA,rB 101 1100 0100 +# TODO Not in PowerISA Version 2.06 manual? +define pcodeop evmwhusianwOP; +:evmwhusianw D,A,B is OP=4 & A & B & D & XOP_0_10=0x5C4 { + evmwhusianwOP(D,A,B,ACC); +} + +# evmwlsmiaaw RT,RA,RB +# ISA-cmt: Vector Multiply Word Low Signed, Modulo, Integer and Accumulate into Words +# evmwlsmiaaw ?? +# u64 temp; +# temp = RA.l *si RB.l; +# RT.l = ACC.l + temp.h; +# temp = RA.h *si RB.h; +# RT.h = ACC.h + temp.h; +# ACC = RT; + +# evmwlsmianw RT,RA,RB +# ISA-cmt: Vector Multiply Word Low Signed, Modulo, Integer and Accumulate Negative in Words +# evmwlsmianw ?? 
+# u64 temp;
+# temp = RA.l *si RB.l;
+# RT.l = ACC.l - temp.h;
+# temp = RA.h *si RB.h;
+# RT.h = ACC.h - temp.h;
+# ACC = RT;
+
+# evmwlssiaaw RT,RA,RB
+# ISA-cmt: Vector Multiply Word Low Signed, Saturate, Integer and Accumulate into Words
+# evmwlssiaaw rD,rA,rB 101 0100 0001
+define pcodeop VectorMultiplyWordLowSignedSaturateIntegerAndAccumulateInWords1;
+define pcodeop VectorMultiplyWordLowSignedSaturateIntegerAndAccumulateInWords2;
+:evmwlssiaaw D,A,B is OP=4 & A & B & D & XOP_0_10=0x541 {
+# SPEFSCR.OVH = ovh;
+# SPEFSCR.OV = ovl;
+# SPEFSCR.SOVH = SPEFSCR.SOVH | ovh;
+# SPEFSCR.SOV = SPEFSCR.SOV | ovl;
+ # TODO definition complicated
+ D = VectorMultiplyWordLowSignedSaturateIntegerAndAccumulateInWords1(A,B,ACC,spr200);
+ ACC = D;
+ flags:8 = VectorMultiplyWordLowSignedSaturateIntegerAndAccumulateInWords2(A,B,ACC,spr200);
+ spr200 = (spr200 & (~ (0x200000000)) ) | (flags & (0x200000000));
+ spr200 = (spr200 & (~ (0x2000000000000)) ) | (flags & (0x2000000000000));
+ spr200 = spr200 | (flags & (0x100000000));
+ spr200 = spr200 | (flags & (0x1000000000000));
+}
+
+# evmwlumi RT,RA,RB
+# ISA-cmt: Vector Multiply Word Low Unsigned, Modulo, Integer
+# evmwlumi rD,rA,rB 100 0100 1000
+:evmwlumi D,A,B is OP=4 & A & B & D & XOP_0_10=0x448 {
+# u64 temp;
+# temp = RA.l *ui RB.l;
+# RT.l = temp.h;
+# temp = RA.h *ui RB.h;
+# RT.h = temp.h;
+
+ lo:8 = (( A & (0x00000000FFFFFFFF) ) ) * (( B & (0x00000000FFFFFFFF) ) );
+ lo = (( lo & (0xFFFFFFFF00000000) ) >> 32);
+ hi:8 = (( A & (0xFFFFFFFF00000000) ) >> 32) * (( B & (0xFFFFFFFF00000000) ) >> 32);
+ hi = (( hi & (0xFFFFFFFF00000000) ) >> 32);
+ D = (( zext(hi) << 32) | zext(lo) );
+}
+
+# evmwlumia RT,RA,RB
+# ISA-cmt: Vector Multiply Word Low Unsigned, Modulo, Integer to Accumulator
+# evmwlumia rD,rA,rB 100 0110 1000
+:evmwlumia D,A,B is OP=4 & A & B & D & XOP_0_10=0x468 {
+# u64 temp;
+# temp = RA.l *ui RB.l;
+# RT.l = temp.h;
+# temp = RA.h *ui RB.h;
+# RT.h = temp.h;
+# ACC = RT;
+
+ lo:8 = (( A & (0x00000000FFFFFFFF) ) ) * (( B & (0x00000000FFFFFFFF) ) );
+ lo = (( lo & (0xFFFFFFFF00000000) ) >> 32);
+ hi:8 = (( A & (0xFFFFFFFF00000000) ) >> 32) * (( B & (0xFFFFFFFF00000000) ) >> 32);
+ hi = (( hi & (0xFFFFFFFF00000000) ) >> 32);
+ D = (( zext(hi) << 32) | zext(lo) );
+ ACC = D;
+}
+
+# evmwlumiaaw RT,RA,RB
+# ISA-cmt: Vector Multiply Word Low Unsigned, Modulo, Integer and Accumulate into Words
+# evmwlumiaaw rD,rA,rB 101 0100 1000
+:evmwlumiaaw D,A,B is OP=4 & A & B & D & XOP_0_10=0x548 {
+# u64 temp;
+# temp = RA.l *ui RB.l;
+# RT.l = ACC.l + temp.h;
+# temp = RA.h *ui RB.h;
+# RT.h = ACC.h + temp.h;
+# ACC = RT;
+
+ lo:$(REGISTER_SIZE) = (( ACC & (0x00000000FFFFFFFF) ) ) + ((( A & (0x00000000FFFFFFFF) ) ) * (( B & (0x00000000FFFFFFFF) ) ));
+ hi:$(REGISTER_SIZE) = (( ACC & (0xFFFFFFFF00000000) ) >> 32) + ((( A & (0xFFFFFFFF00000000) ) >> 32) * (( B & (0xFFFFFFFF00000000) ) >> 32));
+ D = (( zext(hi) << 32) | zext(lo) );
+ ACC = D;
+}
+
+# evmwlumianw RT,RA,RB
+# ISA-cmt: Vector Multiply Word Low Unsigned, Modulo, Integer and Accumulate Negative in Words
+# evmwlumianw rD,rA,rB 101 1100 1000
+:evmwlumianw D,A,B is OP=4 & A & B & D & XOP_0_10=0x5C8 {
+# u64 temp;
+# temp = RA.l *ui RB.l;
+# RT.l = ACC.l - temp.h;
+# temp = RA.h *ui RB.h;
+# RT.h = ACC.h - temp.h;
+# ACC = RT;
+
+ lo:$(REGISTER_SIZE) = (( ACC & (0x00000000FFFFFFFF) ) ) - ((( A & (0x00000000FFFFFFFF) ) ) * (( B & (0x00000000FFFFFFFF) ) ));
+ hi:$(REGISTER_SIZE) = (( ACC & (0xFFFFFFFF00000000) ) >> 32) - ((( A & 
(0xFFFFFFFF00000000) ) >> 32) * (( B & (0xFFFFFFFF00000000) ) >> 32)); + D = (( zext(hi) << 32) | zext(lo) ); + ACC = D; +} + +# evmwlusiaaw RT,RA,RB +# ISA-cmt: Vector Multiply Word Low Unsigned, Saturate, Integer and Accumulate into Words +# evmwlusiaaw rD,rA,rB 101 0100 0000 +define pcodeop VectorMultiplyWordLowUnsignedSaturateIntegerAndAccumulateInWords1; +define pcodeop VectorMultiplyWordLowUnsignedSaturateIntegerAndAccumulateInWords2; +:evmwlusiaaw D,A,B is OP=4 & A & B & D & XOP_0_10=0x540 { +# SPEFSCR.OVH = ovh; +# SPEFSCR.OV = ovl; +# SPEFSCR.SOVH = SPEFSCR.SOVH | ovh; +# SPEFSCR.SOV = SPEFSCR.SOV | ovl; + # TODO definition complicated + D = VectorMultiplyWordLowUnsignedSaturateIntegerAndAccumulateInWords1(A,B,ACC,spr200); + ACC = D; + flags:8 = VectorMultiplyWordLowUnsignedSaturateIntegerAndAccumulateInWords2(A,B,ACC,spr200); + spr200 = (spr200 & (~ (0x200000000)) ) | (flags & (0x200000000)); + spr200 = (spr200 & (~ (0x2000000000000)) ) | (flags & (0x2000000000000)); + spr200 = spr200 | (flags & (0x100000000)); + spr200 = spr200 | (flags & (0x1000000000000)); +} + +# evmwlusianw RT,RA,RB +# ISA-cmt: Vector Multiply Word Low Unsigned, Saturate, Integer and Accumulate Negative in Words +# evmwlusianw rD,rA,rB 101 1100 0000 +define pcodeop VectorMultiplyWordLowUnsignedSaturateIntegerAndAccumulateNegativeInWords1; +define pcodeop VectorMultiplyWordLowUnsignedSaturateIntegerAndAccumulateNegativeInWords2; +:evmwlusianw D,A,B is OP=4 & A & B & D & XOP_0_10=0x5C0 { +# SPEFSCR.OVH = ovh; +# SPEFSCR.OV = ovl; +# SPEFSCR.SOVH = SPEFSCR.SOVH | ovh; +# SPEFSCR.SOV = SPEFSCR.SOV | ovl; + # TODO definition complicated + D = VectorMultiplyWordLowUnsignedSaturateIntegerAndAccumulateNegativeInWords1(D,A,B,ACC,spr200); + ACC = D; + flags:8 = VectorMultiplyWordLowUnsignedSaturateIntegerAndAccumulateNegativeInWords2(D,A,B,ACC,spr200); + spr200 = (spr200 & (~ (0x200000000)) ) | (flags & (0x200000000)); + spr200 = (spr200 & (~ (0x2000000000000)) ) | (flags & (0x2000000000000)); + spr200 = spr200 | (flags & (0x100000000)); + spr200 = spr200 | (flags & (0x1000000000000)); +} + +# evmwsmf RT,RA,RB +# ISA-cmt: Vector Multiply Word Signed, Modulo, Fractional +# evmwsmf rD,rA,rB 100 0101 1011 +:evmwsmf D,A,B is OP=4 & A & B & D & XOP_0_10=0x45B { +# RT = RA.h *sf RB.h; + + D = SignedFractionalMultiplication( (( A & (0xFFFFFFFF00000000) ) >> 32) , (( B & (0xFFFFFFFF00000000) ) >> 32) ); +} + +# evmwsmfa RT,RA,RB +# ISA-cmt: Vector Multiply Word Signed, Modulo, Fractional to Accumulator +# evmwsmfa rD,rA,rB 100 0111 1011 +:evmwsmfa D,A,B is OP=4 & A & B & D & XOP_0_10=0x47B { +# RT = RA.h *sf RB.h; +# ACC = RT; + + D = SignedFractionalMultiplication( (( A & (0xFFFFFFFF00000000) ) >> 32) , (( B & (0xFFFFFFFF00000000) ) >> 32) ); + ACC = D; +} + +# evmwsmfaa RT,RA,RB +# ISA-cmt: Vector Multiply Word Signed, Modulo, Fractional and Accumulate +# evmwsmfaa rD,rA,rB 101 0101 1011 101 0101 1011 +:evmwsmfaa D,A,B is OP=4 & A & B & D & XOP_0_10=0x55B { +# u64 temp; +# temp = RA.h *sf RB.h; +# RT = ACC + temp; +# ACC = RT; + + D = ACC + ( SignedFractionalMultiplication( (( A & (0xFFFFFFFF00000000) ) >> 32) , (( B & (0xFFFFFFFF00000000) ) >> 32) ) ); + ACC = D; +} + +# evmwsmfan RT,RA,RB +# ISA-cmt: Vector Multiply Word Signed, Modulo, Fractional and Accumulate Negative +# evmwsmfan rD,rA,rB 101 1101 1011 +:evmwsmfan D,A,B is OP=4 & A & B & D & XOP_0_10=0x5DB { +# u64 temp; +# temp = RA.h *sf RB.h; +# RT = ACC - temp; +# ACC = RT; + + D = ACC - ( SignedFractionalMultiplication( (( A & (0xFFFFFFFF00000000) ) >> 32) , (( B 
& (0xFFFFFFFF00000000) ) >> 32) ) ); + ACC = D; +} + +# evmwsmi RT,RA,RB +# ISA-cmt: Vector Multiply Word Signed, Modulo, Integer +# evmwsmi rD,rA,rB 100 0101 1001 +# evmwsmi confict with machhwo. +# :evmwsmi D,A,B is OP=4 & A & B & D & XOP_0_10=0x459 { +# RT = RA.h *si RB.h; + +# } + +# evmwsmia RT,RA,RB +# ISA-cmt: Vector Multiply Word Signed, Modulo, Integer to Accumulator +# evmwsmia rD,rA,rB 100 0111 1001 +:evmwsmia D,A,B is OP=4 & A & B & D & XOP_0_10=0x479 { +# RT = RA.h *si RB.h; +# ACC = RT; + + D = (( A & (0xFFFFFFFF00000000) ) >> 32) * (( B & (0xFFFFFFFF00000000) ) >> 32); + ACC = D; +} + +# evmwsmiaa RT,RA,RB +# ISA-cmt: Vector Multiply Word Signed, Modulo, Integer and Accumulate +# evmwsmiaa rD,rA,rB 101 0101 1001 +# YYY duplicate??? +# define pcodeop VectorMultiplyWordSignedModuloIntegerAndAccumulate2; +# u64 temp; +# temp = RA.h *si RB.h; +# RT = ACC + temp; +# ACC = RT; + +# :evmwsmiaa D,A,B is OP=4 & A & B & D & XOP_0_10=0x559 { +# u64 temp; +# temp = RA.h *si RB.h; +# RT = ACC + temp; +# ACC = RT; +#} + +# evmwsmian RT,RA,RB +# ISA-cmt: Vector Multiply Word Signed, Modulo, Integer and Accumulate Negative +# evmwsmian rD,rA,rB 101 1101 1001 +# evmwsmian confict with macchwso. +# ppc_instructions.sinc :macchwso. D,A,B is OP=4 & D & A & B & OE=1 & XOP_1_9=236 & Rc=1 +# define pcodeop VectorMultiplyWordSignedModuloIntegerAndAccumulateNegative; +# :evmwsmian D,A,B is OP=4 & A & B & D & XOP_0_10=0x5D9 { +# u64 temp; +# temp = RA.h *si RB.h; +# RT = ACC - temp; +# ACC = RT; +# } + +# evmwssf RT,RA,RB +# ISA-cmt: Vector Multiply Word Signed, Saturate, Fractional +# evmwssf rD,rA,rB 100 0101 0011 +define pcodeop VectorMultiplyWordSignedSaturateFractional1; +define pcodeop VectorMultiplyWordSignedSaturateFractional2; +:evmwssf D,A,B is OP=4 & A & B & D & XOP_0_10=0x453 { +# SPEFSCR.OVH = 0; +# SPEFSCR.OV = mov; +# SPEFSCR.SOV = SPEFSCR.SOV | mov; + # TODO definition + D = VectorMultiplyWordSignedSaturateFractional1(D,A,B,ACC); + ACC = D; + flags:8 = VectorMultiplyWordSignedSaturateFractional2(D,A,B,ACC); + spr200 = (spr200 & (~ (0x200000000)) ) | (flags & (0x200000000)); + spr200 = (spr200 & (~ (0x2000000000000)) ) | (flags & (0x2000000000000)); + spr200 = spr200 | (flags & (0x100000000)); + spr200 = spr200 | (flags & (0x1000000000000)); +} + +# evmwssfa RT,RA,RB +# ISA-cmt: Vector Multiply Word Signed, Saturate, Fractional to Accumulator +# evmwssfa rD,rA,rB 100 0111 0011 +define pcodeop VectorMultiplyWordSignedSaturateFractionalAndAccumulate1a; +define pcodeop VectorMultiplyWordSignedSaturateFractionalAndAccumulate1b; +:evmwssfa D,A,B is OP=4 & A & B & D & XOP_0_10=0x473 { +# SPEFSCR.OVH = 0; +# SPEFSCR.OV = mov; +# SPEFSCR.SOV = SPEFSCR.SOV | mov; + # TODO definition + D = VectorMultiplyWordSignedSaturateFractionalAndAccumulate1a(D,A,B,ACC); + ACC = D; + flags:8 = VectorMultiplyWordSignedSaturateFractionalAndAccumulate1b(D,A,B,ACC); + spr200 = (spr200 & (~ (0x200000000)) ) | (flags & (0x200000000)); + spr200 = (spr200 & (~ (0x2000000000000)) ) | (flags & (0x2000000000000)); + spr200 = spr200 | (flags & (0x100000000)); + spr200 = spr200 | (flags & (0x1000000000000)); +} + +# evmwssfaa RT,RA,RB +# ISA-cmt: Vector Multiply Word Signed, Saturate, Fractional and Accumulate +# evmwssfaa rD,rA,rB 101 0101 0011 +define pcodeop VectorMultiplyWordSignedSaturateFractionalAndAccumulate2a; +define pcodeop VectorMultiplyWordSignedSaturateFractionalAndAccumulate2b; +:evmwssfaa D,A,B is OP=4 & A & B & D & XOP_0_10=0x553 { +# SPEFSCR.OVH = 0; +# SPEFSCR.OV = ov | mov; +# SPEFSCR.SOV = 
SPEFSCR.SOV | ov | mov; + # TODO definition + D = VectorMultiplyWordSignedSaturateFractionalAndAccumulate2a(A,B,ACC); + flags:8 = VectorMultiplyWordSignedSaturateFractionalAndAccumulate2b(A,B,ACC); + ACC = D; + spr200 = (spr200 & (~ (0x200000000)) ) | (flags & (0x200000000)); + spr200 = (spr200 & (~ (0x2000000000000)) ) | (flags & (0x2000000000000)); + spr200 = spr200 | (flags & (0x100000000)); + spr200 = spr200 | (flags & (0x1000000000000)); +} + +# evmwssfan RT,RA,RB +# ISA-cmt: Vector Multiply Word Signed, Saturate, Fractional and Accumulate Negative +# evmwssfan rD,rA,rB 101 1101 0011 +define pcodeop VectorMultiplyWordSignedSaturateFractionalAndAccumulateNegative1; +define pcodeop VectorMultiplyWordSignedSaturateFractionalAndAccumulateNegative2; +:evmwssfan D,A,B is OP=4 & A & B & D & XOP_0_10=0x5D3 { +# SPEFSCR.OVH = 0; +# SPEFSCR.OV = ov | mov; +# SPEFSCR.SOV = SPEFSCR.SOV | ov | mov; + # TODO definition + D = VectorMultiplyWordSignedSaturateFractionalAndAccumulateNegative1(A,B,ACC,spr200); + flags:8 = VectorMultiplyWordSignedSaturateFractionalAndAccumulateNegative2(A,B,ACC,spr200); + ACC = D; + spr200 = (spr200 & (~ (0x200000000)) ) | (flags & (0x200000000)); + spr200 = (spr200 & (~ (0x2000000000000)) ) | (flags & (0x2000000000000)); + spr200 = spr200 | (flags & (0x100000000)); + spr200 = spr200 | (flags & (0x1000000000000)); +} + +# evmwumi RT,RA,RB +# ISA-cmt: Vector Multiply Word Unsigned, Modulo, Integer +# evmwumi rD,rA,rB 100 01A1 1000 A=0 +# evmwumi confict with machhwo +# define pcodeop VectorMultiplyWordUnsignedModuloInteger; +# :evmwumi D,A,B is OP=4 & A & B & D & XOP_0_10=0x458 { +# RT = RA.h *ui RB.h; +# VectorMultiplyWordUnsignedModuloInteger(D,A,B,ACC); +# } + +# evmwumia RT,RA,RB +# ISA-cmt: Vector Multiply Word Unsigned, Modulo, Integer to Accumulator +# evmwumia rD,rA,rB 100 01A1 1000 A=1 +:evmwumia D,A,B is OP=4 & A & B & D & XOP_0_10=0x478 { +# RT = RA.h *ui RB.h; +# ACC = RT; + + D = (( A & (0xFFFFFFFF00000000) ) >> 32) * (( B & (0xFFFFFFFF00000000) ) >> 32); + ACC = D; +} + +# evmwumiaa RT,RA,RB +# ISA-cmt: Vector Multiply Word Unsigned, Modulo, Integer and Accumulate +# evmwumiaa rD,rA,rB 101 0101 1000 +# evmwumiaa confict with macchwo +# :evmwumiaa D,A,B is OP=4 & A & B & D & XOP_0_10=0x558 { +# u64 temp; +# temp = RA.h *ui RB.h; +# RT = ACC + temp; +# ACC = RT; + +# VectorMultiplyWordUnsignedModuloIntegerAndAccumulate2(D,A,B,ACC); +# } + +# evmwumian RT,RA,RB +# ISA-cmt: Vector Multiply Word Unsigned, Modulo, Integer and Accumulate Negative +# evmwumian rD,rA,rB 101 1101 1000 +# evmwumian confict with macchwso +# :evmwumian D,A,B is OP=4 & A & B & D & XOP_0_10=0x5D8 { +# u64 temp; +# temp = RA.h *ui RB.h; +# RT = ACC - temp; +# ACC = RT; + +# VectorMultiplyWordUnsignedModuloIntegerAndAccumulateNegative(D,A,B,ACC); +# } + +# ================================================================= +# Page D-15 + +# evnand RT,RA,RB +# ISA-cmt: Vector NAND +# evnand rD,rA,rB 010 0001 1110 +:evnand D,A,B is OP=4 & A & B & D & XOP_0_10=0x21E { +# RT.l = ONESCOMP(RA.l & RB.l); +# RT.h = ONESCOMP(RA.h & RB.h); + + lo:$(REGISTER_SIZE) = ~ ( (( A & (0x00000000FFFFFFFF) ) ) & (( B & (0x00000000FFFFFFFF) ) ) ); + hi:$(REGISTER_SIZE) = ~ ( (( A & (0xFFFFFFFF00000000) ) >> 32) & (( B & (0xFFFFFFFF00000000) ) >> 32) ); + D = (( zext(hi) << 32) | zext(lo) ); +} + +# evneg RT,RA +# ISA-cmt: Vector Negate +# evneg rD,rA 010 0000 1001 +:evneg D,A is OP=4 & A & D & XOP_0_10=0x209 & BITS_11_15=0 { +# RT.l = NEG(RA.l); +# RT.h = NEG(RA.h); + + lo:$(REGISTER_SIZE) = - (( A & 
(0x00000000FFFFFFFF) ) );
+ hi:$(REGISTER_SIZE) = - (( A & (0xFFFFFFFF00000000) ) >> 32);
+ D = (( zext(hi) << 32) | zext(lo) );
+}
+
+# evnor RT,RA,RB
+# ISA-cmt: Vector NOR
+# evnor rD,rA,rB 010 0001 1000
+:evnor D,A,B is OP=4 & A & B & D & XOP_0_10=0x218 {
+# RT.l = ONESCOMP(RA.l | RB.l);
+# RT.h = ONESCOMP(RA.h | RB.h);
+
+ lo:$(REGISTER_SIZE) = ~ ( (( A & (0x00000000FFFFFFFF) ) ) | (( B & (0x00000000FFFFFFFF) ) ) );
+ hi:$(REGISTER_SIZE) = ~ ( (( A & (0xFFFFFFFF00000000) ) >> 32) | (( B & (0xFFFFFFFF00000000) ) >> 32) );
+ D = (( zext(hi) << 32) | zext(lo) );
+}
+
+# evnot => evnor
+
+# evor RT,RA,RB
+# ISA-cmt: Vector OR
+# evor rD,rA,rB 010 0001 0111
+:evor D,A,B is OP=4 & A & B & D & XOP_0_10=0x217 {
+# RT.l = RA.l | RB.l;
+# RT.h = RA.h | RB.h;
+
+ lo:$(REGISTER_SIZE) = (( A & (0x00000000FFFFFFFF) ) ) | (( B & (0x00000000FFFFFFFF) ) );
+ hi:$(REGISTER_SIZE) = (( A & (0xFFFFFFFF00000000) ) >> 32) | (( B & (0xFFFFFFFF00000000) ) >> 32);
+ D = (( zext(hi) << 32) | zext(lo) );
+}
+
+# evorc RT,RA,RB
+# ISA-cmt: Vector OR with Complement
+# evorc rD,rA,rB 010 0001 1011
+:evorc D,A,B is OP=4 & A & B & D & XOP_0_10=0x21B {
+# RT.l = RA.l | ONESCOMP(RB.l);
+# RT.h = RA.h | ONESCOMP(RB.h);
+
+ lo:$(REGISTER_SIZE) = (( A & (0x00000000FFFFFFFF) ) ) | (~ (( B & (0x00000000FFFFFFFF) ) ));
+ hi:$(REGISTER_SIZE) = (( A & (0xFFFFFFFF00000000) ) >> 32) | (~ (( B & (0xFFFFFFFF00000000) ) >> 32));
+ D = (( zext(hi) << 32) | zext(lo) );
+}
+
+define pcodeop ROTL64;
+
+# evrlw RT,RA,RB
+# ISA-cmt: Vector Rotate Left Word
+# evrlw rD,rA,rB 010 0010 1000
+:evrlw D,A,B is OP=4 & A & B & D & XOP_0_10=0x228 {
+# nh = RB.bsub(27:31);
+# nl = RB.bsub(59:63);
+# RT.l = ROTL(RA.l, nh);
+# RT.h = ROTL(RA.h, nl);
+
+ nh:$(REGISTER_SIZE) = ((B & 0x00000000f8000000) >> 27);
+ nl:$(REGISTER_SIZE) = ((B & 0xf800000000000000) >> 59);
+ lo:8 = ROTL64( (( A & (0x00000000FFFFFFFF) ) ) ,nh);
+ hi:8 = ROTL64( (( A & (0xFFFFFFFF00000000) ) >> 32) ,nl);
+ D = (( zext(hi) << 32) | zext(lo) );
+}
+
+# evrlwi RT,RA,UI
+# ISA-cmt: Vector Rotate Left Word Immediate
+# evrlwi rD,rA,EVUIMM 010 0010 1010
+:evrlwi D,A,EVUIMM is OP=4 & A & D & EVUIMM & XOP_0_10=0x22A {
+# n = UI;
+# RT.l = ROTL(RA.l, n);
+# RT.h = ROTL(RA.h, n);
+
+ n:8 = EVUIMM;
+ lo:8 = ROTL64( (( A & (0x00000000FFFFFFFF) ) ) ,n);
+ hi:8 = ROTL64( (( A & (0xFFFFFFFF00000000) ) >> 32) ,n);
+ D = (( zext(hi) << 32) | zext(lo) );
+}
+
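+# ROTL64 above is deliberately opaque (a pcodeop). If a concrete expansion
+# were ever wanted, a 32-bit lane rotate can be written with plain shifts;
+# sketch only, w/r/n are scratch names. Since p-code defines an oversized
+# shift as producing 0, the n == 0 case also falls out correctly:
+#
+#   w:4 = A:4;                          # low 32-bit lane of rA
+#   r:4 = (w << n) | (w >> (32 - n));   # rotate-left by n (0 <= n < 32)
+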
+# evrndw RT,RA
+# ISA-cmt: Vector Round Word
+# evrndw rD,rA 010 0000 1100
+:evrndw D,A is OP=4 & A & D & UIMM & XOP_0_10=0x20C {
+# RT.l = (RA.l + 0x00008000) & 0xFFFF0000;
+# RT.h = (RA.h + 0x00008000) & 0xFFFF0000;
+
+ lo:$(REGISTER_SIZE) = ((( A & (0x00000000FFFFFFFF) ) ) + 0x00008000) & 0xFFFF0000;
+ hi:$(REGISTER_SIZE) = ((( A & (0xFFFFFFFF00000000) ) >> 32) + 0x00008000) & 0xFFFF0000;
+ D = (( zext(hi) << 32) | zext(lo) );
+}
+
+# SPECIAL ** YYY
+# evsel RT,RA,RB,BFA
+# ISA-cmt: Vector Select
+# evsel rD,rA,rB,crS 0100 1111
+# define pcodeop VectorSelect;
+# :evsel D,A,B,crS is OP=4 & A & B & D & crS & XOP_3_10=0x4F {
+ # TODO definition complicated
+# VectorSelect(D,A,B,crS);
+# }
+
+# evslw RT,RA,RB
+# ISA-cmt: Vector Shift Left Word
+# evslw rD,rA,rB 010 0010 0100
+:evslw D,A,B is OP=4 & A & B & D & XOP_0_10=0x224 {
+# nh = RB.bsub(26:31);
+# nl = RB.bsub(58:63);
+# RT.l = SL(RA.l,nh);
+# RT.h = SL(RA.h,nl);
+
+ nh:$(REGISTER_SIZE) = ((B & 0x00000000fc000000) >> 26);
+ nl:$(REGISTER_SIZE) = ((B & 0xfc00000000000000) >> 58);
+ lo:$(REGISTER_SIZE) = ((( A & (0x00000000FFFFFFFF) ) ) << nh);
+ hi:$(REGISTER_SIZE) = ((( A & (0xFFFFFFFF00000000) ) >> 32) << nl);
+ D = (( zext(hi) << 32) | zext(lo) );
+}
+
+# evslwi RT,RA,UI
+# ISA-cmt: Vector Shift Left Word Immediate
+# evslwi rD,rA,EVUIMM 010 0010 0110
+:evslwi D,A,EVUIMM is OP=4 & A & D & EVUIMM & XOP_0_10=0x226 {
+# n = UI;
+# RT.l = SL(RA.l, n);
+# RT.h = SL(RA.h, n);
+
+ n:8 = EVUIMM;
+ lo:8 = (( A & (0x00000000FFFFFFFF) ) ) << n;
+ hi:8 = (( A & (0xFFFFFFFF00000000) ) >> 32) << n;
+ D = (( zext(hi) << 32) | zext(lo) );
+}
+
+# evsplatfi RT,SI
+# ISA-cmt: Vector Splat Fractional Immediate
+# evsplatfi rD,BU_SIMM 010 0010 1011
+define pcodeop VectorSplatFractionalImmediate;
+:evsplatfi D,BU_SIMM is OP=4 & D & BU_SIMM & XOP_0_10=0x22B {
+ # TODO definition
+# RT0:31 = SI || 27 zeros
+# RT32:63 = SI || 27 zeros
+# The value specified by SI is padded with trailing zeros
+# and placed in both elements of RT. The SI ends up in
+# bit positions RT0:4 and RT32:36.
+
+ D = VectorSplatFractionalImmediate();
+}
+
+
+# BU_SIMMt: is BU_SIMM [ val = BU_SIMM; ] { tmp:8 = sext(BU_SIMM); export tmp; }
+
+# evsplati RT,SI
+# ISA-cmt: Vector Splat Immediate
+# evsplati rD,BU_SIMM 010 0010 1001
+define pcodeop VectorSplatImmediate;
+:evsplati D,BU_SIMM is OP=4 & D & BU_SIMM & XOP_0_10=0x229 {
+# RT.l = EXTS(SI);
+# RT.h = EXTS(SI);
+
+# lo:8 = BU_SIMMt; # sign or zext
+# hi:8 = BU_SIMM;
+# D = 64From2_32(hi,lo);
+
+ D = VectorSplatImmediate();
+}
+
+# evsrwis RT,RA,UI
+# ISA-cmt: Vector Shift Right Word Immediate Signed
+# evsrwis rD,rA,EVUIMM 010 0010 0011
+define pcodeop VectorShiftRightWordImmediateSigned;
+:evsrwis D,A,EVUIMM is OP=4 & A & D & EVUIMM & XOP_0_10=0x223 {
+ # TODO definition
+# n = UI
+# RT0:31 = EXTS((RA)0:31-n)
+# RT32:63 = EXTS((RA)32:63-n)
+# Both high and low elements of RA are shifted right by
+# the 5-bit UI value. Bits in the most significant positions
+# vacated by the shift are filled with a copy of the sign bit.
+
+ D = VectorShiftRightWordImmediateSigned(A);
+}
+
+# evsrwiu RT,RA,UI
+# ISA-cmt: Vector Shift Right Word Immediate Unsigned
+# evsrwiu rD,rA,EVUIMM 010 0010 0010
+define pcodeop VectorShiftRightWordImmediateUnsigned;
+:evsrwiu D,A,EVUIMM is OP=4 & A & D & EVUIMM & XOP_0_10=0x222 {
+ # TODO definition
+# n = UI
+# RT0:31 = EXTZ((RA)0:31-n)
+# RT32:63 = EXTZ((RA)32:63-n)
+# Both high and low elements of RA are shifted right by
+# the 5-bit UI value; zeros are shifted into the most significant
+# position.
+
+ D = VectorShiftRightWordImmediateUnsigned(A);
+}
+
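+# evsrwis/evsrwiu above are left as opaque pcodeops ("TODO definition"),
+# which keeps disassembly correct but hides the shift semantics from p-code
+# consumers such as Maat. A concrete evsrwiu could reuse the evslwi lane
+# pattern (sketch only):
+#
+#   n:8  = EVUIMM;
+#   lo:8 = (( A & (0x00000000FFFFFFFF) ) ) >> n;
+#   hi:8 = (( A & (0xFFFFFFFF00000000) ) >> 32) >> n;
+#   D = (( zext(hi) << 32) | zext(lo) );
+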
+# evsrws RT,RA,RB
+# ISA-cmt: Vector Shift Right Word Signed
+# evsrws rD,rA,rB 010 0010 0001
+:evsrws D,A,B is OP=4 & A & B & D & XOP_0_10=0x221 {
+# nh = RB.bsub(26:31);
+# nl = RB.bsub(58:63);
+# RT.l = EXTS(RA.bsub(0:32-nh));
+# RT.h = EXTS(RA.bsub(32:63-nl));
+
+ nh:$(REGISTER_SIZE) = 32-((B & 0x00000000fc000000) >> 26);
+ nl:$(REGISTER_SIZE) = 63-((B & 0xfc00000000000000) >> 58);
+ tmp:$(REGISTER_SIZE) = (A & ((0xFFFFFFFFFFFFFFFF >> (64 - ((nh) - (0) + 1))) << (0)) >> 0);
+ lo:8 = sext(tmp:4);
+ tmp = (A & ((0xFFFFFFFFFFFFFFFF >> (64 - ((nl) - (32) + 1))) << (32)) >> 32);
+ hi:8 = sext(tmp:4);
+ D = (( zext(hi) << 32) | zext(lo) );
+}
+
+# evsrwu RT,RA,RB
+# ISA-cmt: Vector Shift Right Word Unsigned
+# evsrwu rD,rA,rB 010 0010 0000
+:evsrwu D,A,B is OP=4 & A & B & D & XOP_0_10=0x220 {
+# nh = RB.bsub(26:31);
+# nl = RB.bsub(58:63);
+# RT.l = EXTZ(RA.bsub(0:32-nh));
+# RT.h = EXTZ(RA.bsub(32:63-nl));
+
+ nh:$(REGISTER_SIZE) = 32-((B & 0x00000000fc000000) >> 26);
+ nl:$(REGISTER_SIZE) = 63-((B & 0xfc00000000000000) >> 58);
+ tmp:$(REGISTER_SIZE) = (A & ((0xFFFFFFFFFFFFFFFF >> (64 - ((nh) - (0) + 1))) << (0)) >> 0);
+ lo:8 = zext(tmp:4);
+ tmp = (A & ((0xFFFFFFFFFFFFFFFF >> (64 - ((nl) - (32) + 1))) << (32)) >> 32);
+ hi:8 = zext(tmp:4);
+ D = (( zext(hi) << 32) | zext(lo) );
+}
+
+# evstdd RS,D(RA)
+# ISA-cmt: Vector Store Double of Double
+# evstdd rD,rA,EVUIMM_8 011 0010 0001
+# defined evx.sinc EJ XXX
+#define pcodeop VectorStoreDoubleOfDouble;
+#:evstdd D,A,EVUIMM_8 is OP=4 & A & D & EVUIMM_8 & XOP_0_10=0x321 {
+# if (RA == 0) {
+# b = 0;
+# } else {
+# b = RA;
+# }
+# EA = b + EXTZ(UI*8);
+# MEM(EA,8) = RS;
+
+# VectorStoreDoubleOfDouble(D,A);
+# }
+
+# evstddx RS,RA,RB
+# ISA-cmt: Vector Store Double of Double Indexed
+# evstddx rS,rA,rB 011 0010 0000
+# defined evx.sinc EJ XXX
+# define pcodeop VectorStoreDoubleOfDoubleIndexed;
+# :evstddx S,A,B is OP=4 & A & S & B & XOP_0_10=0x320 {
+# if (RA == 0) {
+# b = 0;
+# } else {
+# b = RA;
+# }
+# EA = b + RB;
+# MEM(EA,8) = RS;
+
+# VectorStoreDoubleOfDoubleIndexed(S,A,B);
+# }
+# :evstddx RS,RA_OR_ZERO,RB is OP=4 & RS & RA_OR_ZERO & RB & XOP_0_10=800
+# {
+# ea = RA_OR_ZERO + RB;
+# *:8 ($(EATRUNC)) = RS;
+# }
+
+# evstdh RS,D(RA)
+# ISA-cmt: Vector Store Double of Four Halfwords
+# evstdh rS,rA,EVUIMM_8 011 0010 0101
+:evstdh S,EVUIMM_8_RAt is OP=4 & A & S & EVUIMM_8 & EVUIMM_8_RAt & XOP_0_10=0x325 {
+# if (RA == 0) {
+# b = 0;
+# } else {
+# b = RA;
+# }
+# EA = b + EXTZ(UI*8);
+# MEM(EA,2) = RS.S0;
+# MEM(EA+2,2) = RS.S1;
+# MEM(EA+4,2) = RS.S2;
+# MEM(EA+6,2) = RS.S3;
+
+ EA:$(REGISTER_SIZE) = A + zext(EVUIMM_8_RAt);
+ *:2 (EA) = *:2 ((S) & $(MEMMASK));
+ *:2 (EA+2) = *:2 ((S+2) & $(MEMMASK));
+ *:2 (EA+4) = *:2 ((S+4) & $(MEMMASK));
+ *:2 (EA+6) = *:2 ((S+6) & $(MEMMASK));
+}
+
+# evstdhx RS,RA,RB
+# ISA-cmt: Vector Store Double of Four Halfwords Indexed
+# evstdhx rS,rA,rB 011 0010 0100
+:evstdhx S,A,B is OP=4 & A & B & S & XOP_0_10=0x324 {
+# if (RA == 0) {
+# b = 0;
+# } else {
+# b = RA;
+# }
+# EA = b + RB;
+# MEM(EA,2) = RS.S0;
+# MEM(EA+2,2) = RS.S1;
+# MEM(EA+4,2) = RS.S2;
+# MEM(EA+6,2) = RS.S3;
+
+ EA:$(REGISTER_SIZE) = A + B;
+ *:2 (EA) = *:2 ((S) & $(MEMMASK));
+ *:2 (EA+2) = *:2 ((S+2) & $(MEMMASK));
+ *:2 (EA+4) = *:2 ((S+4) & $(MEMMASK));
+ *:2 (EA+6) = *:2 ((S+6) & $(MEMMASK));
+}
+
+# evstdw RS,D(RA)
+# ISA-cmt: Vector Store Double of Two Words
+# evstdw rS,rA,EVUIMM_8 011 0010 0011
+:evstdw S,EVUIMM_8_RAt is OP=4 & A & S & EVUIMM_8 & EVUIMM_8_RAt & 
XOP_0_10=0x323 { +# if (RA == 0) { +# b = 0; +# } else { +# b = RA; +# } +# EA = b + EXTZ(UI*8); +# MEM(EA,4) = RS.l; +# MEM(EA+4,4) = RS.h; + + EA:$(REGISTER_SIZE) = A + zext(EVUIMM_8_RAt); + *:4 (EA) = *:4 ((S) & $(MEMMASK)); + *:4 (EA+4) = *:4 ((S+4) & $(MEMMASK)); +} + +# evstdwx RS,RA,RB +# ISA-cmt: Vector Store Double of Two Words Indexed +# evstdwx rS,rA,rB 011 0010 0010 +:evstdwx S,A,B is OP=4 & A & B & S & XOP_0_10=0x322 { +# if (RA == 0) { +# b = 0; +# } else { +# b = RA; +# } +# EA = b + RB; +# MEM(EA,4) = RS.l; +# MEM(EA+4,4) = RS.h; + + EA:$(REGISTER_SIZE) = A + B; + *:4 (EA) = *:4 ((S) & $(MEMMASK)); + *:4 (EA+4) = *:4 ((S+4) & $(MEMMASK)); +} + +# evstwhe RS,D(RA) +# ISA-cmt: Vector Store Word of Two Halfwords from Even +# evstwhe rS,rA,EVUIMM_4 011 0011 0001 +:evstwhe S,EVUIMM_4_RAt is OP=4 & A & S & EVUIMM_4_RAt & XOP_0_10=0x331 { +# if (RA == 0) { +# b = 0; +# } else { +# b = RA; +# } +# EA = b + EXTZ(UI*4); +# MEM(EA,2) = RS.S0; +# MEM(EA+2,2) = RS.S2; + + EA:$(REGISTER_SIZE) = A + zext(EVUIMM_4_RAt); + *:2 (EA) = *:2 ((S) & $(MEMMASK)); + *:2 (EA+2) = *:2 ((S+2) & $(MEMMASK)); +} + +# evstwhex RS,RA,RB +# ISA-cmt: Vector Store Word of Two Halfwords from Even Indexed +# evstwhex rS,rA,rB 011 0011 0000 +:evstwhex S,A,B is OP=4 & A & B & S & XOP_0_10=0x330 { +# if (RA == 0) { +# b = 0; +# } else { +# b = RA; +# } +# EA = b + RB; +# MEM(EA,2) = RS.S0; +# MEM(EA+2,2) = RS.S2; + + EA:$(REGISTER_SIZE) = A + B; + *:2 (EA) = *:2 ((S) & $(MEMMASK)); + *:2 (EA+2) = *:2 ((S+2) & $(MEMMASK)); +} + +# evstwho RS,D(RA) +# ISA-cmt: Vector Store Word of Two Halfwords from Odd +# evstwho rS,rA,EVUIMM_4 011 0011 0101 +:evstwho S,EVUIMM_4_RAt is OP=4 & A & S & EVUIMM_4_RAt & XOP_0_10=0x335 { +# if (RA == 0) { +# b = 0; +# } else { +# b = RA; +# } +# EA = b + EXTZ(UI*4); +# MEM(EA,2) = RS.S1; +# MEM(EA+2,2) = RS.S3; + + EA:$(REGISTER_SIZE) = A + zext(EVUIMM_4_RAt); + *:2 (EA) = *:2 ((S+2) & $(MEMMASK)); + *:2 (EA+2) = *:2 ((S+6) & $(MEMMASK)); +} + +# evstwhox RS,RA,RB +# ISA-cmt: Vector Store Word of Two Halfwords from Odd Indexed +# evstwhox rS,rA,rB 011 0011 0100 +:evstwhox S,A,B is OP=4 & A & B & S & XOP_0_10=0x334 { +# if (RA == 0) { +# b = 0; +# } else { +# b = RA; +# } +# EA = b + RB; +# MEM(EA,2) = RS.S1; +# MEM(EA+2,2) = RS.S3; + + EA:$(REGISTER_SIZE) = A + B; + *:2 (EA) = *:2 ((S+2) & $(MEMMASK)); + *:2 (EA+2) = *:2 ((S+6) & $(MEMMASK)); +} + +# evstwwe RS,D(RA) +# ISA-cmt: Vector Store Word of Word from Even +# evstwwe rS,rA,EVUIMM_4 011 0011 1001 +#define pcodeop VectorStoreWordOfWordFromEven; +#:evstwwe S,EVUIMM_4_RAt is OP=4 & A & S & EVUIMM_4_RAt & XOP_0_10=0x339 { +# if (RA == 0) { +# b = 0; +# } else { +# b = RA; +# } +# EA = b + EXTZ(UI*4); +# MEM(EA,4) = RS; + +# VectorStoreWordOfWordFromEven(S,A); +# } + +# evstwwex RS,RA,RB +# ISA-cmt: Vector Store Word of Word from Even Indexed +# evstwwex rS,rA,rB 011 0011 1000 +#define pcodeop VectorStoreWordOfWordFromEvenIndexed; +#:evstwwex S,A,B is OP=4 & A & B & S & XOP_0_10=0x338 { +# if (RA == 0) { +# b = 0; +# } else { +# b = RA; +# } +# EA = b + RB; +# MEM(EA,4) = RS; + +# VectorStoreWordOfWordFromEvenIndexed(S,A,B); +# } + +# evstwwo RS,D(RA) +# ISA-cmt: Vector Store Word of Word from Odd +# evstwwo rS,rA,EVUIMM_4 011 0011 1101 +:evstwwo S,EVUIMM_4_RAt is OP=4 & A & S & EVUIMM_4_RAt & XOP_0_10=0x33D { +# if (RA == 0) { +# b = 0; +# } else { +# b = RA; +# } +# EA = b + EXTZ(UI*4); +# MEM(EA,4) = RS.h; + + EA:$(REGISTER_SIZE) = A + zext(EVUIMM_4_RAt); + *:4 (EA) = *:4 ((S+4) & $(MEMMASK)); +} + +# evstwwox RS,RA,RB +# ISA-cmt: 
Vector Store Word of Word from Odd Indexed +# evstwwox rS,rA,rB 011 0011 1100 +:evstwwox S,A,B is OP=4 & A & B & S & XOP_0_10=0x33C { +# if (RA == 0) { +# b = 0; +# } else { +# b = RA; +# } +# EA = b + RB; +# MEM(EA,4) = RS.h; + + EA:$(REGISTER_SIZE) = A + B; + *:4 (EA) = *:4 ((S+4) & $(MEMMASK)); +} + +# evsubfsmiaaw RT,RA +# ISA-cmt: Vector Subtract Signed, Modulo, Integer to Accumulator Word +# evsubfsmiaaw rD,rA 100 0011 1011 +:evsubfsmiaaw D,A is OP=4 & A & D & XOP_0_10=0x4CB & BITS_11_15=0 { +# RT.l = ACC.l - RA.l; +# RT.h = ACC.h - RA.h; +# ACC = RT; + + lo:$(REGISTER_SIZE) = (( ACC & (0x00000000FFFFFFFF) ) ) - (( A & (0x00000000FFFFFFFF) ) ); + hi:$(REGISTER_SIZE) = (( ACC & (0xFFFFFFFF00000000) ) >> 32) - (( A & (0xFFFFFFFF00000000) ) >> 32); + D = (( zext(hi) << 32) | zext(lo) ); + ACC = D; +} + +# ================================================================= +# Page D-16 + +# evsubfssiaaw RT,RA +# ISA-cmt: Vector Subtract Signed, Saturate, Integer to Accumulator Word +# evsubfssiaaw rD,rA 100 1100 0011 +define pcodeop VectorSubtractSignedSaturateIntegerToAccumulatorWord1; +define pcodeop VectorSubtractSignedSaturateIntegerToAccumulatorWord2; +:evsubfssiaaw D,A is OP=4 & A & D & XOP_0_10=0x4C3 & BITS_11_15=0 { +# SPEFSCR.OVH = ovh; +# SPEFSCR.OV = ovl; +# SPEFSCR.SOVH = SPEFSCR.SOVH | ovh; +# SPEFSCR.SOV = SPEFSCR.SOV | ovl; + # TODO definition complicated + D = VectorSubtractSignedSaturateIntegerToAccumulatorWord1(A,ACC); + flags:8 = VectorSubtractSignedSaturateIntegerToAccumulatorWord2(A,ACC,spr200); + spr200 = (spr200 & (~ (0x200000000)) ) | (flags & (0x200000000)); + spr200 = (spr200 & (~ (0x2000000000000)) ) | (flags & (0x2000000000000)); + spr200 = spr200 | (flags & (0x100000000)); + spr200 = spr200 | (flags & (0x1000000000000)); +} + +# evsubfumiaaw RT,RA +# ISA-cmt: Vector Subtract Unsigned, Modulo, Integer to Accumulator Word +# evsubfumiaaw rD,rA 100 1100 1010 +:evsubfumiaaw D,A is OP=4 & A & D & XOP_0_10=0x4CA & BITS_11_15=0 { +# RT.l = ACC.l - RA.l; +# RT.h = ACC.h - RA.h; +# ACC = RT; + + lo:$(REGISTER_SIZE) = (( ACC & (0x0000000000000000) ) >> 32) - (( A & (0x0000000000000000) ) >> 32); + hi:$(REGISTER_SIZE) = (( ACC & (0xFFFFFFFF00000000) ) >> 32) - (( A & (0xFFFFFFFF00000000) ) >> 32); + D = (( zext(hi) << 32) | zext(lo) ); + ACC = D; +} + +# evsubfusiaaw RT,RA +# ISA-cmt: Vector Subtract Unsigned, Saturate, Integer to Accumulator Word +# evsubfusiaaw rD,rA 100 1100 0010 +define pcodeop VectorSubtractUnsignedSaturateIntegerToAccumulatorWord1; +define pcodeop VectorSubtractUnsignedSaturateIntegerToAccumulatorWord2; +# SPEFSCR.OVH = ovh; +# SPEFSCR.OV = ovl; +# SPEFSCR.SOVH = SPEFSCR.SOVH | ovh; +# SPEFSCR.SOV = SPEFSCR.SOV | ovl; +:evsubfusiaaw D,A is OP=4 & A & D & XOP_0_10=0x4C2 & BITS_11_15=0 { + # TODO definition complicated + VectorSubtractUnsignedSaturateIntegerToAccumulatorWord1(D,A,ACC,spr200); + flags:8 = VectorSubtractUnsignedSaturateIntegerToAccumulatorWord2(D,A,ACC,spr200); + spr200 = (spr200 & (~ (0x200000000)) ) | (flags & (0x200000000)); + spr200 = (spr200 & (~ (0x2000000000000)) ) | (flags & (0x2000000000000)); + spr200 = spr200 | (flags & (0x100000000)); + spr200 = spr200 | (flags & (0x1000000000000)); +} + +# evsubfw RT,RA,RB +# ISA-cmt: Vector Subtract from Word +# evsubfw rD,rA,rB 010 0000 0100 +:evsubfw D,A,B is OP=4 & A & B & D & XOP_0_10=0x204 { +# RT.l = RB.l - RA.l; +# RT.h = RB.h - RA.h; + + lo:$(REGISTER_SIZE) = (( B & (0x00000000FFFFFFFF) ) ) - (( A & (0x00000000FFFFFFFF) ) ); + hi:$(REGISTER_SIZE) = (( B & (0xFFFFFFFF00000000) ) >> 
32) - (( A & (0xFFFFFFFF00000000) ) >> 32);
+ D = (( zext(hi) << 32) | zext(lo) );
+}
+
+# evsubifw RT,UI,RB
+# ISA-cmt: Vector Subtract Immediate from Word
+# evsubifw rD,UIMM,rB 010 0000 0110
+:evsubifw D,BITS_16_20,B is OP=4 & D & BITS_16_20 & B & XOP_0_10=0x206 {
+# RT.l = RB.l - EXTZ(UI);
+# RT.h = RB.h - EXTZ(UI);
+
+ tmp:8 = BITS_16_20*1;
+ lo:$(REGISTER_SIZE) = (( B & (0x00000000FFFFFFFF) ) ) - tmp;
+ hi:$(REGISTER_SIZE) = (( B & (0xFFFFFFFF00000000) ) >> 32) - tmp;
+ D = (( zext(hi) << 32) | zext(lo) );
+}
+
+# evsubiw => evsubifw
+
+# evsubw => evsubfw
+
+# evxor RT,RA,RB
+# ISA-cmt: Vector XOR
+# evxor rD,rA,rB 010 0001 0110
+# defined evx.sinc EJ XXX
+# define pcodeop VectorXOR;
+# : D = 64From2_32(hi,lo);
+# :evxor S,A,B is OP=4 & S & A & B & XOP_0_10=0x216 {
+# RT.l = RA.l ^ RB.l;
+# RT.h = RA.h ^ RB.h;
+
+# VectorXOR(S,A,B);
+# lo = A:2 ^ B:2;
+# hi = A(2) ^ B(2);
+# D:2 = lo;
+# D(2) = hi;
+# }
+
+# TODO evmwlssianw RT,RA,RB
+# TODO complicated
+
+
diff --git a/src/third-party/sleigh/processors/PowerPC/data/languages/SPE_EFSD.sinc b/src/third-party/sleigh/processors/PowerPC/data/languages/SPE_EFSD.sinc
new file mode 100644
index 00000000..e7d6aaba
--- /dev/null
+++ b/src/third-party/sleigh/processors/PowerPC/data/languages/SPE_EFSD.sinc
@@ -0,0 +1,798 @@
+# Based on "PowerISA Version 2.06 Revision B" document dated July 23, 2010
+# Category: SPE.Embedded Float Scalar Double
+
+# version 1.0
+
+# =================================================================
+# Page 576
+
+# efdabs rT,rA
+# ISA-cmt: efdabs - Floating-Point Double-Precision Absolute Value
+# ISA-info: efdabs - Form "EVX" Page 576 Category "SP.FD"
+# binutils: e500.d: 34: 10 a4 02 e4 efdabs r5,r4
+:efdabs D,A is OP=4 & D & A & BITS_11_15=0 & XOP_0_10=740
+{
+ D = abs( A );
+}
+
+# =================================================================
+# Page 577
+
+# efdadd rT,rA,rB
+# ISA-cmt: efdadd - Floating-Point Double-Precision Add
+# ISA-info: efdadd - Form "EVX" Page 577 Category "SP.FD"
+# binutils: e500.d: 40: 10 a4 1a e0 efdadd r5,r4,r3
+:efdadd D,A,B is OP=4 & D & A & B & XOP_0_10=736
+{
+ D = A f+ B;
+ setSPEFSCRAddFlags_L( A, B, D );
+}
+
+
+# =================================================================
+# Page 582
+
+# efdcfs rT,rB
+# ISA-cmt: efdcfs - Floating-Point Double-Precision Convert from Single-Precision
+# ISA-info: efdcfs - Form "EVX" Page 582 Category "SP.FD"
+# binutils: e500.d: a4: 10 a0 22 ef efdcfs r5,r4
+:efdcfs D,B is OP=4 & D & BITS_16_20=0 & B & XOP_0_10=751
+{
+ D = float2float( B:4 );
+ setSPEFSCR_L( D );
+ setSummarySPEFSCR();
+}
+
+
+# =================================================================
+# Page 580
+
+# efdcfsf rT,rB
+# ISA-cmt: efdcfsf - Convert Floating-Point Double-Precision from Signed Fraction
+# ISA-info: efdcfsf - Form "EVX" Page 580 Category "SP.FD"
+# binutils: e500.d: 7c: 10 a0 22 f3 efdcfsf r5,r4
+:efdcfsf D,B is OP=4 & D & BITS_16_20=0 & B & XOP_0_10=755
+{
+ # load fractional divisor as a float
+ tmpA:4 = 0x80000000;
+ tmpA = int2float( tmpA );
+ setSPEFSCR_L( tmpA );
+
+ # check if negative
+ if ( ( B:4 & 0x80000000 ) != 0 ) goto <negative>;
+
+ # float the fractional portion of register B
+ tmpB:4 = int2float( B:4 );
+ setSPEFSCR_L( tmpB );
+ tmpB = tmpB f/ tmpA;
+ setSPEFSCRDivFlags_L( tmpB, tmpA, tmpB );
+
+ goto <done>;
+
+ <negative>
+ # float the fractional portion of register B, 2's complement negate
+ tmpB = int2float( -( B:4 ) );
+ setSPEFSCR_L( tmpB );
+ tmpB = tmpB f/ tmpA;
+ setSPEFSCRDivFlags_L( tmpB, tmpA, tmpB );
+
+ # negate the float
+ tmpB = f-( tmpB );
+ setSPEFSCR_L( tmpB );
+
+ <done>
+ tmpC:8 = float2float( tmpB );
+ setSPEFSCR_L( tmpC );
+
+ setSummarySPEFSCR();
+
+ D = tmpC;
+}
+
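+# The 0x80000000 divisor loaded in efdcfsf above is 2^31: a signed fraction
+# in the low word of rB represents (signed)B / 2^31. For example
+# B:4 = 0x40000000 converts to 0.5 and B:4 = 0xC0000000 (i.e. -0x40000000)
+# converts to -0.5, which is why the negative case takes the separate
+# two's-complement path.
+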
+# =================================================================
+# Page 579
+
+# efdcfsi rT,rB
+# ISA-cmt: efdcfsi - Convert Floating-Point Double-Precision from Signed Integer
+# ISA-info: efdcfsi - Form "EVX" Page 579 Category "SP.FD"
+# binutils: e500.d: 6c: 10 a0 22 f1 efdcfsi r5,r4
+:efdcfsi D,B is OP=4 & D & BITS_16_20=0 & B & XOP_0_10=753
+{
+ # check if negative
+ if ( ( B:4 & 0x80000000 ) != 0 ) goto <negative>;
+
+ # float the integer portion of register B
+ tmpB:8 = int2float( B:4 );
+ setSPEFSCR_L( tmpB );
+
+ goto <done>;
+
+ <negative>
+ # float the integer portion of register B, 2's complement negate
+ tmpB = int2float( -( B:4 ) );
+ setSPEFSCR_L( tmpB );
+
+ # negate the float
+ tmpB = f-( tmpB );
+ setSPEFSCR_L( tmpB );
+
+ <done>
+ setSummarySPEFSCR();
+
+ D = tmpB;
+}
+
+
+# =================================================================
+# Page 580
+
+# efdcfsid rT,rB
+# ISA-cmt: efdcfsid - Convert Floating-Point Double-Precision from Signed Integer Doubleword
+# ISA-info: efdcfsid - Form "EVX" Page 580 Category "SP.FD"
+# binutils: e500.d: 70: 10 a0 22 e3 efdcfsid r5,r4
+:efdcfsid D,B is OP=4 & D & BITS_16_20=0 & B & XOP_0_10=739
+{
+ # check if negative
+ if ( ( B & 0x8000000000000000 ) != 0 ) goto <negative>;
+
+ # float the integer portion of register B
+ tmpB:8 = int2float( B );
+ setSPEFSCR_L( tmpB );
+
+ goto <done>;
+
+ <negative>
+ # float the integer portion of register B, 2's complement negate
+ tmpB = int2float( -( B ) );
+ setSPEFSCR_L( tmpB );
+
+ # negate the float
+ tmpB = f-( tmpB );
+ setSPEFSCR_L( tmpB );
+
+ <done>
+ setSummarySPEFSCR();
+
+ D = tmpB;
+}
+
+
+# =================================================================
+# Page 580
+
+# efdcfuf rT,rB
+# ISA-cmt: efdcfuf - Convert Floating-Point Double-Precision from Unsigned Fraction
+# ISA-info: efdcfuf - Form "EVX" Page 580 Category "SP.FD"
+# binutils: e500.d: 80: 10 a0 22 f2 efdcfuf r5,r4
+:efdcfuf D,B is OP=4 & D & BITS_16_20=0 & B & XOP_0_10=754
+{
+ # load fractional divisor as a float
+ tmpA:8 = 0x0000000100000000;
+ tmpA = int2float( tmpA );
+ setSPEFSCR_L( tmpA );
+
+ # float the fractional portion of register B
+ tmpB:8 = int2float( B:4 );
+ setSPEFSCR_L( tmpB );
+ tmpB = tmpB f/ tmpA;
+ setSPEFSCRDivFlags_L( tmpB, tmpA, tmpB );
+
+ D = tmpB;
+}
+
+
+# =================================================================
+# Page 579
+
+# efdcfui rT,rB
+# ISA-cmt: efdcfui - Convert Floating-Point Double-Precision from Unsigned Integer
+# ISA-info: efdcfui - Form "EVX" Page 579 Category "SP.FD"
+# binutils: e500.d: 74: 10 a0 22 f0 efdcfui r5,r4
+:efdcfui D,B is OP=4 & D & BITS_16_20=0 & B & XOP_0_10=752
+{
+ tmp:8 = int2float( B:4 );
+ setSPEFSCR_L( tmp );
+
+ setSummarySPEFSCR();
+
+ D = tmp;
+}
+
+
+# =================================================================
+# Page 580
+
+# efdcfuid rT,rB
+# ISA-cmt: efdcfuid - Convert Floating-Point Double-Precision from Unsigned Integer Doubleword
+# ISA-info: efdcfuid - Form "EVX" Page 580 Category "SP.FD"
+# binutils: e500.d: 78: 10 a0 22 e2 efdcfuid r5,r4
+:efdcfuid D,B is OP=4 & D & BITS_16_20=0 & B & XOP_0_10=738
+{
+ tmp:8 = int2float( B );
+ setSPEFSCR_L( tmp );
+
+ setSummarySPEFSCR();
+
+ D = tmp;
+}
+
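+# For the unsigned-fraction form (efdcfuf above) the scale factor is 2^32,
+# the 0x0000000100000000 divisor, since all 32 bits are fraction bits:
+# B:4 = 0x80000000 represents 0x80000000 / 2^32 = 0.5.
+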
"SP.FD" +# binutils: e500.d: 58: 12 84 1a ee efdcmpeq cr5,r4,r3 +:efdcmpeq CRFD,A,B is OP=4 & CRFD & BITS_21_22=0 & A & B & XOP_0_10=750 +{ + CRFD = A f== B; +} + + +# ================================================================= +# Page 578 + +# efdcmpgt CRFD,rA,rB +# ISA-cmt: efdcmpgt - Floating-Point Double-Precision Compare Greater Than +# ISA-info: efdcmpgt - Form "EVX" Page 578 Category "SP.FD" +# binutils: e500.d: 50: 12 84 1a ec efdcmpgt cr5,r4,r3 +:efdcmpgt CRFD,A,B is OP=4 & CRFD & BITS_21_22=0 & A & B & XOP_0_10=748 +{ + CRFD = A f> B; +} + + +# ================================================================= +# Page 578 + +# efdcmplt CRFD,rA,rB +# ISA-cmt: efdcmplt - Floating-Point Double-Precision Compare Less Than +# ISA-info: efdcmplt - Form "EVX" Page 578 Category "SP.FD" +# binutils: e500.d: 54: 12 84 1a ed efdcmplt cr5,r4,r3 +:efdcmplt CRFD,A,B is OP=4 & CRFD & BITS_21_22=0 & A & B & XOP_0_10=749 +{ + CRFD = A f< B; +} + + +# ================================================================= +# Page 578 + +# efdctsf rT,rB +# ISA-cmt: efdctsf - Convert Floating-Point Double-Precision to Signed Fraction +# ISA-info: efdctsf - Form "EVX" Page 582 Category "SP.FD" +# binutils: e500.d: 9c: 10 a0 22 f7 efdctsf r5,r4 +:efdctsf D,B is OP=4 & D & BITS_16_20=0 & B & XOP_0_10=759 +{ + # multiply by 0x8000 0000 0000 0000 to scale the fraction up to integer range + + # load fractional multiplier as a float + tmpM:8 = 0x8000000000000000; + tmpM = int2float( tmpM ); + setSPEFSCR_L( tmpM ); + + # load saturation limit as a float + tmpL:8 = 0x8000000000000000 - 1; + tmpL = int2float( tmpL ); + setSPEFSCR_L( tmpL ); + + # scale the saturation limit to a fractional float + tmpL = tmpL f/ tmpM; + setSPEFSCRDivFlags_L( tmpL, tmpM, tmpL ); + + tmpB:8 = B; + + # check if less than or equal to positive saturation limit + if ( tmpB f<= tmpL ) goto ; + + # set to positive saturation + tmpB = tmpL; + spef_fx = 1; + spef_finxs = 1; + spef_fg = 1; + + goto ; + + + + # check if greater than or equal to negative saturation limit + tmpL = f-( tmpL ); + if ( tmpB f>= tmpL ) goto ; + + # set to negative saturation + tmpB = tmpL; + spef_fx = 1; + spef_finxs = 1; + spef_fg = 1; + + + + # scale the fractional portion up to integer side of mantissa + tmpB = tmpB f* tmpM; + setSPEFSCRMulFlags_L( tmpB, tmpM, tmpB ); + + # truncate back to signed fraction format + tmpC:8 = trunc( tmpB ); + setSPEFSCR_L( tmpB ); + + setSummarySPEFSCR(); + + D = tmpC; +} + + +# ================================================================= +# Page 580 + +# efdctsi rT,rB +# ISA-cmt: efdctsi - Convert Floating-Point Double-Precision to Signed Integer +# ISA-info: efdctsi - Form "EVX" Page 580 Category "SP.FD" +# binutils: e500.d: 84: 10 a0 22 f5 efdctsi r5,r4 +:efdctsi D,B is OP=4 & D & BITS_16_20=0 & B & XOP_0_10=757 +{ + # create zero float constant + tmpA:8 = 0; + tmpA = int2float( tmpA ); + + # check if negative + if ( B f< tmpA ) goto ; + + tmpB:8 = round( B ); + setSPEFSCR_L( tmpB ); + + # limit to positive saturation + if ( tmpB <= 0x000000007FFFFFFF ) goto ; + tmpB = 0x000000007FFFFFFF; + spef_fx = 1; + spef_finxs = 1; + spef_fg = 1; + + + + goto ; + + + + # negate the float + tmpB = round( f-( B ) ); + setSPEFSCR_L( tmpB ); + + # limit to negative saturation + if ( tmpB <= 0x0000000080000000 ) goto ; + tmpB = 0x0000000080000000; + spef_fx = 1; + spef_finxs = 1; + spef_fg = 1; + + + + # negate the signed int + tmpB = -( tmpB ); + + + + setSummarySPEFSCR(); + + # assign to lower word of D + D = ( D & 0xFFFFFFFF00000000 
+ + +# ================================================================= +# Page 581 + +# efdctsidz rT,rB +# ISA-cmt: efdctsidz - Convert Floating-Point Double-Precision to Signed Integer Doubleword with Round toward Zero +# ISA-info: efdctsidz - Form "EVX" Page 581 Category "SP.FD" +# binutils: e500.d: 88: 10 a0 22 eb efdctsidz r5,r4 +# Note: This may not work correctly as the number approaches saturation; too little (16 digits) precision in mantissa +:efdctsidz D,B is OP=4 & D & BITS_16_20=0 & B & XOP_0_10=747 +{ + # create zero float constant + tmpA:8 = 0; + tmpA = int2float( tmpA ); + + tmpB:8 = B; + + # check if negative + if ( tmpB f< tmpA ) goto <negative>; + + # load saturation limit as a float + tmpL:8 = 0x8000000000000000 - 1; + tmpL = int2float( tmpL ); + setSPEFSCR_L( tmpL ); + + # limit to saturation + if ( tmpB <= tmpL ) goto <pos_ok>; + tmpB = tmpL; + spef_fx = 1; + spef_finxs = 1; + spef_fg = 1; + + <pos_ok> + + tmpB = trunc( tmpB ); + setSPEFSCR_L( tmpB ); + + goto <done>; + + <negative> + + # load saturation limit as a float + tmpL = 0x8000000000000000; + tmpL = int2float( tmpL ); + setSPEFSCR_L( tmpL ); + + # negate float (make positive) + tmpB = f-( tmpB ); + + # limit to saturation + if ( tmpB <= tmpL ) goto <neg_ok>; + tmpB = tmpL; + spef_fx = 1; + spef_finxs = 1; + spef_fg = 1; + + <neg_ok> + + tmpB = trunc( tmpB ); + setSPEFSCR_L( tmpB ); + + # negate the signed int + tmpB = -( tmpB ); + + <done> + + setSummarySPEFSCR(); + + D = tmpB; +} + + +# ================================================================= +# Page 582 + +# efdctsiz rT,rB +# ISA-cmt: efdctsiz - Convert Floating-Point Double-Precision to Signed Integer with Round toward Zero +# ISA-info: efdctsiz - Form "EVX" Page 582 Category "SP.FD" +# binutils: e500.d: 8c: 10 a0 22 fa efdctsiz r5,r4 +:efdctsiz D,B is OP=4 & D & BITS_16_20=0 & B & XOP_0_10=762 +{ + # create zero float constant + tmpA:8 = 0; + tmpA = int2float( tmpA ); + + # check if negative + if ( B f< tmpA ) goto <negative>; + + tmpB:8 = trunc( B ); + setSPEFSCR_L( tmpB ); + + # limit to positive saturation + if ( tmpB <= 0x000000007FFFFFFF ) goto <pos_ok>; + tmpB = 0x000000007FFFFFFF; + spef_fx = 1; + spef_finxs = 1; + spef_fg = 1; + + <pos_ok> + + goto <done>; + + <negative> + + # negate the float + tmpB = trunc( f-( B ) ); + setSPEFSCR_L( tmpB ); + + # limit to negative saturation + if ( tmpB <= 0x0000000080000000 ) goto <neg_ok>; + tmpB = 0x0000000080000000; + spef_fx = 1; + spef_finxs = 1; + spef_fg = 1; + + <neg_ok> + + # negate the signed int + tmpB = -( tmpB ); + + <done> + + setSummarySPEFSCR(); + + # assign to lower word of D + D = ( D & 0xFFFFFFFF00000000 ) | zext( tmpB:4 ); +}
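The trailing "z" in efdctsiz/efdctsidz means round toward zero, while efdctsi rounds to nearest; that is the only semantic difference between the forms. In Python terms (ignoring the SPE rounding-mode field, which this p-code also does not model):

    import math

    x = 2.7
    assert math.trunc(x) == 2 and math.trunc(-x) == -2   # the "z" forms
    assert round(x) == 3 and round(-x) == -3             # efdctsi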
+ + +# ================================================================= +# Page 582 + +# efdctuf rT,rB +# ISA-cmt: efdctuf - Convert Floating-Point Double-Precision to Unsigned Fraction +# ISA-info: efdctuf - Form "EVX" Page 582 Category "SP.FD" +# binutils: e500.d: a0: 10 a0 22 f6 efdctuf r5,r4 +:efdctuf D,B is OP=4 & D & BITS_16_20=0 & B & XOP_0_10=758 +{ + # multiply by 0x0000 0001 0000 0000 to scale the fraction up to integer range + + # load fractional multiplier as a float + tmpM:8 = 0x0000000100000000; + tmpM = int2float( tmpM ); + setSPEFSCR_L( tmpM ); + + # load saturation limit as a float + tmpL:8 = 0x0000000100000000 - 1; + tmpL = int2float( tmpL ); + setSPEFSCR_L( tmpL ); + + # scale the saturation limit to a fractional float + tmpL = tmpL f/ tmpM; + setSPEFSCRDivFlags_L( tmpL, tmpM, tmpL ); + + # get B float up to 64 bit width + tmpB:8 = B; + setSPEFSCR_L( tmpB ); + + # check if less than or equal to positive saturation limit + if ( tmpB f<= tmpL ) goto <no_sat>; + + # set to saturation + tmpB = tmpL; + spef_fx = 1; + spef_finxs = 1; + spef_fg = 1; + + <no_sat> + + # scale the fractional portion up to integer side of mantissa + tmpB = tmpB f* tmpM; + setSPEFSCRMulFlags_L( tmpB, tmpM, tmpB ); + + # truncate back to integer + tmpC:4 = trunc( tmpB ); + setSPEFSCR_L( tmpC ); + + setSummarySPEFSCR(); + + # assign to lower word of D + D = ( D & 0xFFFFFFFF00000000 ) | zext( tmpC ); +} + + +# ================================================================= +# Page 580 + +# efdctui rT,rB +# ISA-cmt: efdctui - Convert Floating-Point Double-Precision to Unsigned Integer +# ISA-info: efdctui - Form "EVX" Page 580 Category "SP.FD" +# binutils: e500.d: 90: 10 a0 22 f4 efdctui r5,r4 +:efdctui D,B is OP=4 & D & BITS_16_20=0 & B & XOP_0_10=756 +{ + tmpB:8 = B; + + # load saturation limit as a float + tmpL:8 = 0x00000000FFFFFFFF; + tmpL = int2float( tmpL ); + setSPEFSCR_L( tmpL ); + + # limit to saturation + if ( tmpB f<= tmpL ) goto <no_sat>; + tmpB = tmpL; + spef_fx = 1; + spef_finxs = 1; + spef_fg = 1; + + <no_sat> + + # round back to integer + tmpC:4 = trunc(round( tmpB )); + setSPEFSCR_L( tmpB ); + + setSummarySPEFSCR(); + + # assign to lower word of D + D = ( D & 0xFFFFFFFF00000000 ) | zext( tmpC ); +} + + +# ================================================================= +# Page 581 + +# efdctuidz rT,rB +# ISA-cmt: efdctuidz - Convert Floating-Point Double-Precision to Unsigned Integer Doubleword with Round toward Zero +# ISA-info: efdctuidz - Form "EVX" Page 581 Category "SP.FD" +# binutils: e500.d: 94: 10 a0 22 ea efdctuidz r5,r4 +:efdctuidz D,B is OP=4 & D & BITS_16_20=0 & B & XOP_0_10=746 +{ + tmpB:8 = B; + + # load saturation limit as a float + tmpL:8 = 0xFFFFFFFFFFFFFFFF; + tmpL = int2float( tmpL ); + setSPEFSCR_L( tmpL ); + + # limit to saturation + if ( tmpB f<= tmpL ) goto <no_sat>; + tmpB = tmpL; + spef_fx = 1; + spef_finxs = 1; + spef_fg = 1; + + <no_sat> + + tmpB = trunc( tmpB ); + + setSummarySPEFSCR(); + + D = tmpB; +} + + +# ================================================================= +# Page 582 + +# efdctuiz rT,rB +# ISA-cmt: efdctuiz - Convert Floating-Point Double-Precision to Unsigned Integer with Round toward Zero +# ISA-info: efdctuiz - Form "EVX" Page 582 Category "SP.FD" +# binutils: e500.d: 98: 10 a0 22 f8 efdctuiz r5,r4 +:efdctuiz D,B is OP=4 & D & BITS_16_20=0 & B & XOP_0_10=760 +{ + tmpB:8 = B; + + # load saturation limit as a float + tmpL:8 = 0x00000000FFFFFFFF; + tmpL = int2float( tmpL ); + setSPEFSCR_L( tmpL ); + + # limit to saturation + if ( tmpB f<= tmpL ) goto <no_sat>; + tmpB = tmpL; + spef_fx = 1; + spef_finxs = 1; + spef_fg = 1; + + <no_sat> + + tmpB = trunc( tmpB ); + + setSummarySPEFSCR(); + + # assign to lower word of D + D = ( D & 0xFFFFFFFF00000000 ) | zext( tmpB:4 ); +} + + +# ================================================================= +# Page 577 + +# efddiv rT,rA,rB +# ISA-cmt: efddiv - Floating-Point Double-Precision Divide +# ISA-info: efddiv - Form "EVX" Page 577 Category "SP.FD" +# binutils: e500.d: 4c: 10 a4 1a e9 efddiv r5,r4,r3 +:efddiv D,A,B is OP=4 & D & A & B & XOP_0_10=745 +{ + D = A f/ B; + setSPEFSCRDivFlags_L( A, B, D ); +} + + +# ================================================================= +# Page 577 + +# efdmul rT,rA,rB +# ISA-cmt: efdmul - Floating-Point Double-Precision Multiply +# ISA-info: efdmul - Form "EVX" Page 577 Category "SP.FD" +# binutils: e500.d: 48: 10 a4 1a e8 efdmul r5,r4,r3 +:efdmul D,A,B is OP=4 & D & A & B & XOP_0_10=744 +{ + D = A f* B; + setSPEFSCRMulFlags_L( A, B, D ); +} + + +# 
================================================================= +# Page 576 + +# efdnabs rT,rA +# ISA-cmt: efdnabs - Floating-Point Double-Precision Negative Absolute Value +# ISA-info: efdnabs - Form "EVX" Page 576 Category "SP.FD" +# binutils: e500.d: 38: 10 a4 02 e5 efdnabs r5,r4 +:efdnabs D,A is OP=4 & D & A & BITS_11_15=0 & XOP_0_10=741 +{ + D = f- ( abs( A ) ); +} + + +# ================================================================= +# Page 577 + +# efdneg rT,rA +# ISA-cmt: efdneg - Floating-Point Double-Precision Negate +# ISA-info: efdneg - Form "EVX" Page 576 Category "SP.FD" +# binutils: e500.d: 3c: 10 a4 02 e6 efdneg r5,r4 +:efdneg D,A is OP=4 & D & A & BITS_11_15=0 & XOP_0_10=742 +{ + D = f-( A ); +} + + +# ================================================================= +# Page 577 + +# efdsub rT,rA,rB +# ISA-cmt: efdsub - Floating-Point Double-Precision Subtract +# ISA-info: efdsub - Form "EVX" Page 577 Category "SP.FD" +# binutils: e500.d: 44: 10 a4 1a e1 efdsub r5,r4,r3 +:efdsub D,A,B is OP=4 & D & A & B & XOP_0_10=737 +{ + D = A f- B; + setSPEFSCRSubFlags_L( A, B, D ); +} + + +# ================================================================= +# Page 579 + +# efdtsteq CRFD,rA,rB +# ISA-cmt: efdtsteq - Floating-Point Double-Precision Test Equal +# ISA-info: efdtsteq - Form "EVX" Page 579 Category "SP.FD" +# binutils: e500.d: 68: 12 84 1a fe efdtsteq cr5,r4,r3 +:efdtsteq CRFD,A,B is OP=4 & CRFD & BITS_21_22=0 & A & B & XOP_0_10=766 +{ + CRFD = A f== B; +} + + +# ================================================================= +# Page 578 + +# efdtstgt CRFD,rA,rB +# ISA-cmt: efdtstgt - Floating-Point Double-Precision Test Greater Than +# ISA-info: efdtstgt - Form "EVX" Page 578 Category "SP.FD" +# binutils: e500.d: 5c: 12 84 1a fc efdtstgt cr5,r4,r3 +# binutils: e500.d: 60: 12 84 1a fc efdtstgt cr5,r4,r3 +:efdtstgt CRFD,A,B is OP=4 & CRFD & BITS_21_22=0 & A & B & XOP_0_10=764 +{ + CRFD = A f> B; +} + + +# ================================================================= +# Page 579 + +# efdtstlt CRFD,rA,rB +# ISA-cmt: efdtstlt - Floating-Point Double-Precision Test Less Than +# ISA-info: efdtstlt - Form "EVX" Page 579 Category "SP.FD" +# binutils: e500.d: 64: 12 84 1a fd efdtstlt cr5,r4,r3 +:efdtstlt CRFD,A,B is OP=4 & CRFD & BITS_21_22=0 & A & B & XOP_0_10=765 +{ + CRFD = A f< B; +} + + +# ================================================================= +# Page 583 + +# efscfd rT,rB +# ISA-cmt: efscfd - Floating-Point Single-Precision Convert from Double-Precision +# ISA-info: efscfd - Form "EVX" Page 583 Category "SP.FD" +# binutils: e500.d: 30: 10 a0 22 cf efscfd r5,r4 +:efscfd D,B is OP=4 & D & BITS_16_20=0 & B & XOP_0_10=719 +{ + tmpB:4 = float2float( B ); + setSPEFSCR_L( tmpB ); + setSummarySPEFSCR(); + + # assign to lower word of D + D = ( D & 0xFFFFFFFF00000000 ) | zext( tmpB ); +} diff --git a/src/third-party/sleigh/processors/PowerPC/data/languages/SPE_EFV.sinc b/src/third-party/sleigh/processors/PowerPC/data/languages/SPE_EFV.sinc new file mode 100644 index 00000000..b71ddd36 --- /dev/null +++ b/src/third-party/sleigh/processors/PowerPC/data/languages/SPE_EFV.sinc @@ -0,0 +1,1491 @@ +# Based on "PowerISA Version 2.06 Revision B" document dated July 23, 2010 +# Category: SPE.Embedded Float Vector Instructions + + +# ================================================================= +# Page 561 + +# evfsabs rT,rA +# ISA-cmt: evfsabs - Vector Floating-Point Single-Precision Absolute Value +# ISA-info: evfsabs - Form "EVX" Page 561 Category "SP.FV" +# binutils: 
mytest.d: 1e0: 10 22 02 84 evfsabs r1,r2 +:evfsabs D,A is OP=4 & D & A & BITS_11_15=0 & XOP_0_10=644 +{ + # + # low section + # + tmpA:4 = abs( A:4 ); + + # + # high section + # + tmpB:4 = abs( A(4) ); + + # move results into upper and lower words + tmpC:8 = zext( tmpB ); + tmpC = ( tmpC << 32 ) | zext( tmpA ); + + D = tmpC; +} + +# ================================================================= +# Page 562 + +# evfsadd rT,rA,rB +# ISA-cmt: evfsadd - Vector Floating-Point Single-Precision Add +# ISA-info: evfsadd - Form "EVX" Page 562 Category "SP.FV" +# binutils: mytest.d: 1d8: 10 22 1a 80 evfsadd r1,r2,r3 +:evfsadd D,A,B is OP=4 & D & A & B & XOP_0_10=640 +{ + # + # low section + # + tmpA:4 = A:4 f+ B:4; + setSPEFSCRAddFlags_L( A:4, B:4, tmpA ); + + # + # high section + # + tmpB:4 = A(4) f+ B(4); + + # SLEIGH had a problem with using A(4) and B(4) directly here + tmpD:4 = A(4); + tmpE:4 = B(4); + setSPEFSCRAddFlags_H( tmpD, tmpE, tmpB ); + + # move results into upper and lower words + tmpC:8 = zext( tmpB ); + tmpC = ( tmpC << 32 ) | zext( tmpA ); + + D = tmpC; +} + + +# ================================================================= +# Page 566 + +# evfscfsf rT,rB +# ISA-cmt: evfscfsf - Vector Convert Floating-Point Single-Precision from Signed Fraction +# ISA-info: evfscfsf - Form "EVX" Page 566 Category "SP.FV" +# binutils: mytest.d: 20c: 10 20 12 93 evfscfsf r1,r2 +:evfscfsf D,B is OP=4 & D & BITS_16_20=0 & B & XOP_0_10=659 +{ + # load fractional divisor as a float + tmpA:4 = 0x80000000; + tmpA = int2float( tmpA ); + setSPEFSCR_L( tmpA ); + + # + # low section + # + tmpE:4 = B:4; + + # check if negative + if ( ( tmpE & 0x80000000 ) != 0 ) goto <negative_low>; + + # float the fractional portion of register B + tmpB:4 = int2float( tmpE ); + setSPEFSCR_L( tmpB ); + tmpC:4 = tmpB f/ tmpA; + setSPEFSCRDivFlags_L( tmpB, tmpA, tmpC ); + + goto <done_low>; + + <negative_low> + + # float the fractional portion of register B, 2's complement negate + tmpB = int2float( -( tmpE ) ); + setSPEFSCR_L( tmpB ); + tmpC = tmpB f/ tmpA; + setSPEFSCRDivFlags_L( tmpB, tmpA, tmpC ); + + # negate the float + tmpC = f-( tmpC ); + setSPEFSCR_L( tmpC ); + + <done_low> + + setSummarySPEFSCR(); + + # + # high section + # + tmpE = B(4); + + # check if negative + if ( ( tmpE & 0x80000000 ) != 0 ) goto <negative_high>; + + # float the fractional portion of register B + tmpB = int2float( tmpE ); + setSPEFSCR_H( tmpB ); + tmpD:4 = tmpB f/ tmpA; + setSPEFSCRDivFlags_H( tmpB, tmpA, tmpD ); + + goto <done_high>; + + <negative_high> + + # float the fractional portion of register B, 2's complement negate + tmpB = int2float( -( tmpE ) ); + setSPEFSCR_H( tmpB ); + tmpD = tmpB f/ tmpA; + setSPEFSCRDivFlags_H( tmpB, tmpA, tmpD ); + + # negate the float + tmpD = f-( tmpD ); + setSPEFSCR_H( tmpD ); + + <done_high> + + setSummarySPEFSCR(); + + # move results into upper and lower words + tmpZ:8 = zext( tmpD ); + tmpZ = ( tmpZ << 32 ) | zext( tmpC ); + + D = tmpZ; +}
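A note on the operand syntax used throughout these vector forms: B:4 truncates the 64-bit register to its low 32-bit word, while B(4) shifts right by 4 bytes first, yielding the high word; results are rebuilt with the recurring zext/shift/or idiom. The same arithmetic in Python (helper names are ours):

    def split_words(r: int) -> tuple[int, int]:
        # (low, high) halves of a 64-bit value, as B:4 and B(4) produce.
        return r & 0xFFFFFFFF, (r >> 32) & 0xFFFFFFFF

    def join_words(high: int, low: int) -> int:
        # The recurring tmpZ = ( zext(high) << 32 ) | zext(low) rebuild.
        return ((high & 0xFFFFFFFF) << 32) | (low & 0xFFFFFFFF)

    lo, hi = split_words(0x1122334455667788)
    assert (lo, hi) == (0x55667788, 0x11223344)
    assert join_words(hi, lo) == 0x1122334455667788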
+ + +# ================================================================= +# Page 566 + +# evfscfsi rT,rB +# ISA-cmt: evfscfsi - Vector Convert Floating-Point Single-Precision from Signed Integer +# ISA-info: evfscfsi - Form "EVX" Page 566 Category "SP.FV" +# binutils: mytest.d: 204: 10 20 12 91 evfscfsi r1,r2 +:evfscfsi D,B is OP=4 & D & BITS_16_20=0 & B & XOP_0_10=657 +{ + # + # low section + # + tmpE:4 = B:4; + + # check if negative + if ( ( tmpE & 0x80000000 ) != 0 ) goto <negative_low>; + + # float the integer portion of register B + tmpB:4 = int2float( tmpE ); + setSPEFSCR_L( tmpB ); + + goto <done_low>; + + <negative_low> + + # float the integer portion of register B, 2's complement negate + tmpB = int2float( -( tmpE ) ); + setSPEFSCR_L( tmpB ); + + # negate the float + tmpB = f-( tmpB ); + setSPEFSCR_L( tmpB ); + + <done_low> + + setSummarySPEFSCR(); + + # + # high section + # + tmpE = B(4); + + # check if negative + if ( ( tmpE & 0x80000000 ) != 0 ) goto <negative_high>; + + # float the integer portion of register B + tmpC:4 = int2float( tmpE ); + setSPEFSCR_H( tmpC ); + + goto <done_high>; + + <negative_high> + + # float the integer portion of register B, 2's complement negate + tmpC = int2float( -( tmpE ) ); + setSPEFSCR_H( tmpC ); + + # negate the float + tmpC = f-( tmpC ); + setSPEFSCR_H( tmpC ); + + <done_high> + + setSummarySPEFSCR(); + + # move results into upper and lower words + tmpZ:8 = zext( tmpC ); + tmpZ = ( tmpZ << 32 ) | zext( tmpB ); + + D = tmpZ; +} + + +# ================================================================= +# Page 566 + +# evfscfuf rT,rB +# ISA-cmt: evfscfuf - Vector Convert Floating-Point Single-Precision from Unsigned Fraction +# ISA-info: evfscfuf - Form "EVX" Page 566 Category "SP.FV" +# binutils: mytest.d: 208: 10 20 12 92 evfscfuf r1,r2 +:evfscfuf D,B is OP=4 & D & BITS_16_20=0 & B & XOP_0_10=658 +{ + # load fractional divisor as a float + tmpA:8 = 0x0000000100000000; + tmpF:4 = int2float( tmpA ); + setSPEFSCR_L( tmpF ); + + # + # low section + # + tmpE:4 = B:4; + + # float the fractional portion of register B + tmpB:4 = int2float( tmpE ); + setSPEFSCR_L( tmpB ); + tmpC:4 = tmpB f/ tmpF; + setSPEFSCRDivFlags_L( tmpB, tmpF, tmpC ); + + # + # high section + # + tmpE = B(4); + + # float the fractional portion of register B + tmpB = int2float( tmpE ); + setSPEFSCR_H( tmpB ); + tmpD:4 = tmpB f/ tmpF; + setSPEFSCRDivFlags_H( tmpB, tmpF, tmpD ); + + # move results into upper and lower words + tmpZ:8 = zext( tmpD ); + tmpZ = ( tmpZ << 32 ) | zext( tmpC ); + + D = tmpZ; +} + + +# ================================================================= +# Page 566 + +# evfscfui rT,rB +# ISA-cmt: evfscfui - Vector Convert Floating-Point Single-Precision from Unsigned Integer +# ISA-info: evfscfui - Form "EVX" Page 566 Category "SP.FV" +# binutils: mytest.d: 200: 10 20 12 90 evfscfui r1,r2 +:evfscfui D,B is OP=4 & D & BITS_16_20=0 & B & XOP_0_10=656 +{ + # + # low section + # + tmpE:4 = B:4; + + tmpC:4 = int2float( tmpE ); + setSPEFSCR_L( tmpC ); + + # + # high section + # + tmpE = B(4); + + tmpD:4 = int2float( tmpE ); + setSPEFSCR_H( tmpD ); + + setSummarySPEFSCR(); + + # move results into upper and lower words + tmpZ:8 = zext( tmpD ); + tmpZ = ( tmpZ << 32 ) | zext( tmpC ); + + D = tmpZ; +} + + +# ================================================================= +# Page 564 + +# evfscmpeq CRFD,rA,rB +# ISA-cmt: evfscmpeq - Vector Floating-Point Single-Precision Compare Equal +# ISA-info: evfscmpeq - Form "EVX" Page 564 Category "SP.FV" +# binutils: mytest.d: 1fc: 10 82 1a 8e evfscmpeq cr1,r2,r3 +:evfscmpeq CRFD,A,B is OP=4 & CRFD & BITS_21_22=0 & A & B & XOP_0_10=654 +{ + tmpA:4 = A:4; + tmpB:4 = B:4; + tmpC:4 = A(4); + tmpD:4 = B(4); + + tmpL:1 = tmpA f== tmpB; + tmpH:1 = tmpC f== tmpD; + + CRFD = (8 * tmpH ) + (4 * tmpL ) + (2 * (tmpH | tmpL) ) + (tmpH & tmpL); +}
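evfscmpeq above, and the remaining compare/test forms below, all pack the per-lane results into a 4-bit CR field the same way: bit 3 is the high-lane result, bit 2 the low-lane result, bit 1 "either lane", bit 0 "both lanes". A small Python model of that packing expression:

    def pack_crfd(high_bit: int, low_bit: int) -> int:
        return (8 * high_bit) + (4 * low_bit) \
             + (2 * (high_bit | low_bit)) + (high_bit & low_bit)

    assert pack_crfd(1, 0) == 0b1010   # high lane set, "either" set
    assert pack_crfd(1, 1) == 0b1111   # both lanes set, "either" and "both" set
    assert pack_crfd(0, 0) == 0b0000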
+ + +# ================================================================= +# Page 563 + +# evfscmpgt CRFD,rA,rB +# ISA-cmt: evfscmpgt - Vector Floating-Point Single-Precision Compare Greater Than +# ISA-info: evfscmpgt - Form "EVX" Page 563 Category "SP.FV" +# binutils: mytest.d: 1f4: 10 82 1a 8c evfscmpgt cr1,r2,r3 +:evfscmpgt CRFD,A,B is OP=4 & CRFD & BITS_21_22=0 & A & B & XOP_0_10=652 +{ + tmpA:4 = A:4; + tmpB:4 = B:4; + tmpC:4 = A(4); + tmpD:4 = B(4); + + tmpL:1 = tmpA f> tmpB; + tmpH:1 = tmpC f> tmpD; + + CRFD = (8 * tmpH ) + (4 * tmpL ) + (2 * (tmpH | tmpL) ) + (tmpH & tmpL); +} + + +# ================================================================= +# Page 563 + +# evfscmplt CRFD,rA,rB +# ISA-cmt: evfscmplt - Vector Floating-Point Single-Precision Compare Less Than +# ISA-info: evfscmplt - Form "EVX" Page 563 Category "SP.FV" +# binutils: mytest.d: 1f8: 10 82 1a 8d evfscmplt cr1,r2,r3 +:evfscmplt CRFD,A,B is OP=4 & CRFD & BITS_21_22=0 & A & B & XOP_0_10=653 +{ + tmpA:4 = A:4; + tmpB:4 = B:4; + tmpC:4 = A(4); + tmpD:4 = B(4); + + tmpL:1 = tmpA f< tmpB; + tmpH:1 = tmpC f< tmpD; + + CRFD = (8 * tmpH ) + (4 * tmpL ) + (2 * (tmpH | tmpL) ) + (tmpH & tmpL); +} + + +# ================================================================= +# Page 568 + +# evfsctsf rT,rB +# ISA-cmt: evfsctsf - Vector Convert Floating-Point Single-Precision to Signed Fraction +# ISA-info: evfsctsf - Form "EVX" Page 568 Category "SP.FV" +# binutils: mytest.d: 21c: 10 20 12 97 evfsctsf r1,r2 +:evfsctsf D,B is OP=4 & D & BITS_16_20=0 & B & XOP_0_10=663 +{ + # multiply by 0x8000 0000 to scale the fraction up to integer range + + # load fractional multiplier as a float + tmpM:4 = 0x80000000; + tmpM = int2float( tmpM ); + setSPEFSCR_L( tmpM ); + + # load saturation limit as a float + tmpS:4 = 0x80000000 - 1; + tmpS = int2float( tmpS ); + setSPEFSCR_L( tmpS ); + + # scale the saturation limit to a fractional float + tmpS = tmpS f/ tmpM; + setSPEFSCRDivFlags_L( tmpS, tmpM, tmpS ); + + # form negative saturation limit + tmpN:4 = f-( tmpS ); + + # + # low section + # + tmpB:4 = B:4; + + # check if less than or equal to positive saturation limit + if ( tmpB f<= tmpS ) goto <check_neg_low>; + + # set to positive saturation + tmpB = tmpS; + spef_fx = 1; + spef_finxs = 1; + spef_fg = 1; + + goto <scale_low>; + + <check_neg_low> + + # check if greater than or equal to negative saturation limit + if ( tmpB f>= tmpN ) goto <scale_low>; + + # set to negative saturation + tmpB = tmpN; + spef_fx = 1; + spef_finxs = 1; + spef_fg = 1; + + <scale_low> + + # scale the fractional portion up to integer side of mantissa + tmpB = tmpB f* tmpM; + setSPEFSCRMulFlags_L( tmpB, tmpM, tmpB ); + + # truncate back to signed fraction format + tmpL:4 = trunc( tmpB ); + setSPEFSCR_L( tmpL ); + + setSummarySPEFSCR(); + + # + # high section + # + tmpB = B(4); + + # check if less than or equal to positive saturation limit + if ( tmpB f<= tmpS ) goto <check_neg_high>; + + # set to positive saturation + tmpB = tmpS; + spef_fx = 1; + spef_finxs = 1; + spef_fg = 1; + + goto <scale_high>; + + <check_neg_high> + + # check if greater than or equal to negative saturation limit + if ( tmpB f>= tmpN ) goto <scale_high>; + + # set to negative saturation + tmpB = tmpN; + spef_fx = 1; + spef_finxs = 1; + spef_fg = 1; + + <scale_high> + + # scale the fractional portion up to integer side of mantissa + tmpB = tmpB f* tmpM; + setSPEFSCRMulFlags_H( tmpB, tmpM, tmpB ); + + # truncate back to signed fraction format + tmpH:4 = trunc( tmpB ); + setSPEFSCR_H( tmpH ); + + setSummarySPEFSCR(); + + # move results into upper and lower words + tmpZ:8 = zext( tmpH ); + tmpZ = ( tmpZ << 32 ) | zext( tmpL ); + + D = tmpZ; +}
+ + +# ================================================================= +# Page 567 + +# evfsctsi rT,rB +# ISA-cmt: evfsctsi - Vector Convert Floating-Point Single-Precision to Signed Integer +# ISA-info: evfsctsi - Form "EVX" Page 567 Category "SP.FV" +# binutils: mytest.d: 214: 10 20 12 95 evfsctsi r1,r2 +:evfsctsi D,B is OP=4 & D & BITS_16_20=0 & B & XOP_0_10=661 +{ + # create zero float constant + tmpA:4 = 0; + tmpA = int2float( tmpA ); + + # + # low section + # + tmpB:4 = B:4; + + # check if negative + if ( tmpB f< tmpA ) goto <negative_low>; + + tmpB = round( tmpB ); + setSPEFSCR_L( tmpB ); + + # limit to positive saturation + if ( tmpB <= 0x000000007FFFFFFF ) goto <pos_ok_low>; + tmpB = 0x000000007FFFFFFF; + spef_fx = 1; + spef_finxs = 1; + spef_fg = 1; + + <pos_ok_low> + + tmpL:4 = tmpB; + + goto <done_low>; + + <negative_low> + + # negate the float + tmpB = round( f-( tmpB ) ); + setSPEFSCR_L( tmpB ); + + # limit to negative saturation + if ( tmpB <= 0x0000000080000000 ) goto <neg_ok_low>; + tmpB = 0x0000000080000000; + spef_fx = 1; + spef_finxs = 1; + spef_fg = 1; + + <neg_ok_low> + + # negate the signed int + tmpL = -( tmpB ); + + <done_low> + + setSummarySPEFSCR(); + + # + # high section + # + tmpB = B(4); + + # check if negative + if ( tmpB f< tmpA ) goto <negative_high>; + + tmpB = round( tmpB ); + setSPEFSCR_H( tmpB ); + + # limit to positive saturation + if ( tmpB <= 0x000000007FFFFFFF ) goto <pos_ok_high>; + tmpB = 0x000000007FFFFFFF; + spef_fx = 1; + spef_finxs = 1; + spef_fg = 1; + + <pos_ok_high> + + tmpH:4 = tmpB; + + goto <done_high>; + + <negative_high> + + # negate the float + tmpB = round( f-( tmpB ) ); + setSPEFSCR_H( tmpB ); + + # limit to negative saturation + if ( tmpB <= 0x0000000080000000 ) goto <neg_ok_high>; + tmpB = 0x0000000080000000; + spef_fx = 1; + spef_finxs = 1; + spef_fg = 1; + + <neg_ok_high> + + # negate the signed int + tmpH = -( tmpB ); + + <done_high> + + setSummarySPEFSCR(); + + # move results into upper and lower words + tmpZ:8 = zext( tmpH ); + tmpZ = ( tmpZ << 32 ) | zext( tmpL ); + + D = tmpZ; +} + + +# ================================================================= +# Page 567 + +# evfsctsiz rT,rB +# ISA-cmt: evfsctsiz - Vector Convert Floating-Point Single-Precision to Signed Integer with Round toward Zero +# ISA-info: evfsctsiz - Form "EVX" Page 567 Category "SP.FV" +# binutils: mytest.d: 224: 10 20 12 9a evfsctsiz r1,r2 +:evfsctsiz D,B is OP=4 & D & BITS_16_20=0 & B & XOP_0_10=666 +{ + # create zero float constant + tmpA:8 = 0; + tmpA = int2float( tmpA ); + + # create positive saturation float constant + tmpS:8 = 0x000000007FFFFFFF; + tmpS = int2float( tmpS ); + + # create negative saturation float constant + tmpN:8 = 0x0000000080000000; + tmpN = int2float( tmpN ); + + # + # low section + # + tmpB:8 = float2float( B:4 ); + + # check if negative + if ( tmpB f< tmpA ) goto <negative_low>; + + # limit to positive saturation + if ( tmpB f<= tmpS ) goto <pos_ok_low>; + tmpB = tmpS; + spef_fx = 1; + spef_finxs = 1; + spef_fg = 1; + + <pos_ok_low> + + tmpL:4 = trunc( tmpB ); + setSPEFSCR_L( tmpL ); + + goto <done_low>; + + <negative_low> + + # negate the float + tmpB = f-( tmpB ); + + # limit to negative saturation + if ( tmpB f<= tmpN ) goto <neg_ok_low>; + tmpB = tmpN; + spef_fx = 1; + spef_finxs = 1; + spef_fg = 1; + + <neg_ok_low> + + # negate the signed int + tmpL = -( trunc( tmpB ) ); + setSPEFSCR_L( tmpL ); + + <done_low> + + setSummarySPEFSCR(); + + # + # high section + # + tmpE:4 = B(4); + tmpB = float2float( tmpE ); + + # check if negative + if ( tmpB f< tmpA ) goto <negative_high>; + + # limit to positive saturation + if ( tmpB f<= tmpS ) goto <pos_ok_high>; + tmpB = tmpS; + spef_fx = 1; + spef_finxs = 1; + spef_fg = 1; + + <pos_ok_high> + + tmpH:4 = trunc( tmpB ); + setSPEFSCR_H( tmpH ); + + goto <done_high>; + + <negative_high> + + # negate the float + tmpB = f-( tmpB ); + + # limit to negative saturation + if ( tmpB f<= tmpN ) goto <neg_ok_high>; + tmpB = tmpN; + spef_fx = 1; + spef_finxs = 1; + spef_fg = 1; + + <neg_ok_high> + + # negate the signed int + tmpH = -( trunc( tmpB ) ); + setSPEFSCR_H( tmpH ); + + <done_high> + + setSummarySPEFSCR(); + + # move results into upper and lower words + tmpZ:8 = zext( tmpH ); + tmpZ = ( tmpZ << 32 ) | zext( tmpL ); + + D = tmpZ; +}
+ + +# ================================================================= +# Page 568 + +# evfsctuf rT,rB +# ISA-cmt: evfsctuf - Vector Convert Floating-Point Single-Precision to Unsigned Fraction +# ISA-info: evfsctuf - Form "EVX" Page 568 Category "SP.FV" +# binutils: mytest.d: 218: 10 20 12 96 evfsctuf r1,r2 +:evfsctuf D,B is OP=4 & D & BITS_16_20=0 & B & XOP_0_10=662 +{ + # multiply by 0x0000 0001 0000 0000 to scale the fraction up to integer range + + # load fractional multiplier as a float + tmpM:8 = 0x0000000100000000; + tmpM = int2float( tmpM ); + setSPEFSCR_L( tmpM ); + + # load saturation limit as a float + tmpS:8 = 0x0000000100000000 - 1; + tmpS = int2float( tmpS ); + setSPEFSCR_L( tmpS ); + + # scale the saturation limit to a fractional float + tmpS = tmpS f/ tmpM; + setSPEFSCRDivFlags_L( tmpS, tmpM, tmpS ); + + # + # low section + # + # get B float up to 64 bit width + tmpE:4 = B:4; + tmpB:8 = float2float( tmpE ); + setSPEFSCR_L( tmpB ); + + # check if less than or equal to positive saturation limit + if ( tmpB f<= tmpS ) goto <no_sat_low>; + + # set to saturation + tmpB = tmpS; + spef_fx = 1; + spef_finxs = 1; + spef_fg = 1; + + <no_sat_low> + + # scale the fractional portion up to integer side of mantissa + tmpB = tmpB f* tmpM; + setSPEFSCRMulFlags_L( tmpB, tmpM, tmpB ); + + # truncate back to integer + tmpL:4 = trunc( tmpB ); + setSPEFSCR_L( tmpL ); + + setSummarySPEFSCR(); + + # + # high section + # + # get B float up to 64 bit width + tmpE = B(4); + tmpB = float2float( tmpE ); + setSPEFSCR_H( tmpB ); + + # check if less than or equal to positive saturation limit + if ( tmpB f<= tmpS ) goto <no_sat_high>; + + # set to saturation + tmpB = tmpS; + spef_fx = 1; + spef_finxs = 1; + spef_fg = 1; + + <no_sat_high> + + # scale the fractional portion up to integer side of mantissa + tmpB = tmpB f* tmpM; + setSPEFSCRMulFlags_H( tmpB, tmpM, tmpB ); + + # truncate back to integer + tmpH:4 = trunc( tmpB ); + setSPEFSCR_H( tmpH ); + + setSummarySPEFSCR(); + + # move results into upper and lower words + tmpZ:8 = zext( tmpH ); + tmpZ = ( tmpZ << 32 ) | zext( tmpL ); + + D = tmpZ; +} + + +# ================================================================= +# Page 567 + +# evfsctui rT,rB +# ISA-cmt: evfsctui - Vector Convert Floating-Point Single-Precision to Unsigned Integer +# ISA-info: evfsctui - Form "EVX" Page 567 Category "SP.FV" +# binutils: mytest.d: 210: 10 20 12 94 evfsctui r1,r2 +:evfsctui D,B is OP=4 & D & BITS_16_20=0 & B & XOP_0_10=660 +{ + # load saturation limit as a float + tmpS:8 = 0x00000000FFFFFFFF; + tmpS = int2float( tmpS ); + setSPEFSCR_L( tmpS ); + + # + # low section + # + tmpE:4 = B:4; + tmpB:8 = float2float( tmpE ); + + # limit to saturation + if ( tmpB f<= tmpS ) goto <no_sat_low>; + tmpB = tmpS; + spef_fx = 1; + spef_finxs = 1; + spef_fg = 1; + + <no_sat_low> + + # round back to integer + tmpL:4 = trunc(round( tmpB )); + setSPEFSCR_L( tmpL ); + + setSummarySPEFSCR(); + + # + # high section + # + tmpE = B(4); + tmpB = float2float( tmpE ); + + # limit to saturation + if ( tmpB f<= tmpS ) goto <no_sat_high>; + tmpB = tmpS; + spef_fx = 1; + spef_finxs = 1; + spef_fg = 1; + + <no_sat_high> + + # round back to integer + tmpH:4 = trunc(round( tmpB )); + setSPEFSCR_H( tmpH ); + + setSummarySPEFSCR(); + + # move results into upper and lower words + tmpZ:8 = zext( tmpH ); + tmpZ = ( tmpZ << 32 ) | zext( tmpL ); + + D = tmpZ; +}
+ + +# ================================================================= +# Page 567 + +# evfsctuiz rT,rB +# ISA-cmt: evfsctuiz - Vector Convert Floating-Point Single-Precision to Unsigned Integer with Round toward Zero +# ISA-info: evfsctuiz - Form "EVX" Page 567 Category "SP.FV" +# binutils: mytest.d: 220: 10 20 12 98 evfsctuiz r1,r2 +:evfsctuiz D,B is OP=4 & D & BITS_16_20=0 & B & XOP_0_10=664 +{ + # load saturation limit as a float + tmpS:8 = 0x00000000FFFFFFFF; + tmpS = int2float( tmpS ); + setSPEFSCR_L( tmpS ); + + # + # low section + # + tmpE:4 = B:4; + tmpB:8 = float2float( tmpE ); + + # limit to saturation + if ( tmpB f<= tmpS ) goto <no_sat_low>; + tmpB = tmpS; + spef_fx = 1; + spef_finxs = 1; + spef_fg = 1; + + <no_sat_low> + + tmpL:4 = trunc( tmpB ); + + setSummarySPEFSCR(); + + # + # high section + # + tmpE = B(4); + tmpB = float2float( tmpE ); + + # limit to saturation + if ( tmpB f<= tmpS ) goto <no_sat_high>; + tmpB = tmpS; + spef_fx = 1; + spef_finxs = 1; + spef_fg = 1; + + <no_sat_high> + + tmpH:4 = trunc( tmpB ); + + setSummarySPEFSCR(); + + # move results into upper and lower words + tmpZ:8 = zext( tmpH ); + tmpZ = ( tmpZ << 32 ) | zext( tmpL ); + + D = tmpZ; +} + + +# ================================================================= +# Page 562 + +# evfsdiv rT,rA,rB +# ISA-cmt: evfsdiv - Vector Floating-Point Single-Precision Divide +# ISA-info: evfsdiv - Form "EVX" Page 562 Category "SP.FV" +# binutils: mytest.d: 1f0: 10 22 1a 89 evfsdiv r1,r2,r3 +:evfsdiv D,A,B is OP=4 & D & A & B & XOP_0_10=649 +{ + tmpAL:4 = A:4; + tmpAH:4 = A(4); + tmpBL:4 = B:4; + tmpBH:4 = B(4); + + tmpL:4 = tmpAL f/ tmpBL; + setSPEFSCRDivFlags_L( tmpAL, tmpBL, tmpL ); + + tmpH:4 = tmpAH f/ tmpBH; + setSPEFSCRDivFlags_H( tmpAH, tmpBH, tmpH ); + + # move results into upper and lower words + tmpZ:8 = zext( tmpH ); + tmpZ = ( tmpZ << 32 ) | zext( tmpL ); + + D = tmpZ; +} + + +# ================================================================= +# Page 562 + +# evfsmul rT,rA,rB +# ISA-cmt: evfsmul - Vector Floating-Point Single-Precision Multiply +# ISA-info: evfsmul - Form "EVX" Page 562 Category "SP.FV" +# binutils: mytest.d: 1ec: 10 22 1a 88 evfsmul r1,r2,r3 +:evfsmul D,A,B is OP=4 & D & A & B & XOP_0_10=648 +{ + tmpAL:4 = A:4; + tmpAH:4 = A(4); + tmpBL:4 = B:4; + tmpBH:4 = B(4); + + tmpL:4 = tmpAL f* tmpBL; + setSPEFSCRMulFlags_L( tmpAL, tmpBL, tmpL ); + + tmpH:4 = tmpAH f* tmpBH; + setSPEFSCRMulFlags_H( tmpAH, tmpBH, tmpH ); + + # move results into upper and lower words + tmpZ:8 = zext( tmpH ); + tmpZ = ( tmpZ << 32 ) | zext( tmpL ); + + D = tmpZ; +} + + +# ================================================================= +# Page 561 + +# evfsnabs rT,rA +# ISA-cmt: evfsnabs - Vector Floating-Point Single-Precision Negative Absolute Value +# ISA-info: evfsnabs - Form "EVX" Page 561 Category "SP.FV" +# binutils: mytest.d: 1e4: 10 22 02 85 evfsnabs r1,r2 +:evfsnabs D,A is OP=4 & D & A & BITS_11_15=0 & XOP_0_10=645 +{ + tmpAL:4 = A:4; + tmpAH:4 = A(4); + + tmpL:4 = f- ( abs( tmpAL ) ); + + tmpH:4 = f- ( abs( tmpAH ) ); + + # move results into upper and lower words + tmpZ:8 = zext( tmpH ); + tmpZ = ( tmpZ << 32 ) | zext( tmpL ); + + D = tmpZ; +} + + +# ================================================================= +# Page 561 + +# evfsneg rT,rA +# ISA-cmt: evfsneg - Vector Floating-Point Single-Precision Negate +# ISA-info: evfsneg - Form "EVX" Page 561 Category "SP.FV" +# binutils: mytest.d: 1e8: 10 22 02 86 evfsneg r1,r2 +:evfsneg D,A is OP=4 & D & A & BITS_11_15=0 & XOP_0_10=646 +{ + tmpAL:4 = A:4; + tmpAH:4 = A(4); + + tmpL:4 = f-( tmpAL ); + + tmpH:4 = f-( tmpAH ); + + # move results into upper and lower words + tmpZ:8 = zext( tmpH ); + tmpZ = ( tmpZ << 32 ) | zext( tmpL ); + + D = tmpZ; +}
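evfssub below follows the same lane-wise shape as evfsadd, evfsmul, and evfsdiv above: unpack two single-precision lanes, operate per lane, repack. A runnable Python sketch of that structure (the helper is ours, and SPEFSCR flag side effects are left out):

    import struct

    def evfs_lanes(op, a: int, b: int) -> int:
        # Treat each 64-bit register value as two big-endian float32 lanes.
        ah, al = struct.unpack('>ff', a.to_bytes(8, 'big'))
        bh, bl = struct.unpack('>ff', b.to_bytes(8, 'big'))
        return int.from_bytes(struct.pack('>ff', op(ah, bh), op(al, bl)), 'big')

    a = int.from_bytes(struct.pack('>ff', 1.5, 2.0), 'big')
    b = int.from_bytes(struct.pack('>ff', 0.5, 0.25), 'big')
    expected = int.from_bytes(struct.pack('>ff', 0.75, 0.5), 'big')
    assert evfs_lanes(lambda x, y: x * y, a, b) == expected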
Form "EVX" Page 562 Category "SP.FV" +# binutils: mytest.d: 1dc: 10 22 1a 81 evfssub r1,r2,r3 +:evfssub D,A,B is OP=4 & D & A & B & XOP_0_10=641 +{ + tmpAL:4 = A:4; + tmpAH:4 = A(4); + tmpBL:4 = B:4; + tmpBH:4 = B(4); + + tmpL:4 = tmpAL f- tmpBL; + setSPEFSCRSubFlags_L( tmpAL, tmpBL, tmpL ); + + tmpH:4 = tmpAH f- tmpBH; + setSPEFSCRSubFlags_H( tmpAH, tmpBH, tmpH ); + +} + + +# ================================================================= +# Page 565 + +# evfststeq CRFD,rA,rB +# ISA-cmt: evfststeq - Vector Floating-Point Single-Precision Test Equal +# ISA-info: evfststeq - Form "EVX" Page 565 Category "SP.FV" +# binutils: mytest.d: 230: 10 82 1a 9e evfststeq cr1,r2,r3 +:evfststeq CRFD,A,B is OP=4 & CRFD & BITS_21_22=0 & A & B & XOP_0_10=670 +{ + tmpA:4 = A:4; + tmpB:4 = B:4; + tmpC:4 = A(4); + tmpD:4 = B(4); + + tmpL:1 = tmpA f== tmpB; + tmpH:1 = tmpC f== tmpD; + + CRFD = (8 * tmpH ) + (4 * tmpL ) + (2 * (tmpH | tmpL) ) + (tmpH & tmpL); +} + + +# ================================================================= +# Page 564 + +# evfststgt CRFD,rA,rB +# ISA-cmt: evfststgt - Vector Floating-Point Single-Precision Test Greater Than +# ISA-info: evfststgt - Form "EVX" Page 564 Category "SP.FV" +# binutils: mytest.d: 228: 10 82 1a 9c evfststgt cr1,r2,r3 +:evfststgt CRFD,A,B is OP=4 & CRFD & BITS_21_22=0 & A & B & XOP_0_10=668 +{ + tmpA:4 = A:4; + tmpB:4 = B:4; + tmpC:4 = A(4); + tmpD:4 = B(4); + + tmpL:1 = tmpA f> tmpB; + tmpH:1 = tmpC f> tmpD; + + CRFD = (8 * tmpH ) + (4 * tmpL ) + (2 * (tmpH | tmpL) ) + (tmpH & tmpL); +} + + +# ================================================================= +# Page 565 + +# evfststlt CRFD,rA,rB +# ISA-cmt: evfststlt - Vector Floating-Point Single-Precision Test Less Than +# ISA-info: evfststlt - Form "EVX" Page 565 Category "SP.FV" +# binutils: mytest.d: 22c: 10 82 1a 9d evfststlt cr1,r2,r3 +:evfststlt CRFD,A,B is OP=4 & CRFD & BITS_21_22=0 & A & B & XOP_0_10=669 +{ + tmpA:4 = A:4; + tmpB:4 = B:4; + tmpC:4 = A(4); + tmpD:4 = B(4); + + tmpL:1 = tmpA f< tmpB; + tmpH:1 = tmpC f< tmpD; + + CRFD = (8 * tmpH ) + (4 * tmpL ) + (2 * (tmpH | tmpL) ) + (tmpH & tmpL); +} + + +# ================================================================= +# Page 915 + +# evlddepx rT,rA,rB +# Note: context is not supported +:evlddepx D,RA_OR_ZERO,B is OP=31 & D & RA_OR_ZERO & B & XOP_1_10=799 & BIT_0=0 +{ + ea:$(REGISTER_SIZE) = RA_OR_ZERO + B; + D = *:8(ea); +} + + + +# ================================================================= +# Page 519 + +# evlwhe RT,D(RA) +# evlwhe rT,rA,UI +:evlwhe D,EVUIMM_4_RAt is OP=4 & D & EVUIMM_4_RAt & RA_OR_ZERO & UI & XOP_0_10=785 +{ + ea:$(REGISTER_SIZE) = RA_OR_ZERO + (UI * 4); + + # move results into upper and lower words + tmpZ:8 = zext( *:2(ea + 2) ); + tmpZ = ( tmpZ << 32 ) | zext( *:2(ea) ); + + D = tmpZ; +} + + + +# ================================================================= +# Page 519 + +# evlwhex rT,rA,rB +# ISA-cmt: evlwhex - Vector Load Word into Two Halfwords Even Indexed +# ISA-info: evlwhex - Form "EVX" Page 519 Category "SP" +# binutils: mytest.d: 238: 10 22 1b 10 evlwhex r1,r2,r3 +:evlwhex D,RA_OR_ZERO,B is OP=4 & D & RA_OR_ZERO & B & XOP_0_10=784 +{ + ea:$(REGISTER_SIZE) = RA_OR_ZERO + B; + + # move results into upper and lower words + tmpZ:8 = zext( *:2(ea + 2) ); + tmpZ = ( tmpZ << 32 ) | zext( *:2(ea) ); + + D = tmpZ; +} + + + +# ================================================================= +# Page 521 + +# evlwwsplat RT,D(RA) +# evlwwsplat rT,rA,UI +# ISA-cmt: evlwwsplat - Vector Load Word into Word and 
+ + +# ================================================================= +# Page 521 + +# evlwwsplat RT,D(RA) +# evlwwsplat rT,rA,UI +# ISA-cmt: evlwwsplat - Vector Load Word into Word and Splat +# ISA-info: evlwwsplat - Form "EVX" Page 521 Category "SP" +# binutils: NO-EXAMPLE - evlwwsplat +# collides with maclhwu +:evlwwsplat D,EVUIMM_4_RAt is OP=4 & D & RA_OR_ZERO & EVUIMM_4_RAt & UI & XOP_0_10=793 +{ + ea:$(REGISTER_SIZE) = RA_OR_ZERO + (UI * 4); + + # move results into upper and lower words + tmpZ:8 = zext( *:4(ea) ); + tmpZ = ( tmpZ << 32 ) | zext( *:4(ea) ); + + D = tmpZ; +} + + +# ================================================================= +# Page 521 + +# evlwwsplatx rT,rA,rB +# ISA-cmt: evlwwsplatx - Vector Load Word into Word and Splat Indexed +# ISA-info: evlwwsplatx - Form "EVX" Page 521 Category "SP" +# binutils: mytest.d: 23c: 10 22 1b 18 evlwwsplatx r1,r2,r3 +# collides with maclhwu +:evlwwsplatx D,RA_OR_ZERO,B is OP=4 & D & RA_OR_ZERO & B & XOP_0_10=792 +{ + ea:$(REGISTER_SIZE) = RA_OR_ZERO + B; + + # move results into upper and lower words + tmpZ:8 = zext( *:4(ea) ); + tmpZ = ( tmpZ << 32 ) | zext( *:4(ea) ); + + D = tmpZ; +} + + +# ================================================================= +# Page 541 + +# evmwlsmiaaw rT,rA,rB +# ISA-cmt: evmwlsmiaaw - Vector Multiply Word Low Signed +# ISA-info: evmwlsmiaaw - Form "EVX" Page 541 Category "SP" +# binutils: mytest.d: 248: 10 22 1d 49 evmwlsmiaaw r1,r2,r3 +:evmwlsmiaaw D,A,B is OP=4 & D & A & B & XOP_0_10=1353 +{ + tmpACCL:4 = ACC:4; + tmpACCH:4 = ACC(4); + + tmpAL:8 = zext( A:4 ); + tmp:4 = A(4); + tmpAH:8 = zext( tmp ); + tmpBL:8 = zext( B:4 ); + tmp = B(4); + tmpBH:8 = zext( tmp ); + + temp:8 = tmpAH * tmpBH; + tmpD:4 = tmpACCH + temp:4; + D = ( zext( tmpD ) ) << 32; + temp = tmpAL * tmpBL; + tmpDL:4 = tmpACCL + temp:4; + D = ( D & 0xFFFFFFFF00000000 ) | zext( tmpDL ); + ACC = D; +} + + +# ================================================================= +# Page 541 + +# evmwlsmianw rT,rA,rB +# ISA-cmt: evmwlsmianw - Vector Multiply Word Low Signed +# ISA-info: evmwlsmianw - Form "EVX" Page 541 Category "SP" +# binutils: mytest.d: 254: 10 22 1d c9 evmwlsmianw r1,r2,r3 +:evmwlsmianw D,A,B is OP=4 & D & A & B & XOP_0_10=1481 +{ + tmpACCL:4 = ACC:4; + tmpACCH:4 = ACC(4); + + tmpAL:8 = zext( A:4 ); + tmp:4 = A(4); + tmpAH:8 = zext( tmp ); + tmpBL:8 = zext( B:4 ); + tmp = B(4); + tmpBH:8 = zext( tmp ); + + temp:8 = tmpAH * tmpBH; + tmpD:4 = tmpACCH - temp:4; + D = ( zext( tmpD ) ) << 32; + temp = tmpAL * tmpBL; + tmpDL:4 = tmpACCL - temp:4; + D = ( D & 0xFFFFFFFF00000000 ) | zext( tmpDL ); + ACC = D; +}
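evmwlsmiaaw and evmwlsmianw above zero-extend their operands even though they are the "signed" multiplies; that is sound because only the low 32 bits of each product are kept, and the low word of a product is identical under signed and unsigned interpretation:

    a, b = -3, 7
    low_signed = (a * b) & 0xFFFFFFFF
    low_unsigned = ((a & 0xFFFFFFFF) * (b & 0xFFFFFFFF)) & 0xFFFFFFFF
    assert low_signed == low_unsigned == 0xFFFFFFEB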
+ + +# ================================================================= +# Page 541 + +# evmwlssianw rT,rA,rB +# ISA-cmt: evmwlssianw - Vector Multiply Word Low Signed +# ISA-info: evmwlssianw - Form "EVX" Page 541 Category "SP" +# binutils: mytest.d: 250: 10 22 1d c1 evmwlssianw r1,r2,r3 +:evmwlssianw D,A,B is OP=4 & D & A & B & XOP_0_10=1473 +{ + tmpACCL:4 = ACC:4; + tmpACCH:4 = ACC(4); + + tmpAL:8 = zext( A:4 ); + tmp:4 = A(4); + tmpAH:8 = zext( tmp ); + tmpBL:8 = zext( B:4 ); + tmp = B(4); + tmpBH:8 = zext( tmp ); + + temp:8 = tmpAH * tmpBH; + temp = sext( tmpACCH ) - sext( temp:4 ); + tmpOVH:1 = temp[32,1] ^ temp[31,1]; + + # check for saturation + if ( tmpOVH == 0 ) goto <no_sat_high>; + + if ( temp[32,1] == 1 ) goto <neg_sat_high>; + D = ( D & 0x00000000FFFFFFFF ) | 0x7FFFFFFF00000000; + goto <done_high>; + + <neg_sat_high> + D = ( D & 0x00000000FFFFFFFF ) | 0x8000000000000000; + goto <done_high>; + + <no_sat_high> + D = ( D & 0x00000000FFFFFFFF ) | ( zext( temp:4 ) << 32 ); + + <done_high> + + temp = tmpAL * tmpBL; + temp = sext( tmpACCL ) - sext( temp:4 ); + tmpOVL:1 = temp[32,1] ^ temp[31,1]; + + # check for saturation + if ( tmpOVL == 0 ) goto <no_sat_low>; + + if ( temp[32,1] == 1 ) goto <neg_sat_low>; + D = ( D & 0xFFFFFFFF00000000 ) | 0x000000007FFFFFFF; + goto <done_low>; + + <neg_sat_low> + D = ( D & 0xFFFFFFFF00000000 ) | 0x0000000080000000; + goto <done_low>; + + <no_sat_low> + D = ( D & 0xFFFFFFFF00000000 ) | zext( temp:4 ); + + <done_low> + + ACC = D; + + spef_ovh = tmpOVH; + spef_ov = tmpOVL; + spef_sovh = spef_sovh | tmpOVH; + spef_sov = spef_sov | tmpOVL; +} + + +# ================================================================= +# Page 544 + +# evmwsmi rT,rA,rB +# ISA-cmt: evmwsmi - Vector Multiply Word Signed +# ISA-info: evmwsmi - Form "EVX" Page 544 Category "SP" +# binutils: mytest.d: 244: 10 22 1c 59 evmwsmi r1,r2,r3 +# collides with machhwo +:evmwsmi D,A,B is OP=4 & D & A & B & XOP_0_10=1113 +{ + tmpAL:8 = zext( A:4 ); + tmpBL:8 = zext( B:4 ); + + D = tmpAL * tmpBL; +} + + +# ================================================================= +# Page 544 + +# evmwsmiaa rT,rA,rB +# ISA-cmt: evmwsmiaa - Vector Multiply Word Signed +# ISA-info: evmwsmiaa - Form "EVX" Page 544 Category "SP" +# binutils: mytest.d: 24c: 10 22 1d 59 evmwsmiaa r1,r2,r3 +# collides with macchwo. +:evmwsmiaa D,A,B is OP=4 & D & A & B & XOP_0_10=1369 +{ + tmpAL:8 = zext( A:4 ); + tmpBL:8 = zext( B:4 ); + + temp:8 = tmpAL * tmpBL; + D = ACC + temp; + ACC = D; +} + + +# ================================================================= +# Page 544 + +# evmwsmian rT,rA,rB +# ISA-cmt: evmwsmian - Vector Multiply Word Signed +# ISA-info: evmwsmian - Form "EVX" Page 544 Category "SP" +# binutils: mytest.d: 25c: 10 22 1d d9 evmwsmian r1,r2,r3 +# collides with macchwso. +:evmwsmian D,A,B is OP=4 & D & A & B & XOP_0_10=1497 +{ + tmpAL:8 = zext( A:4 ); + tmpBL:8 = zext( B:4 ); + + temp:8 = tmpAL * tmpBL; + D = ACC - temp; + ACC = D; +} + + +# ================================================================= +# Page 546 + +# evmwumi rT,rA,rB +# ISA-cmt: evmwumi - Vector Multiply Word Unsigned +# ISA-info: evmwumi - Form "EVX" Page 546 Category "SP" +# binutils: mytest.d: 240: 10 22 1c 58 evmwumi r1,r2,r3 +# collides with machhwo +:evmwumi D,A,B is OP=4 & D & A & B & XOP_0_10=1112 +{ + tmpAL:8 = zext( A:4 ); + tmpBL:8 = zext( B:4 ); + + D = tmpAL * tmpBL; +} + + +# ================================================================= +# Page 547 + +# evmwumian rT,rA,rB +# ISA-cmt: evmwumian - Vector Multiply Word Unsigned +# ISA-info: evmwumian - Form "EVX" Page 547 Category "SP" +# binutils: mytest.d: 258: 10 22 1d d8 evmwumian r1,r2,r3 +# collides with macchwso +:evmwumian D,A,B is OP=4 & D & A & B & XOP_0_10=1496 +{ + tmpAL:8 = zext( A:4 ); + tmpBL:8 = zext( B:4 ); + + temp:8 = tmpAL * tmpBL; + D = ACC - temp; + ACC = D; +} + + +# ================================================================= +# Page 549 + +# evsel rT,rA,rB +# ISA-cmt: evsel - Vector Select +# ISA-info: evsel - Form "EVS" Page 549 Category "SP" +# binutils: mytest.d: 1d4: 10 22 1a 7c evsel r1,r2,r3,cr4 +:evsel D,A,B,BFA is OP=4 & D & A & B & XOP_3_10=79 & BFA +{ + tmpAL:8 = zext( A:4 ); + tmp:4 = A(4); + tmpAH:8 = zext( tmp ); + tmpBL:8 = zext( B:4 ); + tmp = B(4); + tmpBH:8 = zext( tmp ); + + tmpBFA:1 = BFA; + + if ( tmpBFA[3,1] == 0 ) goto <sel_b_high>; + D = ( D & 0x00000000FFFFFFFF ) | ( tmpAH << 32 ); + goto <high_done>; + + <sel_b_high> + D = ( D & 0x00000000FFFFFFFF ) | ( tmpBH << 32 ); + + <high_done> + + if ( tmpBFA[2,1] == 0 ) goto <sel_b_low>; + D = ( D & 0xFFFFFFFF00000000 ) | tmpAL; + goto <low_done>; + + <sel_b_low> + D = ( D & 0xFFFFFFFF00000000 ) | tmpBL; + + <low_done> +}
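evmwlssianw's saturation test above works on a sign-extended intermediate one bit wider than the result: after the subtract, bit 32 and bit 31 disagree exactly when the value does not fit in 32 signed bits, and bit 32 then gives the overflow direction. A Python model of that check (helper names are ours):

    def sext32(v: int) -> int:
        v &= 0xFFFFFFFF
        return v - 0x100000000 if v & 0x80000000 else v

    def sub_sat32(acc: int, x: int) -> int:
        temp = (sext32(acc) - sext32(x)) & ((1 << 33) - 1)   # 33-bit result
        if ((temp >> 32) ^ (temp >> 31)) & 1:                # bits disagree
            return 0x80000000 if (temp >> 32) & 1 else 0x7FFFFFFF
        return temp & 0xFFFFFFFF

    assert sub_sat32(0x80000000, 1) == 0x80000000            # negative saturation
    assert sub_sat32(0x7FFFFFFF, 0xFFFFFFFF) == 0x7FFFFFFF   # positive saturation
    assert sub_sat32(5, 3) == 2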
+ + +# ================================================================= +# Page 915 + +# evstddepx rT,rA,rB +# Note: context is not supported +:evstddepx D,RA_OR_ZERO,B is OP=31 & D & RA_OR_ZERO & B & XOP_1_10=927 & BIT_0=0 +{ + ea:$(REGISTER_SIZE) = RA_OR_ZERO + B; + *:8(ea) = D; +} + + + diff --git a/src/third-party/sleigh/processors/PowerPC/data/languages/Scalar_SPFP.sinc b/src/third-party/sleigh/processors/PowerPC/data/languages/Scalar_SPFP.sinc new file mode 100644 index 00000000..f1c10dec --- /dev/null +++ b/src/third-party/sleigh/processors/PowerPC/data/languages/Scalar_SPFP.sinc @@ -0,0 +1,472 @@ +# Based on "EREF: A Reference for Freescale Book E and e500 Core" document version 01/2004 Rev 2.0 +# Instructions that are specific to the (PowerPC) e500 core are implemented as auxiliary processing units (APUs) +# Embedded Vector and Scalar Single-Precision Floating-Point APUs (SPFP APU) + + +# There are three versions of the e500 core, namely the e500v1, the e500v2, and the e500mc. +# A 64-bit evolution of the e500mc core is called the e5500 core. +# All PowerQUICC 85xx devices are based on e500v1 or e500v2 cores. + + +# ================================================================= +# Page 408 + +# efsabs rT,rA 010 1100 0100 +#define pcodeop FloatingPointAbsoluteValue; +:efsabs D,A is OP=4 & D & A & XOP_0_10=0x2C4 & BITS_11_15=0 +{ + # assign to lower word of D + D = ( D & 0xFFFFFFFF00000000 ) | zext( abs( A:4 ) ); +} + +# efsadd rT,rA,rB 010 1100 0000 +#define pcodeop FloatingPointAdd; +:efsadd D,A,B is OP=4 & D & A & B & XOP_0_10=0x2C0 +{ + # assign to lower word of D + D = ( D & 0xFFFFFFFF00000000 ) | zext( A:4 f+ B:4 ); + setFPAddFlags( A:4, B:4, D:4 ); +} + +# ================================================================= +# Page 410 + +# efscfsf rT,rB 010 1101 0011 +#define pcodeop ConvertFloatingPointFromSignedFraction; +:efscfsf D,B is OP=4 & D & B & XOP_0_10=0x2D3 & BITS_16_20=0 +{ + # load fractional divisor as a float + tmpA:4 = 0x80000000; + tmpA = int2float( tmpA ); + setFPRF( tmpA ); + + # check if negative + if ( ( B:4 & 0x80000000 ) != 0 ) goto <negative>; + + # float the fractional portion of register B + tmpB:4 = int2float( B:4 ); + setFPRF( tmpB ); + tmpB = tmpB f/ tmpA; + setFPDivFlags( tmpB, tmpA, tmpB ); + + goto <done>; + + <negative> + + # float the fractional portion of register B, 2's complement negate + tmpB = int2float( -( B:4 ) ); + setFPRF( tmpB ); + tmpB = tmpB f/ tmpA; + setFPDivFlags( tmpB, tmpA, tmpB ); + + # negate the float + tmpB = f-( tmpB ); + setFPRF( tmpB ); + + <done> + + setSummaryFPSCR(); + + # assign to lower word of D + D = ( D & 0xFFFFFFFF00000000 ) | zext( tmpB ); +} + +# efscfsi rT,rB 010 1101 0001 +#define pcodeop ConvertFloatingPointFromSignedInteger; +:efscfsi D,B is OP=4 & D & B & XOP_0_10=0x2D1 & BITS_16_20=0 +{ + # check if negative + if ( ( B:4 & 0x80000000 ) != 0 ) goto <negative>; + + # float the integer portion of register B + tmpB:4 = int2float( B:4 ); + setFPRF( tmpB ); + + goto <done>; + + <negative> + + # float the integer portion of register B, 2's complement negate + tmpB = int2float( -( B:4 ) ); + setFPRF( tmpB ); + + # negate the float + tmpB = f-( tmpB ); + setFPRF( tmpB ); + + <done> + + setSummaryFPSCR(); + + # assign to lower word of D + D = ( D & 0xFFFFFFFF00000000 ) | zext( tmpB ); +}
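Unlike the evfs vector forms, these scalar SPFP instructions write a 32-bit result into the low word of the 64-bit GPR and leave the high word untouched, via the mask-and-or idiom that recurs below. In Python:

    def set_low_word(d: int, w: int) -> int:
        # D = ( D & 0xFFFFFFFF00000000 ) | zext( ... )
        return (d & 0xFFFFFFFF00000000) | (w & 0xFFFFFFFF)

    assert set_low_word(0x1122334455667788, 0xDEADBEEF) == 0x11223344DEADBEEF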
+# efscfuf rT,rB 010 1101 0010 +#define pcodeop ConvertFloatingPointFromUnsignedFraction; +:efscfuf D,B is OP=4 & D & B & XOP_0_10=0x2D2 & BITS_16_20=0 +{ + # load fractional divisor as a float + tmpA:8 = 0x0000000100000000; + tmpA = int2float( tmpA ); + setFPRF( tmpA ); + + # float the fractional portion of register B + tmpB:8 = int2float( B:4 ); + setFPRF( tmpB ); + tmpB = tmpB f/ tmpA; + setFPDivFlags( tmpB, tmpA, tmpB ); + + tmpC:4 = float2float( tmpB ); + setFPRF( tmpC ); + + setSummaryFPSCR(); + + # assign to lower word of D + D = ( D & 0xFFFFFFFF00000000 ) | zext( tmpC ); +} + +# efscfui rT,rB 010 1101 0000 +#define pcodeop ConvertFloatingPointFromUnsignedInteger; +:efscfui D,B is OP=4 & D & B & XOP_0_10=0x2D0 & BITS_16_20=0 +{ + tmp:4 = int2float( B:4 ); + setFPRF( tmp ); + + # assign to lower word of D + D = ( D & 0xFFFFFFFF00000000 ) | zext( tmp ); + setSummaryFPSCR(); +} + +# efscmpeq CRFD,rA,rB 010 1100 1110 +#define pcodeop FloatingPointCompareEqual; +:efscmpeq CRFD,A,B is OP=4 & CRFD & A & B & XOP_0_10=0x2CE & BITS_21_22=0 +{ + CRFD[2,1] = A:4 f== B:4; +} + +# ================================================================= +# Page 415 + +# efscmpgt CRFD,rA,rB 010 1100 1100 +#define pcodeop FloatingPointCompareGreaterThan; +:efscmpgt CRFD,A,B is OP=4 & CRFD & A & B & XOP_0_10=0x2CC & BITS_21_22=0 +{ + CRFD[2,1] = A:4 f> B:4; +} + +# efscmplt CRFD,rA,rB 010 1100 1101 +#define pcodeop FloatingPointCompareLessThan; +:efscmplt CRFD,A,B is OP=4 & CRFD & A & B & XOP_0_10=0x2CD & BITS_21_22=0 +{ + CRFD[2,1] = A:4 f< B:4; +} + +# efsctsf rT,rB 010 1101 0111 +#define pcodeop ConvertFloatingPointToSignedFraction; +:efsctsf D,B is OP=4 & D & B & XOP_0_10=0x2D7 & BITS_16_20=0 +{ + # multiply by 0x0000 0000 8000 0000 to scale the fraction up to integer range + + # load fractional multiplier as a float + tmpM:8 = 0x0000000080000000; + tmpM = int2float( tmpM ); + setFPRF( tmpM ); + + # load saturation limit as a float + tmpL:8 = 0x0000000080000000 - 1; + tmpL = int2float( tmpL ); + setFPRF( tmpL ); + + # scale the saturation limit to a fractional float + tmpL = tmpL f/ tmpM; + setFPDivFlags( tmpL, tmpM, tmpL ); + + # get B float up to 64 bit width + tmpB:8 = float2float( B:4 ); + setFPRF( tmpB ); + + # check if less than or equal to positive saturation limit + if ( tmpB f<= tmpL ) goto <check_neg>; + + # set to positive saturation + tmpB = tmpL; + + goto <scale>; + + <check_neg> + + # check if greater than or equal to negative saturation limit + tmpL = f-( tmpL ); + if ( tmpB f>= tmpL ) goto <scale>; + + # set to negative saturation + tmpB = tmpL; + + <scale> + + # scale the fractional portion up to integer side of mantissa + tmpB = tmpB f* tmpM; + setFPMulFlags( tmpB, tmpM, tmpB ); + + # truncate back to signed fraction format + tmpC:4 = trunc( tmpB ); + setFPRF( tmpB ); + + setSummaryFPSCR(); + + # assign to lower word of D + D = ( D & 0xFFFFFFFF00000000 ) | zext( tmpC ); +} + + +# efsctsi rT,rB 010 1101 0101 +#define pcodeop ConvertFloatingPointToSignedInteger; +:efsctsi D,B is OP=4 & D & B & XOP_0_10=0x2D5 & BITS_16_20=0 +{ + # create zero float constant + tmpA:4 = 0; + tmpA = int2float( tmpA ); + + # check if negative + if ( B:4 f< tmpA ) goto <negative>; + + tmpB:8 = trunc(round( B:4 )); + setFPRF( tmpB ); + + # limit to positive saturation + if ( tmpB <= 0x000000007FFFFFFF ) goto <pos_ok>; + tmpB = 0x000000007FFFFFFF; + + <pos_ok> + + goto <done>; + + <negative> + + # negate the float + tmpB = trunc(round( f-( B:4 ) )); + setFPRF( tmpB ); + + # limit to negative saturation + if ( tmpB <= 0x0000000080000000 ) goto <neg_ok>; + tmpB = 0x0000000080000000; + + <neg_ok> + + # negate the signed int + tmpB = -( tmpB ); + + <done> + + setSummaryFPSCR(); + + # assign to lower word of D + D = ( D & 0xFFFFFFFF00000000 ) | zext( tmpB:4 ); +}
+# efsctsiz rT,rB 010 1101 1010 +#define pcodeop ConvertFloatingPointToSignedIntegerWithRoundTowardZero; +:efsctsiz D,B is OP=4 & D & B & XOP_0_10=0x2DA & BITS_16_20=0 +{ + # create zero float constant + tmpA:4 = 0; + tmpA = int2float( tmpA ); + + # check if negative + if ( B:4 f< tmpA ) goto <negative>; + + tmpB:8 = trunc( B:4 ); + setFPRF( tmpB ); + + # limit to saturation + if ( tmpB <= 0x000000007FFFFFFF ) goto <pos_ok>; + tmpB = 0x000000007FFFFFFF; + + <pos_ok> + + goto <done>; + + <negative> + + # negate the float + tmpB = trunc( f-( B:4 ) ); + setFPRF( tmpB ); + + # limit to saturation + if ( tmpB <= 0x0000000080000000 ) goto <neg_ok>; + tmpB = 0x0000000080000000; + + <neg_ok> + + # negate the signed int + tmpB = -( tmpB ); + + <done> + + setSummaryFPSCR(); + + # assign to lower word of D + D = ( D & 0xFFFFFFFF00000000 ) | zext( tmpB:4 ); +} + +# ================================================================= +# Page 420 + +# efsctuf rT,rB 010 1101 0110 +#define pcodeop ConvertFloatingPointToUnsignedFraction; +:efsctuf D,B is OP=4 & D & B & XOP_0_10=0x2D6 & BITS_16_20=0 +{ + # multiply by 0x0000 0001 0000 0000 to scale the fraction up to integer range + + # load fractional multiplier as a float + tmpM:8 = 0x0000000100000000; + tmpM = int2float( tmpM ); + setFPRF( tmpM ); + + # load saturation limit as a float + tmpL:8 = 0x0000000100000000 - 1; + tmpL = int2float( tmpL ); + setFPRF( tmpL ); + + # scale the saturation limit to a fractional float + tmpL = tmpL f/ tmpM; + setFPDivFlags( tmpL, tmpM, tmpL ); + + # get B float up to 64 bit width + tmpB:8 = float2float( B:4 ); + setFPRF( tmpB ); + + # check if less than or equal to positive saturation limit + if ( tmpB f<= tmpL ) goto <no_sat>; + + # set to saturation + tmpB = tmpL; + + <no_sat> + + # scale the fractional portion up to integer side of mantissa + tmpB = tmpB f* tmpM; + setFPMulFlags( tmpB, tmpM, tmpB ); + + # truncate back to integer + tmpC:4 = trunc( tmpB ); + setFPRF( tmpC ); + + setSummaryFPSCR(); + + # assign to lower word of D + D = ( D & 0xFFFFFFFF00000000 ) | zext( tmpC ); +} + +# efsctui rT,rB 010 1101 0100 +#define pcodeop ConvertFloatingPointToUnsignedInteger; +:efsctui D,B is OP=4 & D & B & XOP_0_10=0x2D4 & BITS_16_20=0 +{ + tmpB:8 = trunc(round( B:4 )); + setFPRF( tmpB ); + + # limit to saturation + if ( tmpB <= 0x00000000FFFFFFFF ) goto <no_sat>; + tmpB = 0x00000000FFFFFFFF; + + <no_sat> + + setSummaryFPSCR(); + + # assign to lower word of D + D = ( D & 0xFFFFFFFF00000000 ) | zext( tmpB:4 ); +} + +# efsctuiz rT,rB 010 1101 1000 +#define pcodeop ConvertFloatingPointToUnsignedIntegerWithRoundTowardZero; +:efsctuiz D,B is OP=4 & D & B & XOP_0_10=0x2D8 & BITS_16_20=0 +{ + tmpB:8 = trunc( B:4 ); + setFPRF( tmpB ); + + # limit to saturation + if ( tmpB <= 0x00000000FFFFFFFF ) goto <no_sat>; + tmpB = 0x00000000FFFFFFFF; + + <no_sat> + + setSummaryFPSCR(); + + # assign to lower word of D + D = ( D & 0xFFFFFFFF00000000 ) | zext( tmpB:4 ); +} + +# efsdiv rT,rA,rB 010 1100 1001 +#define pcodeop FloatingPointDivide; +:efsdiv D,A,B is OP=4 & D & A & B & XOP_0_10=0x2C9 +{ + # assign to lower word of D + D = ( D & 0xFFFFFFFF00000000 ) | zext( A:4 f/ B:4 ); + setFPDivFlags( A:4, B:4, D:4 ); +} + +# efsmul rT,rA,rB 010 1100 1000 +#define pcodeop FloatingPointMultiply; +:efsmul D,A,B is OP=4 & D & A & B & XOP_0_10=0x2C8 +{ + # assign to lower word of D + D = ( D & 0xFFFFFFFF00000000 ) | zext( A:4 f* B:4 ); + setFPMulFlags( A:4, B:4, D:4 ); +} + +# ================================================================= +# Page 425 + +# efsnabs rT,rA 010 1100 0101 +#define pcodeop FloatingPointNegativeAbsoluteValue; +:efsnabs D,A is OP=4 & D & A & XOP_0_10=0x2C5 & BITS_11_15=0 +{ + # assign to lower word of D + D = ( D & 0xFFFFFFFF00000000 ) | zext( f- ( abs( A:4 ) ) ); + setFPRF( D:4 ); + setSummaryFPSCR(); +} + +# efsneg rT,rA 010 1100 0110 +#define pcodeop FloatingPointNegate; +:efsneg D,A is OP=4 & D & A & XOP_0_10=0x2C6 & BITS_11_15=0 +{ + # assign to lower word of D + D = ( D & 
0xFFFFFFFF00000000 ) | zext( f-( A:4 ) ); + setFPRF( D:4 ); + setSummaryFPSCR(); +} + +# efssub rT,rA,rB 010 1100 0001 +#define pcodeop FloatingPointSubtract; +:efssub D,A,B is OP=4 & D & A & B & XOP_0_10=0x2C1 +{ + # assign to lower word of D + D = ( D & 0xFFFFFFFF00000000 ) | zext( A:4 f- B:4 ); + setFPSubFlags( A:4, B:4, D:4 ); + setSummaryFPSCR(); +} + +# efststeq CRFD,rA,rB 010 1101 1110 +#define pcodeop FloatingPointTestEqual; +:efststeq CRFD,A,B is OP=4 & CRFD & A & B & XOP_0_10=0x2DE & BITS_21_22=0 +{ + CRFD[2,1] = A:4 f== B:4; +} + +# efststgt CRFD,rA,rB 010 1101 1100 +#define pcodeop FloatingPointTestGreaterThan; +:efststgt CRFD,A,B is OP=4 & CRFD & A & B & XOP_0_10=0x2DC & BITS_21_22=0 +{ + CRFD[2,1] = A:4 f> B:4; +} + +# ================================================================= +# Page 430 + +# efststlt CRFD,rA,rB 010 1101 1101 +#define pcodeop FloatingPointTestLessThan; +:efststlt CRFD,A,B is OP=4 & CRFD & A & B & XOP_0_10=0x2DD & BITS_21_22=0 +{ + CRFD[2,1] = A:4 f< B:4; +} diff --git a/src/third-party/sleigh/processors/PowerPC/data/languages/altivec.sinc b/src/third-party/sleigh/processors/PowerPC/data/languages/altivec.sinc new file mode 100644 index 00000000..7f4b339b --- /dev/null +++ b/src/third-party/sleigh/processors/PowerPC/data/languages/altivec.sinc @@ -0,0 +1,1852 @@ +# altivec pcodes are stubbed out with pseudocode calls +define pcodeop dataStreamStop; +define pcodeop dataStreamStopAll; +define pcodeop dataStreamTouch; +define pcodeop dataStreamTouchSoon; +define pcodeop dataStreamTouchForStore; +define pcodeop dataStreamTouchForStoreTransient; +define pcodeop loadVectorElementByteIndexed; +define pcodeop loadVectorElementHalfWordIndexed; +define pcodeop loadVectorElementWordIndexed; +define pcodeop loadVectorForShiftLeft; +define pcodeop loadVectorForShiftRight; +define pcodeop loadVectorIndexed; +define pcodeop loadVectorIndexedLRU; +define pcodeop moveFromVectorStatusAndControlRegister; +define pcodeop moveToVectorStatusAndControlRegister; +define pcodeop storeVectorElementByteIndexed; +define pcodeop storeVectorElementHalfWordIndexed; +define pcodeop storeVectorElementWordIndexed; +define pcodeop storeVectorIndexed; +define pcodeop storeVectorIndexedLRU; +define pcodeop vectorAddCarryoutUnsignedWord; +define pcodeop vectorAddFloatingPoint; +define pcodeop vectorAddSignedByteSaturate; +define pcodeop vectorAddSignedHalfWordSaturate; +define pcodeop vectorAddSignedWordSaturate; +define pcodeop vectorAddUnsignedByteSaturate; +define pcodeop vectorAddUnsignedHalfWordModulo; +define pcodeop vectorAddUnsignedHalfWordSaturate; +define pcodeop vectorAddUnsignedWordSaturate; +define pcodeop vectorLogicalAnd; +define pcodeop vectorLogicalAndWithComplement; +define pcodeop vectorAverageSignedByte; +define pcodeop vectorAverageSignedHalfWord; +define pcodeop vectorAverageSignedWord; +define pcodeop vectorAverageUnsignedByte; +define pcodeop vectorAverageUnsignedHalfWord; +define pcodeop vectorAverageUnsignedWord; +define pcodeop vectorConvertFromSignedFixedPointWord; +define pcodeop vectorConvertFromUnsignedFixedPointWord; +define pcodeop vectorCompareBoundsFloatingPoint; +define pcodeop vectorCompareEqualToFloatingPoint; +define pcodeop vectorCompareEqualToUnsignedByte; +define pcodeop vectorCompareEqualToUnsignedHalfWord; +define pcodeop vectorCompareEqualToUnsignedWord; +define pcodeop vectorCompareGreaterThanOrEqualToFloatingPoint; +define pcodeop vectorCompareGreaterThanFloatingPoint; +define pcodeop vectorCompareGreaterThanSignedByte; +define pcodeop 
vectorCompareGreaterThanConditionRegisterSignedHalfWord; +define pcodeop vectorCompareGreaterThanSignedWord; +define pcodeop vectorCompareGreaterThanUnsignedByte; +define pcodeop vectorCompareGreaterThanUnsignedHalfWord; +define pcodeop vectorCompareGreaterThanUnsignedWord; +define pcodeop vectorConvertToSignedFixedPointWordSaturate; +define pcodeop vectorConvertToUnsignedFixedPointWordSaturate; +define pcodeop vector2RaisedToTheExponentEstimateFloatingPoint; +define pcodeop vectorLog2EstimateFloatingPoint; +define pcodeop vectorMultiplyAddFloatingPoint; +define pcodeop vectorMaximumFloatingPoint; +define pcodeop vectorMaximumSignedByte; +define pcodeop vectorMaximumSignedHalfWord; +define pcodeop vectorMaximumSignedWord; +define pcodeop vectorMaximumUnsignedByte; +define pcodeop vectorMaximumUnsignedHalfWord; +define pcodeop vectorMaximumUnsignedWord; +define pcodeop vectorMultiplyHighAndAddSignedHalfWordSaturate; +define pcodeop vectorMultiplyHighRoundAndAddSignedHalfWordSaturate; +define pcodeop vectorMinimumFloatingPoint; +define pcodeop vectorMinimumSignedByte; +define pcodeop vectorMinimumSignedHalfWord; +define pcodeop vectorMinimumSignedWord; +define pcodeop vectorMinimumUnsignedByte; +define pcodeop vectorMinimumUnsignedHalfWord; +define pcodeop vectorMinimumUnsignedWord; +define pcodeop vectorMultiplyLowAndAddUnsignedHalfWordModulo; +define pcodeop vectorMergeHighByte; +define pcodeop vectorMergeHighHalfWord; +define pcodeop vectorMergeHighWord; +define pcodeop vectorMergeLowByte; +define pcodeop vectorMergeLowHalfWord; +define pcodeop vectorMergeLowWord; +define pcodeop vectorMultiplySumMixedSignByteModulo; +define pcodeop vectorMultiplySumSignedHalfWordModulo; +define pcodeop vectorMultiplySumSignedHalfWordSaturate; +define pcodeop vectorMultiplySumUnsignedByteModulo; +define pcodeop vectorMultiplySumUnsignedHalfWordModulo; +define pcodeop vectorMultiplySumUnsignedHalfWordSaturate; +define pcodeop vectorMultiplyEvenSignedByte; +define pcodeop vectorMultiplyEvenSignedHalfWord; +define pcodeop vectorMultiplyEvenUnsignedByte; +define pcodeop vectorMultiplyEvenUnsignedHalfWord; +define pcodeop vectorMultiplyOddSignedByte; +define pcodeop vectorMultiplyOddSignedHalfWord; +define pcodeop vectorMultiplyOddUnsignedByte; +define pcodeop vectorMultiplyOddUnsignedHalfWord; +define pcodeop vectorNegativeMultiplySubtractFloatingPoint; +define pcodeop vectorLogicalNOR; +define pcodeop vectorLogicalOR; +define pcodeop vectorPackPixel32; +define pcodeop vectorPackSignedHalfWordSignedSaturate; +define pcodeop vectorPackSignedHalfWordUnsignedSaturate; +define pcodeop vectorPackSignedWordSignedSaturate; +define pcodeop vectorPackSignedWordUnsignedSaturate; +define pcodeop vectorPackUnsignedHalfWordUnsignedModulo; +define pcodeop vectorPackUnsignedHalfWordUnsignedSaturate; +define pcodeop vectorPackUnsignedWordUnsignedModulo; +define pcodeop vectorPackUnsignedWordUnsignedSaturate; +define pcodeop vectorReciprocalEstimateFloatingPoint; +define pcodeop vectorRoundToFloatingPointIntegerTowardMinusInfinity; +define pcodeop vectorRoundToFloatingPointIntegerNearest; +define pcodeop vectorRoundToFloatingPointIntegerTowardPluInfinity; +define pcodeop vectorRoundToFloatingPointIntegerTowardZero; +define pcodeop vectorRotateLeftIntegerByte; +define pcodeop vectorRotateLeftIntegerHalfWord; +define pcodeop vectorRotateLeftIntegerWord; +define pcodeop vectorReciprocalSquareRootEstimateFloatingPoint; +define pcodeop vectorConditionalSelect; +define pcodeop vectorShiftLeft; +define pcodeop 
vectorShiftLeftIntegerByte; +define pcodeop vectorShiftLeftDoubleByOctetImmediate; +define pcodeop vectorShiftLeftIntegerHalfWord; +define pcodeop vectorShiftLeftByOctet; +define pcodeop vectorShiftLeftIntegerWord; +define pcodeop vectorSplatByte; +define pcodeop vectorSplatHalfWord; +define pcodeop vectorSplatImmediateSignedByte; +define pcodeop vectorSplatImmediateSignedHalfWord; +define pcodeop vectorSplatImmediateSignedWord; +define pcodeop vectorSplatWord; +define pcodeop vectorShiftRight; +define pcodeop vectorShiftRightAlgebraicByte; +define pcodeop vectorShiftRightAlgebraicHalfWord; +define pcodeop vectorShiftRightAlgebraicWord; +define pcodeop vectorShiftRightByte; +define pcodeop vectorShiftRightHalfWord; +define pcodeop vectorShiftRightByOctet; +define pcodeop vectorShiftRightWord; +define pcodeop vectorSubtractCarryoutUnsignedWord; +define pcodeop vectorSubtractFloatingPoint; +define pcodeop vectorSubtractSignedByteSaturate; +define pcodeop vectorSubtractSignedHalfWordSaturate; +define pcodeop vectorSubtractSignedWordSaturate; +define pcodeop vectorSubtractUnsignedByteModulo; +define pcodeop vectorSubtractUnsignedByteSaturate; +define pcodeop vectorSubtractUnsignedHalfWordSaturate; +define pcodeop vectorSubtractUnsignedWordModulo; +define pcodeop vectorSubtractUnsignedWordSaturate; +define pcodeop vectorSumAcrossSignedWordSaturate; +define pcodeop vectorSumAcrossPartialSignedWordSaturate; +define pcodeop vectorSumAcrossPartialSignedByteSaturate; +define pcodeop vectorSumAcrossPartialSignedHalfWordSaturate; +define pcodeop vectorSumAcrossPartialUnsignedByteSaturate; +define pcodeop vectorUnpackHighPixel16; +define pcodeop vectorUnpackHighSignedByte; +define pcodeop vectorUnpackHighSignedHalfWord; +define pcodeop vectorUnpackLowPixel16; +define pcodeop vectorUnpackLowSignedByte; +define pcodeop vectorUnpackLowSignedHalfWord; +define pcodeop vectorPermute; + +# dss +:dss STRM is $(NOTVLE) & OP=31 & BIT_25=0 & BITS_23_24=0 & STRM & BITS_16_20=0 & BITS_11_15=0 & XOP_1_10=822 & Rc=0 +{ + dataStreamStop(STRM:1); +} + +# dssall +:dssall STRM is $(NOTVLE) & OP=31 & BIT_25=1 & BITS_23_24=0 & STRM & BITS_16_20=0 & BITS_11_15=0 & XOP_1_10=822 & Rc=0 +{ + dataStreamStopAll(STRM:1); +} + +:dst A,B,STRM is $(NOTVLE) & OP=31 & BIT_25=0 & BITS_23_24=0 & STRM & A & B & XOP_1_10=342 & Rc=0 +{ + dataStreamTouch(A,B,STRM:1); +} + +:dstt A,B,STRM is $(NOTVLE) & OP=31 & BIT_25=1 & BITS_23_24=0 & STRM & A & B & XOP_1_10=342 & Rc=0 +{ + dataStreamTouchSoon(A,B,STRM:1); +} + +:dstst A,B,STRM is $(NOTVLE) & OP=31 & BIT_25=0 & BITS_23_24=0 & STRM & A & B & XOP_1_10=374 & Rc=0 +{ + dataStreamTouchForStore(A,B,STRM:1); +} + +:dststt A,B,STRM is $(NOTVLE) & OP=31 & BIT_25=1 & BITS_23_24=0 & STRM & A & B & XOP_1_10=374 & Rc=0 +{ + dataStreamTouchForStoreTransient(A,B,STRM:1); +} + +:lvebx vrD,RA_OR_ZERO,B is OP=31 & vrD & RA_OR_ZERO & B & XOP_1_10=7 & Rc=0 +{ + tmp:$(REGISTER_SIZE) = (RA_OR_ZERO + B); + tmpb:1 = *[ram]:1 tmp; + eb:1 = tmp[0,4]; +# This looks backwards from what the manual says, but it's ok since byte 0 in the manual is MSB +# whereas for us byte 0 is LSB +@if ENDIAN == "big" + eb = 0xF - eb; +@endif + eb = eb * 8; + vrD = (zext(tmpb) << eb); + #vrD = loadVectorElementByteIndexed(A,B); +} + +:lvehx vrD,A,B is OP=31 & vrD & A & B & XOP_1_10=39 & Rc=0 +{ # TODO definition + vrD = loadVectorElementHalfWordIndexed(A,B); +} + +:lvewx vrD,A,B is OP=31 & vrD & A & B & XOP_1_10=71 & Rc=0 +{ # TODO definition + vrD = loadVectorElementWordIndexed(A,B); +} + +:lvsl vrD,A,B is OP=31 & vrD & A & B & XOP_1_10=6 & Rc=0 +{ # TODO 
definition + vrD = loadVectorForShiftLeft(A,B); +} + +:lvsr vrD,RA_OR_ZERO,B is OP=31 & vrD & RA_OR_ZERO & B & XOP_1_10=38 & Rc=0 +{ + tmp:$(REGISTER_SIZE) = (RA_OR_ZERO + B); + eb:1 = tmp[0,4]; + eb = eb * 8; + srca:32=0x0001020304050607; + srcb:32=0x08090a0b0c0d0e0f; + srcc:32=0x1011121314151617; + srcd:32=0x18191a1b1c1d1e1f; + src:32 = (srca << 192) | (srcb << 128) | (srcc << 64) | srcd; + src = src >> eb; + vrD = src:16; +} + +:lvx vrD,RA_OR_ZERO,B is OP=31 & vrD & RA_OR_ZERO & B & XOP_1_10=103 & Rc=0 +{ +# vrD = loadVectorIndexed(A,B); + build RA_OR_ZERO; + tmp:$(REGISTER_SIZE) = (RA_OR_ZERO + B) & 0xfffffffffffffff0; + vrD = *[ram]:16 tmp; +} + +:lvxl vrD,A,B is OP=31 & vrD & A & B & XOP_1_10=359 & Rc=0 +{ # TODO definition + vrD = loadVectorIndexedLRU(A,B); +} + +:mfvscr vrD is OP=4 & vrD & vrAR=0 & vrBR=0 & XOP_1_10=770 & Rc=0 +{ # TODO definition + vrD = moveFromVectorStatusAndControlRegister(); +} + +:mtvscr vrB is OP=4 & vrDR=0 & vrAR=0 & vrB & XOP_1_10=802 & Rc=0 +{ # TODO definition + moveToVectorStatusAndControlRegister(vrB); +} + +:stvebx vrS,RA_OR_ZERO,B is OP=31 & vrS & RA_OR_ZERO & B & XOP_1_10=135 & Rc=0 +{ # TODO definition + EA:$(REGISTER_SIZE) = RA_OR_ZERO + B; + *[ram]:1 EA = storeVectorElementByteIndexed(vrS,RA_OR_ZERO,B); +} + +:stvehx vrS,RA_OR_ZERO,B is OP=31 & vrS & RA_OR_ZERO & B & XOP_1_10=167 & Rc=0 +{ # TODO definition + EA:$(REGISTER_SIZE) = RA_OR_ZERO + B; + *[ram]:2 EA = storeVectorElementHalfWordIndexed(vrS,RA_OR_ZERO,B); +} + +:stvewx vrS,RA_OR_ZERO,B is OP=31 & vrS & RA_OR_ZERO & B & XOP_1_10=199 & Rc=0 +{ # TODO definition + EA:$(REGISTER_SIZE) = (RA_OR_ZERO + B) & 0xfffffffffffffffc; + *[ram]:4 EA = storeVectorElementWordIndexed(vrS,RA_OR_ZERO,B); +} + +:stvx vrS,RA_OR_ZERO,B is OP=31 & vrS & B & RA_OR_ZERO & XOP_1_10=231 & Rc=0 +{ + tmp:$(REGISTER_SIZE) = (RA_OR_ZERO + B) & 0xfffffffffffffff0; + *[ram]:16 tmp = vrS; +} + +:stvxl vrS,RA_OR_ZERO,B is OP=31 & vrS & B & RA_OR_ZERO & XOP_1_10=487 & Rc=0 +{ # TODO definition + tmp:$(REGISTER_SIZE) = (RA_OR_ZERO + B) & 0xfffffffffffffff0; + *[ram]:16 tmp = vrS; + # mark_as_not_likely_to_be_needed_again_anytime_soon(tmp); +} + +:vaddcuw vrD,vrA,vrB is OP=4 & vrD & vrA & vrB & XOP_0_10=384 +{ # TODO definition + vrD = vectorAddCarryoutUnsignedWord(vrA,vrB); +} + +:vaddfp vrD,vrA,vrB is OP=4 & vrD & vrA & vrB & XOP_0_10=10 +{ # TODO definition + vrD = vectorAddFloatingPoint(vrA,vrB); +} + +:vaddsbs vrD,vrA,vrB is OP=4 & vrD & vrA & vrB & XOP_0_10=768 +{ # TODO definition + vrD = vectorAddSignedByteSaturate(vrA,vrB); +} + +:vaddshs vrD,vrA,vrB is OP=4 & vrD & vrA & vrB & XOP_0_10=832 +{ # TODO definition + vrD = vectorAddSignedHalfWordSaturate(vrA,vrB); +} + +:vaddsws vrD,vrA,vrB is OP=4 & vrD & vrA & vrB & XOP_0_10=896 +{ # TODO definition + vrD = vectorAddSignedWordSaturate(vrA,vrB); +} + +vaddubm_part1: is vrA_8_0 & vrA_8_1 & vrA_8_2 & vrA_8_3 & vrA_8_4 & vrA_8_5 & vrA_8_6 & vrA_8_7 + & vrB_8_0 & vrB_8_1 & vrB_8_2 & vrB_8_3 & vrB_8_4 & vrB_8_5 & vrB_8_6 & vrB_8_7 + & vrD_8_0 & vrD_8_1 & vrD_8_2 & vrD_8_3 & vrD_8_4 & vrD_8_5 & vrD_8_6 & vrD_8_7 +{ + vrD_8_0 = vrA_8_0 + vrB_8_0; + vrD_8_1 = vrA_8_1 + vrB_8_1; + vrD_8_2 = vrA_8_2 + vrB_8_2; + vrD_8_3 = vrA_8_3 + vrB_8_3; + vrD_8_4 = vrA_8_4 + vrB_8_4; + vrD_8_5 = vrA_8_5 + vrB_8_5; + vrD_8_6 = vrA_8_6 + vrB_8_6; + vrD_8_7 = vrA_8_7 + vrB_8_7; +} + +vaddubm_part2: is vrA_8_8 & vrA_8_9 & vrA_8_10 & vrA_8_11 & vrA_8_12 & vrA_8_13 & vrA_8_14 & vrA_8_15 + & vrB_8_8 & vrB_8_9 & vrB_8_10 & vrB_8_11 & vrB_8_12 & vrB_8_13 & vrB_8_14 & vrB_8_15 + & vrD_8_8 & vrD_8_9 & 
vrD_8_10 & vrD_8_11 & vrD_8_12 & vrD_8_13 & vrD_8_14 & vrD_8_15 +{ + vrD_8_8 = vrA_8_8 + vrB_8_8; + vrD_8_9 = vrA_8_9 + vrB_8_9; + vrD_8_10 = vrA_8_10 + vrB_8_10; + vrD_8_11 = vrA_8_11 + vrB_8_11; + vrD_8_12 = vrA_8_12 + vrB_8_12; + vrD_8_13 = vrA_8_13 + vrB_8_13; + vrD_8_14 = vrA_8_14 + vrB_8_14; + vrD_8_15 = vrA_8_15 + vrB_8_15; +} + +# A bug in the sleigh compiler forces us to keep the number of imported symbols below 35 (it slows to a halt past that point); that is why vaddubm is split into vaddubm_part1 & vaddubm_part2 +:vaddubm vrD,vrA,vrB is OP=4 & vrD & vrA & vrB & XOP_0_10=0 & vaddubm_part1 & vaddubm_part2 +{ +} + +:vaddubs vrD,vrA,vrB is OP=4 & vrD & vrA & vrB & XOP_0_10=512 +{ # TODO definition + vrD = vectorAddUnsignedByteSaturate(vrA,vrB); +} + +:vadduhm vrD,vrA,vrB is OP=4 & vrD & vrA & vrB & XOP_0_10=64 + & vrA_16_0 & vrA_16_1 & vrA_16_2 & vrA_16_3 & vrA_16_4 & vrA_16_5 & vrA_16_6 & vrA_16_7 + & vrB_16_0 & vrB_16_1 & vrB_16_2 & vrB_16_3 & vrB_16_4 & vrB_16_5 & vrB_16_6 & vrB_16_7 + & vrD_16_0 & vrD_16_1 & vrD_16_2 & vrD_16_3 & vrD_16_4 & vrD_16_5 & vrD_16_6 & vrD_16_7 +{ + vrD_16_0 = vrA_16_0 + vrB_16_0; + vrD_16_1 = vrA_16_1 + vrB_16_1; + vrD_16_2 = vrA_16_2 + vrB_16_2; + vrD_16_3 = vrA_16_3 + vrB_16_3; + vrD_16_4 = vrA_16_4 + vrB_16_4; + vrD_16_5 = vrA_16_5 + vrB_16_5; + vrD_16_6 = vrA_16_6 + vrB_16_6; + vrD_16_7 = vrA_16_7 + vrB_16_7; +} + +:vadduhs vrD,vrA,vrB is OP=4 & vrD & vrA & vrB & XOP_0_10=576 +{ # TODO definition + vrD = vectorAddUnsignedHalfWordSaturate(vrA,vrB); +} + +:vadduwm vrD,vrA,vrB is OP=4 & vrD & vrA & vrB & XOP_0_10=128 + & vrA_32_0 & vrA_32_1 & vrA_32_2 & vrA_32_3 + & vrB_32_0 & vrB_32_1 & vrB_32_2 & vrB_32_3 + & vrD_32_0 & vrD_32_1 & vrD_32_2 & vrD_32_3 +{ + vrD_32_0 = vrA_32_0 + vrB_32_0; + vrD_32_1 = vrA_32_1 + vrB_32_1; + vrD_32_2 = vrA_32_2 + vrB_32_2; + vrD_32_3 = vrA_32_3 + vrB_32_3; +} + +# Commented out: this vadduws encoding collides with another constructor +# :vadduws vrD,vrA,vrB is OP=4 & vrD & vrA & vrB & XOP_0_10=640 +# { # TODO definition +# vrD = vectorAddUnsignedWordSaturate(vrA,vrB); +# } + +:vand vrD,vrA,vrB is OP=4 & vrD & vrA & vrB & XOP_0_10=1028 +{ # TODO definition + vrD = vectorLogicalAnd(vrA,vrB); +} + +:vandc vrD,vrA,vrB is OP=4 & vrD & vrA & vrB & XOP_0_10=1092 +{ # TODO definition + vrD = vectorLogicalAndWithComplement(vrA,vrB); +} + +:vavgsb vrD,vrA,vrB is OP=4 & vrD & vrA & vrB & XOP_0_10=1282 +{ # TODO definition + vrD = vectorAverageSignedByte(vrA,vrB); +} + +:vavgsh vrD,vrA,vrB is OP=4 & vrD & vrA & vrB & XOP_0_10=1346 +{ # TODO definition + vrD = vectorAverageSignedHalfWord(vrA,vrB); +} + +:vavgsw vrD,vrA,vrB is OP=4 & vrD & vrA & vrB & XOP_0_10=1410 +{ # TODO definition + vrD = vectorAverageSignedWord(vrA,vrB); +} + +:vavgub vrD,vrA,vrB is OP=4 & vrD & vrA & vrB & XOP_0_10=1026 +{ # TODO definition + vrD = vectorAverageUnsignedByte(vrA,vrB); +} + +:vavguh vrD,vrA,vrB is OP=4 & vrD & vrA & vrB & XOP_0_10=1090 +{ # TODO definition + vrD = vectorAverageUnsignedHalfWord(vrA,vrB); +} + +:vavguw vrD,vrA,vrB is OP=4 & vrD & vrA & vrB & XOP_0_10=1154 +{ # TODO definition + vrD = vectorAverageUnsignedWord(vrA,vrB); +} + +:vcfsx vrD,vrB,A_BITS is OP=4 & vrD & A_BITS & vrB & XOP_0_10=842 +{ # TODO definition + vrD = vectorConvertFromSignedFixedPointWord(vrB,A_BITS:1); +} + +:vcfux vrD,vrB,A_BITS is OP=4 & vrD & A_BITS & vrB & XOP_0_10=778 +{ # TODO definition + vrD = vectorConvertFromUnsignedFixedPointWord(vrB,A_BITS:1); +} + +:vcmpbfp vrD,vrA,vrB is OP=4 & vrD & vrA & vrB & BIT_10=0 & XOP_0_9=966 +{ # TODO definition + vrD = vectorCompareBoundsFloatingPoint(vrA,vrB); +} +
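Many of the vcmp* constructors below are stubbed with a "# TODO change CR6" note. In the AltiVec semantics it is the record ("dot", BIT_10=1) forms that fold the per-lane comparison results into condition-register field 6: bit 0 of CR6 is set when the relation holds in every lane, bit 2 when it holds in no lane, and the other two bits are cleared. A minimal C++ sketch of that fold, for illustration only; the helper name and the lane representation are assumptions, not part of this patch:

#include <array>
#include <cstdint>

// Sketch only: how a vcmp*. record form would summarize 16 per-lane results
// into the 4-bit CR6 field. 'lane_true[i]' is assumed to hold the outcome of
// the comparison in lane i. Bit 0 (MSB of the field) = true in all lanes,
// bit 2 = true in no lane, bits 1 and 3 stay clear.
uint8_t cr6_from_lanes(const std::array<bool, 16>& lane_true)
{
    bool all_true = true;
    bool none_true = true;
    for (bool t : lane_true) {
        all_true = all_true && t;
        none_true = none_true && !t;
    }
    return static_cast<uint8_t>((all_true ? 0x8 : 0x0) | (none_true ? 0x2 : 0x0));
}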
:vcmpbfp. vrD,vrA,vrB is OP=4 & vrD & vrA & vrB & BIT_10=1 & XOP_0_9=966 +{ # TODO definition + # TODO change CR6 + vrD = vectorCompareBoundsFloatingPoint(vrA,vrB); +} + +:vcmpeqfp vrD,vrA,vrB is OP=4 & vrD & vrA & vrB & BIT_10=0 & XOP_0_9=198 +{ # TODO definition + vrD = vectorCompareEqualToFloatingPoint(vrA,vrB); +} + +:vcmpeqfp. vrD,vrA,vrB is OP=4 & vrD & vrA & vrB & BIT_10=1 & XOP_0_9=198 +{ # TODO definition + # TODO change CR6 + vrD = vectorCompareEqualToFloatingPoint(vrA,vrB); +} + +:vcmpequb vrD,vrA,vrB is OP=4 & vrD & vrA & vrB & BIT_10=0 & XOP_0_9=6 +{ # TODO definition + # TODO change CR6 + vrD = vectorCompareEqualToUnsignedByte(vrA,vrB); +} + +:vcmpequb. vrD,vrA,vrB is OP=4 & vrD & vrA & vrB & BIT_10=1 & XOP_0_9=6 +{ # TODO definition + # TODO change CR6 + vrD = vectorCompareEqualToUnsignedByte(vrA,vrB); +} + +:vcmpequh vrD,vrA,vrB is OP=4 & vrD & vrA & vrB & BIT_10=0 & XOP_0_9=70 +{ # TODO definition + # TODO change CR6 + vrD = vectorCompareEqualToUnsignedHalfWord(vrA,vrB); +} + +:vcmpequh. vrD,vrA,vrB is OP=4 & vrD & vrA & vrB & BIT_10=1 & XOP_0_9=70 +{ # TODO definition + # TODO change CR6 + vrD = vectorCompareEqualToUnsignedHalfWord(vrA,vrB); +} + +:vcmpequw vrD,vrA,vrB is OP=4 & vrD & vrA & vrB & BIT_10=0 & XOP_0_9=134 +{ # TODO definition + # TODO change CR6 + vrD = vectorCompareEqualToUnsignedWord(vrA,vrB); +} + +:vcmpequw. vrD,vrA,vrB is OP=4 & vrD & vrA & vrB & BIT_10=1 & XOP_0_9=134 +{ # TODO definition + # TODO change CR6 + vrD = vectorCompareEqualToUnsignedWord(vrA,vrB); +} + +:vcmpgefp vrD,vrA,vrB is OP=4 & vrD & vrA & vrB & BIT_10=0 & XOP_0_9=454 +{ # TODO definition + # TODO change CR6 + vrD = vectorCompareGreaterThanOrEqualToFloatingPoint(vrA,vrB); +} + +:vcmpgefp. vrD,vrA,vrB is OP=4 & vrD & vrA & vrB & BIT_10=1 & XOP_0_9=454 +{ # TODO definition + # TODO change CR6 + vrD = vectorCompareGreaterThanOrEqualToFloatingPoint(vrA,vrB); +} + +:vcmpgtfp vrD,vrA,vrB is OP=4 & vrD & vrA & vrB & BIT_10=0 & XOP_0_9=710 +{ # TODO definition + # TODO change CR6 + vrD = vectorCompareGreaterThanFloatingPoint(vrA,vrB); +} + +:vcmpgtfp. vrD,vrA,vrB is OP=4 & vrD & vrA & vrB & BIT_10=1 & XOP_0_9=710 +{ # TODO definition + # TODO change CR6 + vrD = vectorCompareGreaterThanFloatingPoint(vrA,vrB); +} + +:vcmpgtsb vrD,vrA,vrB is OP=4 & vrD & vrA & vrB & BIT_10=0 & XOP_0_9=774 +{ # TODO definition + # TODO change CR6 + vrD = vectorCompareGreaterThanSignedByte(vrA,vrB); +} + +:vcmpgtsb. vrD,vrA,vrB is OP=4 & vrD & vrA & vrB & BIT_10=1 & XOP_0_9=774 +{ # TODO definition + # TODO change CR6 + vrD = vectorCompareGreaterThanSignedByte(vrA,vrB); +} + +:vcmpgtsh vrD,vrA,vrB is OP=4 & vrD & vrA & vrB & BIT_10=0 & XOP_0_9=838 +{ # TODO definition + # TODO change CR6 + vrD = vectorCompareGreaterThanConditionRegisterSignedHalfWord(vrA,vrB); +} + +:vcmpgtsh. vrD,vrA,vrB is OP=4 & vrD & vrA & vrB & BIT_10=1 & XOP_0_9=838 +{ # TODO definition + # TODO change CR6 + vrD = vectorCompareGreaterThanConditionRegisterSignedHalfWord(vrA,vrB); +} + +:vcmpgtsw vrD,vrA,vrB is OP=4 & vrD & vrA & vrB & BIT_10=0 & XOP_0_9=902 +{ # TODO definition + # TODO change CR6 + vrD = vectorCompareGreaterThanSignedWord(vrA,vrB); +} + +:vcmpgtsw. vrD,vrA,vrB is OP=4 & vrD & vrA & vrB & BIT_10=1 & XOP_0_9=902 +{ # TODO definition + # TODO change CR6 + vrD = vectorCompareGreaterThanSignedWord(vrA,vrB); +} + +:vcmpgtub vrD,vrA,vrB is OP=4 & vrD & vrA & vrB & BIT_10=0 & XOP_0_9=518 +{ # TODO definition + # TODO change CR6 + vrD = vectorCompareGreaterThanUnsignedByte(vrA,vrB); +} + +:vcmpgtub. 
vrD,vrA,vrB is OP=4 & vrD & vrA & vrB & BIT_10=1 & XOP_0_9=518 +{ # TODO definition + # TODO change CR6 + vrD = vectorCompareGreaterThanUnsignedByte(vrA,vrB); +} + +:vcmpgtuh vrD,vrA,vrB is OP=4 & vrD & vrA & vrB & BIT_10=0 & XOP_0_9=582 +{ # TODO definition + # TODO change CR6 + vrD = vectorCompareGreaterThanUnsignedHalfWord(vrA,vrB); +} + +:vcmpgtuh. vrD,vrA,vrB is OP=4 & vrD & vrA & vrB & BIT_10=1 & XOP_0_9=582 +{ # TODO definition + # TODO change CR6 + vrD = vectorCompareGreaterThanUnsignedHalfWord(vrA,vrB); +} + +:vcmpgtuw vrD,vrA,vrB is OP=4 & vrD & vrA & vrB & BIT_10=0 & XOP_0_9=646 +{ # TODO definition + # TODO change CR6 + vrD = vectorCompareGreaterThanUnsignedWord(vrA,vrB); +} + +:vcmpgtuw. vrD,vrA,vrB is OP=4 & vrD & vrA & vrB & BIT_10=1 & XOP_0_9=646 +{ # TODO definition + # TODO change CR6 + vrD = vectorCompareGreaterThanUnsignedWord(vrA,vrB); +} + +:vctsxs vrD,vrB,A_BITS is OP=4 & vrD & A_BITS & vrB & XOP_0_10=970 +{ # TODO definition + vrD = vectorConvertToSignedFixedPointWordSaturate(vrB,A_BITS:1); +} + +:vctuxs vrD,vrB,A_BITS is OP=4 & vrD & A_BITS & vrB & XOP_0_10=906 +{ # TODO definition + vrD = vectorConvertToUnsignedFixedPointWordSaturate(vrB,A_BITS:1); +} + +:vexptefp vrD,vrB is OP=4 & vrD & A_BITS=0 & vrB & XOP_0_10=394 +{ # TODO definition + vrD = vector2RaisedToTheExponentEstimateFloatingPoint(vrB); +} + +:vlogefp vrD,vrB is OP=4 & vrD & A_BITS=0 & vrB & XOP_0_10=458 +{ # TODO definition + vrD = vectorLog2EstimateFloatingPoint(vrB); +} + +:vmaddfp vrD,vrA,vrC,vrB is OP=4 & vrD & vrA & vrB & vrC & XOP_0_5=46 +{ # TODO definition + vrD = vectorMultiplyAddFloatingPoint(vrA,vrC,vrB); +} + +:vmaxfp vrD,vrA,vrB is OP=4 & vrD & vrA & vrB & XOP_0_10=1034 +{ # TODO definition + vrD = vectorMaximumFloatingPoint(vrA,vrB); +} + +:vmaxsb vrD,vrA,vrB is OP=4 & vrD & vrA & vrB & XOP_0_10=258 +{ # TODO definition + vrD = vectorMaximumSignedByte(vrA,vrB); +} + +:vmaxsh vrD,vrA,vrB is OP=4 & vrD & vrA & vrB & XOP_0_10=322 +{ # TODO definition + vrD = vectorMaximumSignedHalfWord(vrA,vrB); +} + +:vmaxsw vrD,vrA,vrB is OP=4 & vrD & vrA & vrB & XOP_0_10=386 +{ # TODO definition + vrD = vectorMaximumSignedWord(vrA,vrB); +} + +:vmaxub vrD,vrA,vrB is OP=4 & vrD & vrA & vrB & XOP_0_10=2 +{ # TODO definition + vrD = vectorMaximumUnsignedByte(vrA,vrB); +} + +:vmaxuh vrD,vrA,vrB is OP=4 & vrD & vrA & vrB & XOP_0_10=66 +{ # TODO definition + vrD = vectorMaximumUnsignedHalfWord(vrA,vrB); +} + +:vmaxuw vrD,vrA,vrB is OP=4 & vrD & vrA & vrB & XOP_0_10=130 +{ # TODO definition + vrD = vectorMaximumUnsignedWord(vrA,vrB); +} + +:vmhaddshs vrD,vrA,vrB,vrC is OP=4 & vrD & vrA & vrB & vrC & XOP_0_5=32 +{ # TODO definition + vrD = vectorMultiplyHighAndAddSignedHalfWordSaturate(vrA,vrB,vrC); +} + +:vmhraddshs vrD,vrA,vrB,vrC is OP=4 & vrD & vrA & vrB & vrC & XOP_0_5=33 +{ # TODO definition + vrD = vectorMultiplyHighRoundAndAddSignedHalfWordSaturate(vrA,vrB,vrC); +} + +:vminfp vrD,vrA,vrB is OP=4 & vrD & vrA & vrB & XOP_0_10=1098 +{ # TODO definition + vrD = vectorMinimumFloatingPoint(vrA,vrB); +} + +:vminsb vrD,vrA,vrB is OP=4 & vrD & vrA & vrB & XOP_0_10=770 +{ # TODO definition + vrD = vectorMinimumSignedByte(vrA,vrB); +} + +:vminsh vrD,vrA,vrB is OP=4 & vrD & vrA & vrB & XOP_0_10=834 +{ # TODO definition + vrD = vectorMinimumSignedHalfWord(vrA,vrB); +} + +:vminsw vrD,vrA,vrB is OP=4 & vrD & vrA & vrB & XOP_0_10=898 +{ # TODO definition + vrD = vectorMinimumSignedWord(vrA,vrB); +} + +:vminub vrD,vrA,vrB is OP=4 & vrD & vrA & vrB & XOP_0_10=514 +{ # TODO definition + vrD = 
vectorMinimumUnsignedByte(vrA,vrB); +} + +:vminuh vrD,vrA,vrB is OP=4 & vrD & vrA & vrB & XOP_0_10=578 +{ # TODO definition + vrD = vectorMinimumUnsignedHalfWord(vrA,vrB); +} + +:vminuw vrD,vrA,vrB is OP=4 & vrD & vrA & vrB & XOP_0_10=642 +{ # TODO definition + vrD = vectorMinimumUnsignedWord(vrA,vrB); +} + +:vmladduhm vrD,vrA,vrB,vrC is OP=4 & vrD & vrA & vrB & vrC & XOP_0_5=34 +{ # TODO definition + vrD = vectorMultiplyLowAndAddUnsignedHalfWordModulo(vrA,vrB,vrC); +} + +:vmrghb vrD,vrA,vrB is OP=4 & vrD & vrA & vrB & XOP_0_10=12 +{ # TODO definition + vrD = vectorMergeHighByte(vrA,vrB); +} + +:vmrghh vrD,vrA,vrB is OP=4 & vrD & vrA & vrB & XOP_0_10=76 +{ # TODO definition + vrD = vectorMergeHighHalfWord(vrA,vrB); +} + +:vmrghw vrD,vrA,vrB is OP=4 & vrD & vrA & vrB & XOP_0_10=140 +{ # TODO definition + vrD = vectorMergeHighWord(vrA,vrB); +} + +:vmrglb vrD,vrA,vrB is OP=4 & vrD & vrA & vrB & XOP_0_10=268 +{ # TODO definition + vrD = vectorMergeLowByte(vrA,vrB); +} + +:vmrglh vrD,vrA,vrB is OP=4 & vrD & vrA & vrB & XOP_0_10=332 +{ # TODO definition + vrD = vectorMergeLowHalfWord(vrA,vrB); +} + +:vmrglw vrD,vrA,vrB is OP=4 & vrD & vrA & vrB & XOP_0_10=396 +{ # TODO definition + vrD = vectorMergeLowWord(vrA,vrB); +} + +:vmsummbm vrD,vrA,vrB,vrC is OP=4 & vrD & vrA & vrB & vrC & XOP_0_5=37 +{ # TODO definition + vrD = vectorMultiplySumMixedSignByteModulo(vrA,vrB,vrC); +} + +:vmsumshm vrD,vrA,vrB,vrC is OP=4 & vrD & vrA & vrB & vrC & XOP_0_5=40 +{ # TODO definition + vrD = vectorMultiplySumSignedHalfWordModulo(vrA,vrB,vrC); +} + +:vmsumshs vrD,vrA,vrB,vrC is OP=4 & vrD & vrA & vrB & vrC & XOP_0_5=41 +{ # TODO definition + vrD = vectorMultiplySumSignedHalfWordSaturate(vrA,vrB,vrC); +} + +:vmsumubm vrD,vrA,vrB,vrC is OP=4 & vrD & vrA & vrB & vrC & XOP_0_5=36 +{ # TODO definition + vrD = vectorMultiplySumUnsignedByteModulo(vrA,vrB,vrC); +} + +:vmsumuhm vrD,vrA,vrB,vrC is OP=4 & vrD & vrA & vrB & vrC & XOP_0_5=38 +{ # TODO definition + vrD = vectorMultiplySumUnsignedHalfWordModulo(vrA,vrB,vrC); +} + +:vmsumuhs vrD,vrA,vrB,vrC is OP=4 & vrD & vrA & vrB & vrC & XOP_0_5=39 +{ # TODO definition + vrD = vectorMultiplySumUnsignedHalfWordSaturate(vrA,vrB,vrC); +} + +:vmulesb vrD,vrA,vrB is OP=4 & vrD & vrA & vrB & XOP_0_10=776 +{ # TODO definition + vrD = vectorMultiplyEvenSignedByte(vrA,vrB); +} + +:vmulesh vrD,vrA,vrB is OP=4 & vrD & vrA & vrB & XOP_0_10=840 +{ # TODO definition + vrD = vectorMultiplyEvenSignedHalfWord(vrA,vrB); +} + +:vmuleub vrD,vrA,vrB is OP=4 & vrD & vrA & vrB & XOP_0_10=520 +{ # TODO definition + vrD = vectorMultiplyEvenUnsignedByte(vrA,vrB); +} + +:vmuleuh vrD,vrA,vrB is OP=4 & vrD & vrA & vrB & XOP_0_10=584 +{ # TODO definition + vrD = vectorMultiplyEvenUnsignedHalfWord(vrA,vrB); +} + +:vmulosb vrD,vrA,vrB is OP=4 & vrD & vrA & vrB & XOP_0_10=264 +{ # TODO definition + vrD = vectorMultiplyOddSignedByte(vrA,vrB); +} + +:vmulosh vrD,vrA,vrB is OP=4 & vrD & vrA & vrB & XOP_0_10=328 +{ # TODO definition + vrD = vectorMultiplyOddSignedHalfWord(vrA,vrB); +} + +:vmuloub vrD,vrA,vrB is OP=4 & vrD & vrA & vrB & XOP_0_10=8 +{ # TODO definition + vrD = vectorMultiplyOddUnsignedByte(vrA,vrB); +} + +:vmulouh vrD,vrA,vrB is OP=4 & vrD & vrA & vrB & XOP_0_10=72 +{ # TODO definition + vrD = vectorMultiplyOddUnsignedHalfWord(vrA,vrB); +} + +:vnmsubfp vrD,vrA,vrC,vrB is OP=4 & vrD & vrA & vrB & vrC & XOP_0_5=47 +{ # TODO definition + vrD = vectorNegativeMultiplySubtractFloatingPoint(vrA,vrC,vrB); +} + +:vnor vrD,vrA,vrB is OP=4 & vrD & vrA & vrB & XOP_0_10=1284 +{ + vrD = ~(vrA | vrB); +} + +:vor 
vrD,vrA,vrB is OP=4 & vrD & vrA & vrB & XOP_0_10=1156 +{ + vrD = vrA | vrB; +} + +:vperm vrD,vrA,vrB,vrC is OP=4 & vrD & vrA & vrB & vrC & XOP_0_5=43 +{ +# tmp:32 = (zext(vrA) << 128) | zext(vrB); +# tmp2:16 = 0; +# tmp3:32 = 0; +# cnt:1 = 15; +#<loop> +# tmp2 = (vrC >> (cnt * 8)) & 0x1F; +# tmp3 = tmp >> ((31 - tmp2) * 8); +# vrD = vrD << 8; +# vrD[0,8] = tmp3[0,8]; +# if (cnt == 0) goto <done>; +# cnt = cnt - 1; +# goto <loop>; +#<done> + vrD = vectorPermute(vrA,vrB,vrC); +} + +:vpkpx vrD,vrA,vrB is OP=4 & vrD & vrA & vrB & XOP_0_10=782 +{ # TODO definition + vrD = vectorPackPixel32(vrA,vrB); +} + +:vpkshss vrD,vrA,vrB is OP=4 & vrD & vrA & vrB & XOP_0_10=398 +{ # TODO definition + vrD = vectorPackSignedHalfWordSignedSaturate(vrA,vrB); +} + +:vpkshus vrD,vrA,vrB is OP=4 & vrD & vrA & vrB & XOP_0_10=270 +{ # TODO definition + vrD = vectorPackSignedHalfWordUnsignedSaturate(vrA,vrB); +} + +:vpkswss vrD,vrA,vrB is OP=4 & vrD & vrA & vrB & XOP_0_10=462 +{ # TODO definition + vrD = vectorPackSignedWordSignedSaturate(vrA,vrB); +} + +:vpkswus vrD,vrA,vrB is OP=4 & vrD & vrA & vrB & XOP_0_10=334 +{ # TODO definition + vrD = vectorPackSignedWordUnsignedSaturate(vrA,vrB); +} + +:vpkuhum vrD,vrA,vrB is OP=4 & vrD & vrA & vrB & XOP_0_10=14 +{ # TODO definition + vrD = vectorPackUnsignedHalfWordUnsignedModulo(vrA,vrB); +} + +:vpkuhus vrD,vrA,vrB is OP=4 & vrD & vrA & vrB & XOP_0_10=142 +{ # TODO definition + vrD = vectorPackUnsignedHalfWordUnsignedSaturate(vrA,vrB); +} + +:vpkuwum vrD,vrA,vrB is OP=4 & vrD & vrA & vrB & XOP_0_10=78 +{ # TODO definition + vrD = vectorPackUnsignedWordUnsignedModulo(vrA,vrB); +} + +:vpkuwus vrD,vrA,vrB is OP=4 & vrD & vrA & vrB & XOP_0_10=206 +{ # TODO definition + vrD = vectorPackUnsignedWordUnsignedSaturate(vrA,vrB); +} + +:vrefp vrD,vrB is OP=4 & vrD & A_BITS=0 & vrB & XOP_0_10=266 +{ # TODO definition + vrD = vectorReciprocalEstimateFloatingPoint(vrB); +} + +:vrfim vrD,vrB is OP=4 & vrD & A_BITS=0 & vrB & XOP_0_10=714 +{ # TODO definition + vrD = vectorRoundToFloatingPointIntegerTowardMinusInfinity(vrB); +} + +:vrfin vrD,vrB is OP=4 & vrD & A_BITS=0 & vrB & XOP_0_10=522 +{ # TODO definition + vrD = vectorRoundToFloatingPointIntegerNearest(vrB); +} + +:vrfip vrD,vrB is OP=4 & vrD & A_BITS=0 & vrB & XOP_0_10=650 +{ # TODO definition + vrD = vectorRoundToFloatingPointIntegerTowardPluInfinity(vrB); +} + +:vrfiz vrD,vrB is OP=4 & vrD & A_BITS=0 & vrB & XOP_0_10=586 +{ # TODO definition + vrD = vectorRoundToFloatingPointIntegerTowardZero(vrB); +} + +:vrlb vrD,vrA,vrB is OP=4 & vrD & vrA & vrB & XOP_0_10=4 +{ # TODO definition + vrD = vectorRotateLeftIntegerByte(vrA,vrB); +} + +:vrlh vrD,vrA,vrB is OP=4 & vrD & vrA & vrB & XOP_0_10=68 +{ # TODO definition + vrD = vectorRotateLeftIntegerHalfWord(vrA,vrB); +} + +:vrlw vrD,vrA,vrB is OP=4 & vrD & vrA & vrB & XOP_0_10=132 +{ # TODO definition + vrD = vectorRotateLeftIntegerWord(vrA,vrB); +} + +:vrsqrtefp vrD,vrB is OP=4 & vrD & A_BITS=0 & vrB & XOP_0_10=330 +{ # TODO definition + vrD = vectorReciprocalSquareRootEstimateFloatingPoint(vrB); +} + +:vsel vrD,vrA,vrB,vrC is OP=4 & vrD & vrA & vrB & vrC & XOP_0_5=42 +{ # TODO definition + vrD = vectorConditionalSelect(vrA,vrB,vrC); +} + +:vsl vrD,vrA,vrB is OP=4 & vrD & vrA & vrB & XOP_0_10=452 +{ # TODO definition + vrD = vectorShiftLeft(vrA,vrB); +} + +:vslb vrD,vrA,vrB is OP=4 & vrD & vrA & vrB & XOP_0_10=260 +{ # TODO definition + vrD = vectorShiftLeftIntegerByte(vrA,vrB); +} + +:vsldoi vrD,vrA,vrB,SHB is OP=4 & vrD & vrA & vrB & BIT_10=0 & SHB & XOP_0_5=44 +{ + tmp:32 = (zext(vrA) << 128) | 
zext(vrB); + tmp = tmp << (SHB:1 * 8); + vrD = tmp[128,128]; +} + +:vslh vrD,vrA,vrB is OP=4 & vrD & vrA & vrB & XOP_0_10=324 +{ # TODO definition + vrD = vectorShiftLeftIntegerHalfWord(vrA,vrB); +} + +:vslo vrD,vrA,vrB is OP=4 & vrD & vrA & vrB & XOP_0_10=1036 +{ # TODO definition + vrD = vectorShiftLeftByOctet(vrA,vrB); +} + +:vslw vrD,vrA,vrB is OP=4 & vrD & vrA & vrB & XOP_0_10=388 +{ # TODO definition + vrD = vectorShiftLeftIntegerWord(vrA,vrB); +} + +:vspltb vrD,vrB,UIMB is OP=4 & vrD & BITS_20_20=0 & UIMB & vrB & XOP_0_10=524 +{ + tmp:1 = (0xF - UIMB) * 8; + tmpa:16 = (vrB >> tmp) & 0xFF; + vrD = tmpa | (tmpa << 8) | (tmpa << 16) | (tmpa << 24) | (tmpa << 32) | (tmpa << 40) | (tmpa << 48) | (tmpa << 56); + vrD = vrD | (tmpa << 64) | (tmpa << 72) | (tmpa << 80) | (tmpa << 88) | (tmpa << 96) | (tmpa << 104) | (tmpa << 112) | (tmpa << 120); +} + +:vsplth vrD,vrB,UIMH is OP=4 & vrD & BITS_19_20=0 & UIMH & vrB & XOP_0_10=588 +{ # TODO definition + vrD = vectorSplatHalfWord(vrB,UIMH:1); +} + +:vspltisb vrD,A_BITSS is OP=4 & vrD & A_BITSS & B_BITS=0 & XOP_0_10=780 +{ # TODO definition + vrD = vectorSplatImmediateSignedByte(A_BITSS:1); +} + +:vspltish vrD,A_BITSS is OP=4 & vrD & A_BITSS & B_BITS=0 & XOP_0_10=844 +{ # TODO definition + vrD = vectorSplatImmediateSignedHalfWord(A_BITSS:1); +} + +:vspltisw vrD,A_BITSS is OP=4 & vrD & A_BITSS & B_BITS=0 & XOP_0_10=908 +{ + tmpw:4 = sext(A_BITSS:1); + tmp:16 = zext(tmpw); + vrD = (tmp) | (tmp << 32) | (tmp << 64) | (tmp << 96); +} + +# A better way to do this would be to make a subtable to interpret +# UIMW into the corresponding subword, then assign the subregisters of vrD +# to that value. +:vspltw vrD,vrB,UIMW is OP=4 & vrD & vrB & BITS_18_20=0 & UIMW & XOP_0_10=652 +{ + local b = (3 - UIMW) * 32; + local tmp:16 = (vrB >> b) & 0xffffffff; + vrD = (tmp) | (tmp << 32) | (tmp << 64) | (tmp << 96); +} + +:vsr vrD,vrA,vrB is OP=4 & vrD & vrA & vrB & XOP_0_10=708 +{ # TODO definition + vrD = vectorShiftRight(vrA,vrB); +} + +:vsrab vrD,vrA,vrB is OP=4 & vrD & vrA & vrB & XOP_0_10=772 +{ # TODO definition + vrD = vectorShiftRightAlgebraicByte(vrA,vrB); +} + +:vsrah vrD,vrA,vrB is OP=4 & vrD & vrA & vrB & XOP_0_10=836 +{ # TODO definition + vrD = vectorShiftRightAlgebraicHalfWord(vrA,vrB); +} + +:vsraw vrD,vrA,vrB is OP=4 & vrD & vrA & vrB & XOP_0_10=900 +{ # TODO definition + vrD = vectorShiftRightAlgebraicWord(vrA,vrB); +} + +:vsrb vrD,vrA,vrB is OP=4 & vrD & vrA & vrB & XOP_0_10=516 +{ # TODO definition + vrD = vectorShiftRightByte(vrA,vrB); +} + +:vsrh vrD,vrA,vrB is OP=4 & vrD & vrA & vrB & XOP_0_10=580 +{ # TODO definition + vrD = vectorShiftRightHalfWord(vrA,vrB); +} + +:vsro vrD,vrA,vrB is OP=4 & vrD & vrA & vrB & XOP_0_10=1100 +{ # TODO definition + vrD = vectorShiftRightByOctet(vrA,vrB); +} + +:vsrw vrD,vrA,vrB is OP=4 & vrD & vrA & vrB & XOP_0_10=644 +{ # TODO definition + vrD = vectorShiftRightWord(vrA,vrB); +} + +:vsubcuw vrD,vrA,vrB is OP=4 & vrD & vrA & vrB & XOP_0_10=1408 +{ # TODO definition + vrD = vectorSubtractCarryoutUnsignedWord(vrA,vrB); +} + +:vsubfp vrD,vrA,vrB is OP=4 & vrD & vrA & vrB & XOP_0_10=74 +{ # TODO definition + vrD = vectorSubtractFloatingPoint(vrA,vrB); +} + +:vsubsbs vrD,vrA,vrB is OP=4 & vrD & vrA & vrB & XOP_0_10=1792 +{ # TODO definition + vrD = vectorSubtractSignedByteSaturate(vrA,vrB); +} + +:vsubshs vrD,vrA,vrB is OP=4 & vrD & vrA & vrB & XOP_0_10=1856 +{ # TODO definition + vrD = vectorSubtractSignedHalfWordSaturate(vrA,vrB); +} + +:vsubsws vrD,vrA,vrB is OP=4 & vrD & vrA & vrB & XOP_0_10=1920 +{ # TODO 
definition + vrD = vectorSubtractSignedWordSaturate(vrA,vrB); +} + +:vsububm vrD,vrA,vrB is OP=4 & vrD & vrA & vrB & XOP_0_10=1024 +{ # TODO definition + vrD = vectorSubtractUnsignedByteModulo(vrA,vrB); +} + +:vsububs vrD,vrA,vrB is OP=4 & vrD & vrA & vrB & XOP_0_10=1536 +{ # TODO definition + vrD = vectorSubtractUnsignedByteSaturate(vrA,vrB); +} + +:vsubuhm vrD,vrA,vrB is OP=4 & vrD & vrA & vrB & XOP_0_10=1088 + & vrA_16_0 & vrA_16_1 & vrA_16_2 & vrA_16_3 & vrA_16_4 & vrA_16_5 & vrA_16_6 & vrA_16_7 + & vrB_16_0 & vrB_16_1 & vrB_16_2 & vrB_16_3 & vrB_16_4 & vrB_16_5 & vrB_16_6 & vrB_16_7 + & vrD_16_0 & vrD_16_1 & vrD_16_2 & vrD_16_3 & vrD_16_4 & vrD_16_5 & vrD_16_6 & vrD_16_7 +{ + vrD_16_0 = vrA_16_0 - vrB_16_0; + vrD_16_1 = vrA_16_1 - vrB_16_1; + vrD_16_2 = vrA_16_2 - vrB_16_2; + vrD_16_3 = vrA_16_3 - vrB_16_3; + vrD_16_4 = vrA_16_4 - vrB_16_4; + vrD_16_5 = vrA_16_5 - vrB_16_5; + vrD_16_6 = vrA_16_6 - vrB_16_6; + vrD_16_7 = vrA_16_7 - vrB_16_7; +} + +:vsubuhs vrD,vrA,vrB is OP=4 & vrD & vrA & vrB & XOP_0_10=1600 +{ # TODO definition + vrD = vectorSubtractUnsignedHalfWordSaturate(vrA,vrB); +} + +:vsubuwm vrD,vrA,vrB is OP=4 & vrD & vrA & vrB & XOP_0_10=1152 +{ # TODO definition + vrD = vectorSubtractUnsignedWordModulo(vrA,vrB); +} + +:vsubuws vrD,vrA,vrB is OP=4 & vrD & vrA & vrB & XOP_0_10=1664 +{ # TODO definition + vrD = vectorSubtractUnsignedWordSaturate(vrA,vrB); +} + +:vsumsws vrD,vrA,vrB is OP=4 & vrD & vrA & vrB & XOP_0_10=1928 +{ # TODO definition + vrD = vectorSumAcrossSignedWordSaturate(vrA,vrB); +} + +:vsum2sws vrD,vrA,vrB is OP=4 & vrD & vrA & vrB & XOP_0_10=1672 +{ # TODO definition + vrD = vectorSumAcrossPartialSignedWordSaturate(vrA,vrB); +} + +:vsum4sbs vrD,vrA,vrB is OP=4 & vrD & vrA & vrB & XOP_0_10=1800 +{ # TODO definition + vrD = vectorSumAcrossPartialSignedByteSaturate(vrA,vrB); +} + +:vsum4shs vrD,vrA,vrB is OP=4 & vrD & vrA & vrB & XOP_0_10=1608 +{ # TODO definition + vrD = vectorSumAcrossPartialSignedHalfWordSaturate(vrA,vrB); +} + +:vsum4ubs vrD,vrA,vrB is OP=4 & vrD & vrA & vrB & XOP_0_10=1544 +{ # TODO definition + vrD = vectorSumAcrossPartialUnsignedByteSaturate(vrA,vrB); +} + +:vupkhpx vrD,vrB is OP=4 & vrD & A_BITS=0 & vrB & XOP_0_10=846 +{ # TODO definition + vrD = vectorUnpackHighPixel16(vrB); +} + +:vupkhsb vrD,vrB is OP=4 & vrD & A_BITS=0 & vrB & XOP_0_10=526 +{ # TODO definition + vrD = vectorUnpackHighSignedByte(vrB); +} + +:vupkhsh vrD,vrB is OP=4 & vrD & A_BITS=0 & vrB & XOP_0_10=590 +{ # TODO definition + vrD = vectorUnpackHighSignedHalfWord(vrB); +} + +:vupklpx vrD,vrB is OP=4 & vrD & A_BITS=0 & vrB & XOP_0_10=974 +{ # TODO definition + vrD = vectorUnpackLowPixel16(vrB); +} + +:vupklsb vrD,vrB is OP=4 & vrD & A_BITS=0 & vrB & XOP_0_10=654 +{ # TODO definition + vrD = vectorUnpackLowSignedByte(vrB); +} + +:vupklsh vrD,vrB is OP=4 & vrD & A_BITS=0 & vrB & XOP_0_10=718 +{ # TODO definition + vrD = vectorUnpackLowSignedHalfWord(vrB); +} + +:vxor vrD,vrA,vrB is OP=4 & vrD & vrA & vrB & XOP_0_10=1220 +{ + vrD = vrA ^ vrB; +} + + +define pcodeop altv207_1; +define pcodeop altv207_2; +define pcodeop altv207_3; +define pcodeop altv207_4; +define pcodeop altv207_5; +define pcodeop altv207_6; +define pcodeop altv207_7; +define pcodeop altv207_8; +define pcodeop altv207_9; +define pcodeop altv207_10; +define pcodeop altv207_11; +define pcodeop altv207_12; +define pcodeop altv207_13; +define pcodeop altv207_14; +define pcodeop altv207_15; +define pcodeop altv207_16; +define pcodeop altv207_17; +define pcodeop altv207_18; +define pcodeop altv207_19; +define 
pcodeop altv207_20; +define pcodeop altv207_21; +define pcodeop altv207_22; +define pcodeop altv207_23; +define pcodeop altv207_24; +define pcodeop altv207_25; +define pcodeop altv207_26; +define pcodeop altv207_27; +define pcodeop altv207_28; +define pcodeop altv207_29; +define pcodeop altv207_30; +define pcodeop altv207_31; +define pcodeop altv207_32; +define pcodeop altv207_33; +define pcodeop altv207_34; +define pcodeop altv207_35; +define pcodeop altv207_36; +define pcodeop altv207_37; +define pcodeop altv207_38; +define pcodeop altv207_39; +define pcodeop altv207_40; +define pcodeop altv207_41; +define pcodeop altv207_42; +define pcodeop altv207_43; +define pcodeop altv207_44; +define pcodeop altv207_45; +define pcodeop altv207_46; +define pcodeop altv207_47; +define pcodeop altv207_48; +define pcodeop altv207_49; +define pcodeop altv207_50; +define pcodeop altv207_51; +define pcodeop altv207_52; +define pcodeop altv207_53; +define pcodeop altv207_54; +define pcodeop altv207_55; +define pcodeop altv207_56; +define pcodeop altv207_57; +define pcodeop altv207_58; +define pcodeop altv207_59; +define pcodeop altv207_60; +define pcodeop altv207_61; +define pcodeop altv207_62; +define pcodeop altv207_63; +define pcodeop altv207_64; +define pcodeop altv207_65; + +define pcodeop altv300_1; +define pcodeop altv300_2; +define pcodeop altv300_3; +define pcodeop altv300_4; +define pcodeop altv300_5; +define pcodeop altv300_6; +define pcodeop altv300_7; +define pcodeop altv300_8; +define pcodeop altv300_9; +define pcodeop altv300_10; +define pcodeop altv300_11; +define pcodeop altv300_12; +define pcodeop altv300_13; +define pcodeop altv300_14; +define pcodeop altv300_15; +define pcodeop altv300_16; +define pcodeop altv300_17; +define pcodeop altv300_18; +define pcodeop altv300_19; +define pcodeop altv300_20; +define pcodeop altv300_21; +define pcodeop altv300_22; +define pcodeop altv300_23; +define pcodeop altv300_24; +define pcodeop altv300_25; +define pcodeop altv300_26; +define pcodeop altv300_27; +define pcodeop altv300_28; +define pcodeop altv300_29; +define pcodeop altv300_30; +define pcodeop altv300_31; +define pcodeop altv300_32; +define pcodeop altv300_33; +define pcodeop altv300_34; +define pcodeop altv300_35; +define pcodeop altv300_36; +define pcodeop altv300_41; +define pcodeop altv300_42; +define pcodeop altv300_43; +define pcodeop altv300_44; +define pcodeop altv300_45; +define pcodeop altv300_46; +define pcodeop altv300_47; +define pcodeop altv300_48; +define pcodeop altv300_49; +define pcodeop altv300_50; +define pcodeop altv300_51; +define pcodeop altv300_52; +define pcodeop altv300_53; +define pcodeop altv300_54; +define pcodeop altv300_55; +define pcodeop altv300_56; +define pcodeop altv300_57; +define pcodeop altv300_58; +define pcodeop altv300_59; +define pcodeop altv300_60; +define pcodeop altv300_61; +define pcodeop altv300_62; +define pcodeop altv300_63; +define pcodeop altv300_64; +define pcodeop altv300_65; +define pcodeop altv300_66; +define pcodeop altv300_67; +define pcodeop altv300_68; +define pcodeop altv300_69; +define pcodeop altv300_70; +define pcodeop altv300_71; + +################# +# 2.07 additions +:bcdadd. vrD,vrA,vrB,PS is OP=4 & BIT_10=1 & XOP_0_8=1 & vrA & vrB & vrD & PS { + vrD = altv207_64(vrA,vrB,PS:1); +} + +:bcdsub. 
vrD,vrA,vrB,PS is OP=4 & BIT_10=1 & XOP_0_8=65 & vrA & vrB & vrD & PS { + vrD = altv207_65(vrA,vrB,PS:1); +} + + +:vaddcuq vrD,vrA,vrB is OP=4 & vrD & vrA & vrB & XOP_0_10=320 { + vrD = altv207_1(vrA,vrB); +} + +:vaddecuq vrD,vrA,vrB,vrC is OP=4 & vrD & vrA & vrB & vrC & XOP_0_5=61 { + vrD = altv207_2(vrA,vrB,vrC); +} + +:vaddeuqm vrD,vrA,vrB,vrC is OP=4 & vrD & vrA & vrB & vrC & XOP_0_5=60 { + vrD = altv207_3(vrA,vrB,vrC); +} + +:vaddudm vrD,vrA,vrB is OP=4 & vrD & vrA & vrB & XOP_0_10=192 { + vrD = altv207_4(vrA,vrB); +} + +:vadduqm vrD,vrA,vrB is OP=4 & vrD & vrA & vrB & XOP_0_10=256 { + vrD = altv207_5(vrA,vrB); +} + +:vbpermq vrD,vrA,vrB is OP=4 & vrD & vrA & vrB & XOP_0_10=1356 { + vrD = altv207_6(vrA,vrB); +} + +:vcipher vrD,vrA,vrB is OP=4 & vrD & vrA & vrB & XOP_0_10=1288 { + vrD = altv207_7(vrA,vrB); +} + +:vcipherlast vrD,vrA,vrB is OP=4 & vrD & vrA & vrB & XOP_0_10=1289 { + vrD = altv207_8(vrA,vrB); +} + +:vclzb vrD,vrB is OP=4 & vrD & vrB & BITS_16_20=0 & XOP_0_10=1794 { + vrD = altv207_9(vrB); +} + +:vclzd vrD,vrB is OP=4 & vrD & vrB & BITS_16_20=0 & XOP_0_10=1986 { + vrD = altv207_10(vrB); +} + +:vclzh vrD,vrB is OP=4 & vrD & vrB & BITS_16_20=0 & XOP_0_10=1858 { + vrD = altv207_11(vrB); +} + +:vclzw vrD,vrB is OP=4 & vrD & vrB & BITS_16_20=0 & XOP_0_10=1922 { + vrD = altv207_12(vrB); +} + +:vcmpequd vrD,vrA,vrB is OP=4 & vrD & vrA & vrB & Rc2=0 & XOP_0_9=199 { + vrD = altv207_13(vrA,vrB); +} + +:vcmpequd. vrD,vrA,vrB is OP=4 & vrD & vrA & vrB & Rc2=1 & XOP_0_9=199 { + vrD = altv207_14(vrA,vrB); +} + +:vcmpgtsd vrD,vrA,vrB is OP=4 & vrD & vrA & vrB & Rc2=0 & XOP_0_9=967 { + vrD = altv207_15(vrA,vrB); +} + +:vcmpgtsd. vrD,vrA,vrB is OP=4 & vrD & vrA & vrB & Rc2=1 & XOP_0_9=967 { + vrD = altv207_16(vrA,vrB); +} + +:vcmpgtud vrD,vrA,vrB is OP=4 & vrD & vrA & vrB & Rc2=0 & XOP_0_9=711 { + vrD = altv207_17(vrA,vrB); +} + +:vcmpgtud. 
vrD,vrA,vrB is OP=4 & vrD & vrA & vrB & Rc2=1 & XOP_0_9=711 { + vrD = altv207_18(vrA,vrB); +} + +:veqv vrD,vrA,vrB is OP=4 & vrD & vrA & vrB & XOP_0_10=1668 { + vrD = altv207_19(vrA,vrB); +} + +:vgbbd vrD,vrB is OP=4 & vrD & vrB & BITS_16_20=0 & XOP_0_10=1292 { + vrD = altv207_20(vrB); +} + +:vmaxsd vrD,vrA,vrB is OP=4 & vrD & vrA & vrB & XOP_0_10=450 { + vrD = altv207_21(vrA,vrB); +} + +:vmaxud vrD,vrA,vrB is OP=4 & vrD & vrA & vrB & XOP_0_10=194 { + vrD = altv207_22(vrA,vrB); +} + +:vminsd vrD,vrA,vrB is OP=4 & vrD & vrA & vrB & XOP_0_10=962 { + vrD = altv207_23(vrA,vrB); +} + +:vminud vrD,vrA,vrB is OP=4 & vrD & vrA & vrB & XOP_0_10=706 { + vrD = altv207_24(vrA,vrB); +} + +:vmrgew vrD,vrA,vrB is OP=4 & vrD & vrA & vrB & XOP_0_10=1932 { + vrD = altv207_25(vrA,vrB); +} + +:vmrgow vrD,vrA,vrB is OP=4 & vrD & vrA & vrB & XOP_0_10=1676 { + vrD = altv207_26(vrA,vrB); +} + +:vmulesw vrD,vrA,vrB is OP=4 & vrD & vrA & vrB & XOP_0_10=904 { + vrD = altv207_27(vrA,vrB); +} + +:vmuleuw vrD,vrA,vrB is OP=4 & vrD & vrA & vrB & XOP_0_10=648 { + vrD = altv207_28(vrA,vrB); +} + +:vmulosw vrD,vrA,vrB is OP=4 & vrD & vrA & vrB & XOP_0_10=392 { + vrD = altv207_29(vrA,vrB); +} + +:vmulouw vrD,vrA,vrB is OP=4 & vrD & vrA & vrB & XOP_0_10=136 { + vrD = altv207_30(vrA,vrB); +} + +:vmuluwm vrD,vrA,vrB is OP=4 & vrD & vrA & vrB & XOP_0_10=137 { + vrD = altv207_31(vrA,vrB); +} + +:vnand vrD,vrA,vrB is OP=4 & vrD & vrA & vrB & XOP_0_10=1412 { + vrD = altv207_32(vrA,vrB); +} + +:vncipher vrD,vrA,vrB is OP=4 & vrD & vrA & vrB & XOP_0_10=1352 { + vrD = altv207_33(vrA,vrB); +} + +:vncipherlast vrD,vrA,vrB is OP=4 & vrD & vrA & vrB & XOP_0_10=1353 { + vrD = altv207_34(vrA,vrB); +} + +:vorc vrD,vrA,vrB is OP=4 & vrD & vrA & vrB & XOP_0_10=1348 { + vrD = altv207_35(vrA,vrB); +} + +:vpermxor vrD,vrA,vrB,vrC is OP=4 & vrD & vrA & vrB & vrC & XOP_0_5=45 { + vrD = altv207_36(vrA,vrB,vrC); +} + +:vpksdss vrD,vrA,vrB is OP=4 & vrD & vrA & vrB & XOP_0_10=1486 { + vrD = altv207_37(vrA,vrB); +} + +:vpksdus vrD,vrA,vrB is OP=4 & vrD & vrA & vrB & XOP_0_10=1358 { + vrD = altv207_38(vrA,vrB); +} + +:vpkudum vrD,vrA,vrB is OP=4 & vrD & vrA & vrB & XOP_0_10=1102 { + vrD = altv207_39(vrA,vrB); +} + +:vpkudus vrD,vrA,vrB is OP=4 & vrD & vrA & vrB & XOP_0_10=1230 { + vrD = altv207_41(vrA,vrB); +} + +:vpmsumb vrD,vrA,vrB is OP=4 & vrD & vrA & vrB & XOP_0_10=1032 { + vrD = altv207_42(vrA,vrB); +} + +:vpmsumd vrD,vrA,vrB is OP=4 & vrD & vrA & vrB & XOP_0_10=1224 { + vrD = altv207_43(vrA,vrB); +} + +:vpmsumh vrD,vrA,vrB is OP=4 & vrD & vrA & vrB & XOP_0_10=1096 { + vrD = altv207_44(vrA,vrB); +} + +:vpmsumw vrD,vrA,vrB is OP=4 & vrD & vrA & vrB & XOP_0_10=1160 { + vrD = altv207_45(vrA,vrB); +} + +:vpopcntb vrD,vrB is OP=4 & vrD & vrB & BITS_16_20=0 & XOP_0_10=1795 { + vrD = altv207_46(vrB); +} + +:vpopcntd vrD,vrB is OP=4 & vrD & vrB & BITS_16_20=0 & XOP_0_10=1987 { + vrD = altv207_47(vrB); +} + +:vpopcnth vrD,vrB is OP=4 & vrD & vrB & BITS_16_20=0 & XOP_0_10=1859 { + vrD = altv207_48(vrB); +} + +:vpopcntw vrD,vrB is OP=4 & vrD & vrB & BITS_16_20=0 & XOP_0_10=1923 { + vrD = altv207_49(vrB); +} + +:vrld vrD,vrA,vrB is OP=4 & vrD & vrA & vrB & XOP_0_10=196 { + vrD = altv207_50(vrA,vrB); +} + +:vsbox vrD,vrA is OP=4 & vrD & vrA & BITS_11_15=0 & XOP_0_10=1480 { + vrD = altv207_51(vrA); +} + +:vshasigmad vrD,vrA,ST,SIX is OP=4 & vrD & vrA & ST & SIX & XOP_0_10=1730 { + vrD = altv207_52(vrA,ST:1,SIX:1); +} + +:vshasigmaw vrD,vrA,ST,SIX is OP=4 & vrD & vrA & ST & SIX & XOP_0_10=1666 { + vrD = altv207_53(vrA,ST:1,SIX:1); +} + +:vsld vrD,vrA,vrB is OP=4 & 
vrD & vrA & vrB & XOP_0_10=1476 { + vrD = altv207_54(vrA,vrB); +} + +:vsrad vrD,vrA,vrB is OP=4 & vrD & vrA & vrB & XOP_0_10=964 { + vrD = altv207_55(vrA,vrB); +} + +:vsrd vrD,vrA,vrB is OP=4 & vrD & vrA & vrB & XOP_0_10=1732 { + vrD = altv207_56(vrA,vrB); +} + +:vsubcuq vrD,vrA,vrB is OP=4 & vrD & vrA & vrB & XOP_0_10=1344 { + vrD = altv207_57(vrA,vrB); +} + +:vsubecuq vrD,vrA,vrB,vrC is OP=4 & vrD & vrA & vrB & vrC & XOP_0_5=63 { + vrD = altv207_58(vrA,vrB,vrC); +} + +:vsubeuqm vrD,vrA,vrB,vrC is OP=4 & vrD & vrA & vrB & vrC & XOP_0_5=62 { + vrD = altv207_59(vrA,vrB,vrC); +} + +:vsubudm vrD,vrA,vrB is OP=4 & vrD & vrA & vrB & XOP_0_10=1216 { + vrD = altv207_60(vrA,vrB); +} + +:vsubuqm vrD,vrA,vrB is OP=4 & vrD & vrA & vrB & XOP_0_10=1280 { + vrD = altv207_61(vrA,vrB); +} + +:vupkhsw vrD,vrB is OP=4 & vrD & vrB & BITS_16_20=0 & XOP_0_10=1614 { + vrD = altv207_62(vrB); +} + +:vupklsw vrD,vrB is OP=4 & vrD & vrB & BITS_16_20=0 & XOP_0_10=1742 { + vrD = altv207_63(vrB); +} + +################### +# v3.0 + +:bcdcfn. vrD,vrB,PS is OP=4 & vrD & vrB & BITS_16_20=7 & XOP_0_8=385 & BIT_10=1 & PS { + vrD = altv300_1(vrB,PS:1); +} + +:bcdcfsq. vrD,vrB,PS is OP=4 & vrD & vrB & BITS_16_20=2 & XOP_0_8=385 & BIT_10=1 & PS { + vrD = altv300_2(vrB,PS:1); +} + +:bcdcfz. vrD,vrB,PS is OP=4 & vrD & vrB & BITS_16_20=6 & XOP_0_8=385 & BIT_10=1 & PS { + vrD = altv300_3(vrB,PS:1); +} + +:bcdcpsgn. vrD,vrA,vrB is OP=4 & vrD & vrA & vrB & XOP_0_10=833 { + vrD = altv300_4(vrA,vrB); +} + +:bcdctn. vrD,vrB is OP=4 & vrD & vrB & BITS_16_20=5 & XOP_0_8=385 & BIT_10=1 & BIT_9=0 { + vrD = altv300_5(vrB); +} + +:bcdctsq. vrD,vrB is OP=4 & vrD & vrB & BITS_16_20=0 & XOP_0_8=385 & BIT_10=1 & BIT_9=0 { + vrD = altv300_6(vrB); +} + +:bcdctz. vrD,vrB,PS is OP=4 & vrD & vrB & BITS_16_20=4 & XOP_0_8=385 & BIT_10=1 & PS { + vrD = altv300_7(vrB,PS:1); +} + +:bcds. vrD,vrA,vrB,PS is OP=4 & vrD & vrA & vrB & XOP_0_8=193 & BIT_10=1 & PS { + vrD = altv300_8(vrA,vrB,PS:1); +} + +:bcdsetsgn. vrD,vrB,PS is OP=4 & vrD & vrB & BITS_16_20=31 & XOP_0_8=385 & BIT_10=1 & PS { + vrD = altv300_9(vrB,PS:1); +} + +:bcdsr. vrD,vrA,vrB,PS is OP=4 & vrD & vrA & vrB & XOP_0_8=449 & BIT_10=1 & PS { + vrD = altv300_10(vrA,vrB,PS:1); +} + +:bcdtrunc. vrD,vrA,vrB,PS is OP=4 & vrD & vrA & vrB & XOP_0_8=257 & BIT_10=1 & PS { + vrD = altv300_12(vrA,vrB,PS:1); +} + +:bcdus. vrD,vrA,vrB is OP=4 & vrD & vrA & vrB & XOP_0_8=129 & BIT_10=1 { + vrD = altv300_13(vrA,vrB); +} + +:bcdutrunc. vrD,vrA,vrB is OP=4 & vrD & vrA & vrB & XOP_0_8=321 & BIT_10=1 { + vrD = altv300_14(vrA,vrB); +} + +:vabsdub vrD,vrA,vrB is OP=4 & vrD & vrA & vrB & XOP_0_10=1027 { + vrD = altv300_15(vrA,vrB); +} + +:vabsduh vrD,vrA,vrB is OP=4 & vrD & vrA & vrB & XOP_0_10=1091 { + vrD = altv300_16(vrA,vrB); +} + +:vabsduw vrD,vrA,vrB is OP=4 & vrD & vrA & vrB & XOP_0_10=1155 { + vrD = altv300_17(vrA,vrB); +} + +:vbpermd vrD,vrA,vrB is OP=4 & vrD & vrA & vrB & XOP_0_10=1484 { + vrD = altv300_18(vrA,vrB); +} + +:vclzlsbb vrD,vrB is OP=4 & vrD & vrB & BITS_16_20=0 & XOP_0_10=1538 { + vrD = altv300_19(vrB); +} + +:vcmpneb vrD,vrA,vrB is OP=4 & vrD & vrA & vrB & Rc2=0 & XOP_0_9=7 { + vrD = altv300_20(vrA,vrB); +} + +:vcmpneb. vrD,vrA,vrB is OP=4 & vrD & vrA & vrB & Rc2=1 & XOP_0_9=7 { + vrD = altv300_21(vrA,vrB); +} + +:vcmpneh vrD,vrA,vrB is OP=4 & vrD & vrA & vrB & Rc2=0 & XOP_0_9=71 { + vrD = altv300_22(vrA,vrB); +} + +:vcmpneh. 
vrD,vrA,vrB is OP=4 & vrD & vrA & vrB & Rc2=1 & XOP_0_9=71 { + vrD = altv300_23(vrA,vrB); +} + +:vcmpnew vrD,vrA,vrB is OP=4 & vrD & vrA & vrB & Rc2=0 & XOP_0_9=135 { + vrD = altv300_24(vrA,vrB); +} + +:vcmpnew. vrD,vrA,vrB is OP=4 & vrD & vrA & vrB & Rc2=1 & XOP_0_9=135 { + vrD = altv300_25(vrA,vrB); +} + +:vcmpnezb vrD,vrA,vrB is OP=4 & vrD & vrA & vrB & Rc2=0 & XOP_0_9=263 { + vrD = altv300_26(vrA,vrB); +} + +:vcmpnezb. vrD,vrA,vrB is OP=4 & vrD & vrA & vrB & Rc2=1 & XOP_0_9=263 { + vrD = altv300_27(vrA,vrB); +} + +:vcmpnezh vrD,vrA,vrB is OP=4 & vrD & vrA & vrB & Rc2=0 & XOP_0_9=327 { + vrD = altv300_28(vrA,vrB); +} + +:vcmpnezh. vrD,vrA,vrB is OP=4 & vrD & vrA & vrB & Rc2=1 & XOP_0_9=327 { + vrD = altv300_29(vrA,vrB); +} + +:vcmpnezw vrD,vrA,vrB is OP=4 & vrD & vrA & vrB & Rc2=0 & XOP_0_9=391 { + vrD = altv300_30(vrA,vrB); +} + +:vcmpnezw. vrD,vrA,vrB is OP=4 & vrD & vrA & vrB & Rc2=1 & XOP_0_9=391 { + vrD = altv300_31(vrA,vrB); +} + +:vctzb vrD,vrB is OP=4 & vrD & vrB & BITS_16_20=28 & XOP_0_10=1538 { + vrD = altv300_32(vrB); +} + +:vctzh vrD,vrB is OP=4 & vrD & vrB & BITS_16_20=29 & XOP_0_10=1538 { + vrD = altv300_33(vrB); +} + +:vctzd vrD,vrB is OP=4 & vrD & vrB & BITS_16_20=31 & XOP_0_10=1538 { + vrD = altv300_34(vrB); +} + +:vctzlsbb vrD,vrB is OP=4 & vrD & vrB & BITS_16_20=1 & XOP_0_10=1538 { + vrD = altv300_35(vrB); +} + +:vctzw vrD,vrB is OP=4 & vrD & vrB & BITS_16_20=30 & XOP_0_10=1538 { + vrD = altv300_36(vrB); +} + +:vextractd vrD,vrB,UIMB is OP=4 & vrD & BITS_20_20=0 & UIMB & vrB & XOP_0_10=717 { + # if UIMB > 8 the result is undefined + vrD = (vrB >> (8 * (8 - UIMB))) & 0xffffffffffffffff; +} + +:vextractub vrD,vrB,UIMB is OP=4 & vrD & BITS_20_20=0 & UIMB & vrB & XOP_0_10=525 { + # if UIMB > 15 the result is undefined + vrD = (vrB >> (8 * (15 - UIMB))) & 0xff; +} + +:vextractuh vrD,vrB,UIMB is OP=4 & vrD & BITS_20_20=0 & UIMB & vrB & XOP_0_10=589 { + # if UIMB > 14 the result is undefined + vrD = (vrB >> (8 * (14 - UIMB))) & 0xffff; +} + +:vextractuw vrD,vrB,UIMB is OP=4 & vrD & BITS_20_20=0 & UIMB & vrB & XOP_0_10=653 { + # if UIMB > 12 the result is undefined + vrD = (vrB >> (8 * (12 - UIMB))) & 0xffffffff; +}
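The inline vextract* semantics above, like lvebx and vspltb earlier in this file, all apply the same index flip: the manual numbers vector bytes from the most-significant end, so byte i of a 128-bit value starts 8 * (15 - i) bits above the least-significant bit. With 8 bits per byte, the shift amounts come out as 8 * (15 - UIMB), 8 * (14 - UIMB) and 8 * (12 - UIMB) for byte, halfword and word extracts, matching the 8 * (8 - UIMB) used by vextractd. A small reference model in C++, for illustration only; the helper and the two-halves representation are assumptions, not part of this patch:

#include <cstdint>

// Sketch only: extract the unsigned byte at manual index i (0 = MSB end,
// 15 = LSB end) from a 128-bit value held as two 64-bit halves {hi, lo}.
uint8_t extract_byte_be(uint64_t hi, uint64_t lo, unsigned i)
{
    unsigned shift = 8u * (15u - i);          // bit offset from the LSB
    uint64_t half = (shift >= 64) ? hi : lo;  // pick the half holding the byte
    return static_cast<uint8_t>(half >> (shift & 63u));
}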
:vextsb2d vrD,vrB is OP=4 & vrD & vrB & BITS_16_20=24 & XOP_0_10=1538 { + vrD = altv300_41(vrB); +} + +:vextsb2w vrD,vrB is OP=4 & vrD & vrB & BITS_16_20=16 & XOP_0_10=1538 { + vrD = altv300_42(vrB); +} + +:vextsh2d vrD,vrB is OP=4 & vrD & vrB & BITS_16_20=25 & XOP_0_10=1538 { + vrD = altv300_43(vrB); +} + +:vextsh2w vrD,vrB is OP=4 & vrD & vrB & BITS_16_20=17 & XOP_0_10=1538 { + vrD = altv300_44(vrB); +} + +:vextsw2d vrD,vrB is OP=4 & vrD & vrB & BITS_16_20=26 & XOP_0_10=1538 { + vrD = altv300_45(vrB); +} + +:vextublx D,A,vrB is OP=4 & D & A & vrB & XOP_0_10=1549 { + D = altv300_46(A,vrB); +} + +:vextubrx D,A,vrB is OP=4 & D & A & vrB & XOP_0_10=1805 { + D = altv300_47(A,vrB); +} + +:vextuhlx D,A,vrB is OP=4 & D & A & vrB & XOP_0_10=1613 { + D = altv300_48(A,vrB); +} + +:vextuhrx D,A,vrB is OP=4 & D & A & vrB & XOP_0_10=1869 { + D = altv300_49(A,vrB); +} + +# beware the backwards bit/byte ordering in the manual +:vextuwlx D,A,vrB is OP=4 & D & A & vrB & XOP_0_10=1677 +{ + local offs:2 = (12 - zext(A[0,4])) * 8; + local out:16 = (vrB >> offs) & 0xffffffff; + D = out:4; +} + +:vextuwrx D,A,vrB is OP=4 & D & A & vrB & XOP_0_10=1933 { + D = altv300_51(A,vrB); +} + +:vinsertb vrD,vrB,UIMB is OP=4 & vrD & BITS_20_20=0 & UIMB & vrB & XOP_0_10=781 { + vrD = altv300_52(vrB,UIMB:1); +} + +:vinsertd vrD,vrB,UIMB is OP=4 & vrD & BITS_20_20=0 & UIMB & vrB & XOP_0_10=973 { + vrD = altv300_53(vrB,UIMB:1); +} + +:vinserth vrD,vrB,UIMB is OP=4 & vrD & BITS_20_20=0 & UIMB & vrB & XOP_0_10=845 { + vrD = altv300_54(vrB,UIMB:1); +} + +:vinsertw vrD,vrB,UIMB is OP=4 & vrD & BITS_20_20=0 & UIMB & vrB & XOP_0_10=909 { + vrD = altv300_55(vrB,UIMB:1); +} + +:vmul10cuq vrD,vrA is OP=4 & vrD & vrA & BITS_11_15=0 & XOP_0_10=1 { + vrD = altv300_56(vrA); +} + +:vmul10ecuq vrD,vrA,vrB is OP=4 & vrD & vrA & vrB & XOP_0_10=65 { + vrD = altv300_57(vrA,vrB); +} + +:vmul10euq vrD,vrA,vrB is OP=4 & vrD & vrA & vrB & XOP_0_10=577 { + vrD = altv300_58(vrA,vrB); +} + +:vmul10uq vrD,vrA is OP=4 & vrD & vrA & BITS_11_15=0 & XOP_0_10=513 { + vrD = altv300_59(vrA); +} + +:vnegd vrD,vrB is OP=4 & vrD & vrB & BITS_16_20=7 & XOP_0_10=1538 { + vrD = altv300_60(vrB); +} + +:vnegw vrD,vrB is OP=4 & vrD & vrB & BITS_16_20=6 & XOP_0_10=1538 { + vrD = altv300_61(vrB); +} + +:vpermr vrD,vrA,vrB,vrC is OP=4 & vrD & vrA & vrB & vrC & XOP_0_5=59 { + vrD = altv300_62(vrA,vrB,vrC); +} + +:vprtybd vrD,vrB is OP=4 & vrD & vrB & BITS_16_20=9 & XOP_0_10=1538 { + vrD = altv300_63(vrB); +} + +:vprtybq vrD,vrB is OP=4 & vrD & vrB & BITS_16_20=10 & XOP_0_10=1538 { + vrD = altv300_64(vrB); +} + +:vprtybw vrD,vrB is OP=4 & vrD & vrB & BITS_16_20=8 & XOP_0_10=1538 { + vrD = altv300_65(vrB); +} + +:vrldmi vrD,vrA,vrB is OP=4 & vrD & vrA & vrB & XOP_0_10=197 { + vrD = altv300_66(vrA,vrB); +} + +:vrldnm vrD,vrA,vrB is OP=4 & vrD & vrA & vrB & XOP_0_10=453 { + vrD = altv300_67(vrA,vrB); +} + +:vrlwmi vrD,vrA,vrB is OP=4 & vrD & vrA & vrB & XOP_0_10=133 { + vrD = altv300_68(vrA,vrB); +} + +:vrlwnm vrD,vrA,vrB is OP=4 & vrD & vrA & vrB & XOP_0_10=389 { + vrD = altv300_69(vrA,vrB); +} + +:vslv vrD,vrA,vrB is OP=4 & vrD & vrA & vrB & XOP_0_10=1860 { + vrD = altv300_70(vrA,vrB); +} + +:vsrv vrD,vrA,vrB is OP=4 & vrD & vrA & vrB & XOP_0_10=1796 { + vrD = altv300_71(vrA,vrB); +} diff --git a/src/third-party/sleigh/processors/PowerPC/data/languages/evx.sinc b/src/third-party/sleigh/processors/PowerPC/data/languages/evx.sinc new file mode 100644 index 00000000..b47a5f18 --- /dev/null +++ b/src/third-party/sleigh/processors/PowerPC/data/languages/evx.sinc @@ -0,0 +1,129 @@ + +@include "Scalar_SPFP.sinc" +@ifdef IS_ISA +@include "SPE_APU.sinc" +@endif + +define pcodeop vectorExclusiveOr; +define pcodeop vectorMergeHigh; +define pcodeop vectorMergeLow; +define pcodeop vectorLoadDoubleWordIntoDoubleWordIndexed; +define pcodeop vectorStoreDoubleOfDoubleIndexed; +define pcodeop initializeAccumulator; +define pcodeop vectorShiftRightWordSigned; +define pcodeop vectorShiftRightWordUnsigned; + +:evxor vrD_64_0,vrA_64_0,vrB_64_0 is OP=4 & vrD_64_0 & vrA_64_0 & vrB_64_0 & XOP_0_10=534 +{ + vrD_64_0 = vrA_64_0 ^ vrB_64_0; +} + +:evmergehi S,A,B is OP=4 & S & A & B & XOP_0_10=556 +{ + vectorMergeHigh(S,A,B); +} + +:evmergelo S,A,B is OP=4 & S & A & B & XOP_0_10=557 +{ + vectorMergeLow(S,A,B); +} + + +:evldd RT,dUI16PlusRAOrZeroAddress is OP=4 & RT & dUI16PlusRAOrZeroAddress & XOP_0_10=769 +{ + ea:$(REGISTER_SIZE) = dUI16PlusRAOrZeroAddress; + RT = *:8 ($(EATRUNC)); +} + +:evlddx RT,RA_OR_ZERO,RB is OP=4 & RT & RA_OR_ZERO & RB & XOP_0_10=768 +{ + ea:$(REGISTER_SIZE) = RA_OR_ZERO + RB; + RT = *:8 ($(EATRUNC)); +} + +@ifndef IS_ISA +:evsrws S,A,B is OP=4 & S & A & B & XOP_0_10=545 +{ + vectorShiftRightWordSigned(S,A,B); +} +@endif + +@ifndef IS_ISA +:evsrwu S,A,B is OP=4 & S & A & B & XOP_0_10=544 +{ + vectorShiftRightWordUnsigned(S,A,B); +} +@endif + +:evstdd RS,dUI16PlusRAOrZeroAddress is OP=4 & RS & dUI16PlusRAOrZeroAddress & 
XOP_0_10=801 +{ + ea:$(REGISTER_SIZE) = dUI16PlusRAOrZeroAddress; + *:8 ($(EATRUNC)) = RS; +} + +:evstddx RS,RA_OR_ZERO,RB is OP=4 & RS & RA_OR_ZERO & RB & XOP_0_10=800 +{ + ea:$(REGISTER_SIZE) = RA_OR_ZERO + RB; + *:8 ($(EATRUNC)) = RS; +} + +:evmra RT,RA is OP=4 & RT & RA & BITS_11_15=0 & XOP_0_10=1220 +{ + ACC = zext(RA); + RT = RA; +} + +# evmergehilo rD,rA,rB 010 0010 1110 +define pcodeop VectorMergeHighLow; +:evmergehilo D,A,B is OP=4 & A & B & D & XOP_0_10=558 { + local lo = (A & 0x00000000FFFFFFFF); + local hi = ((A & 0xFFFFFFFF00000000) >> 32); + #local b_lo:$(REGISTER_SIZE) = (B & 0x00000000FFFFFFFF); + local b_hi:$(REGISTER_SIZE) = ((B & 0xFFFFFFFF00000000) >> 32); + + lo = lo; + hi = b_hi; + + D = ((hi << 32) | lo); +} + +# evmergelohi rD,rA,rB 010 0010 1111 +:evmergelohi D,A,B is OP=4 & D & A & B & XOP_0_10=559 { + local lo = (A & 0x00000000FFFFFFFF); + local hi = ((A & 0xFFFFFFFF00000000) >> 32); + local b_lo:$(REGISTER_SIZE) = (B & 0x00000000FFFFFFFF); + #local b_hi:$(REGISTER_SIZE) = ((B & 0xFFFFFFFF00000000) >> 32); + + lo = lo; + hi = b_lo; + + D = ((hi << 32) | lo); +} + +# evstwwe rS,rA,UIMM 011 0011 1001 +:evstwwe RS,dUI16PlusRAOrZeroAddress is OP=4 & RS & dUI16PlusRAOrZeroAddress & XOP_0_10=0x339 +{ + ea:$(REGISTER_SIZE) = dUI16PlusRAOrZeroAddress; + *:4 ($(EATRUNC)) = RS:4; +} + +# evstwwex rS,rA,rB 011 0011 1000 +:evstwwex RS,RA_OR_ZERO,RB is OP=4 & RS & RA_OR_ZERO & RB & XOP_0_10=0x338 +{ + ea:$(REGISTER_SIZE) = RA_OR_ZERO + RB; + *:4 ($(EATRUNC)) = RS:4; +} + +:lvx vrD, RA_OR_ZERO, RB is OP=31 & vrD & RA_OR_ZERO & RB & XOP_1_10=103 & BIT_0=0 +{ + ea:$(REGISTER_SIZE) = RA_OR_ZERO + RB; + vrD = *:16 ($(EATRUNC)); +} + +:stvx vrS, RA_OR_ZERO, RB is OP=31 & vrS & RA_OR_ZERO & RB & XOP_1_10=231 & BIT_0=0 +{ + ea:$(REGISTER_SIZE) = RA_OR_ZERO + RB; + *:16 ($(EATRUNC)) = vrS; +} + + diff --git a/src/third-party/sleigh/processors/PowerPC/data/languages/g2.sinc b/src/third-party/sleigh/processors/PowerPC/data/languages/g2.sinc new file mode 100644 index 00000000..1e1466ae --- /dev/null +++ b/src/third-party/sleigh/processors/PowerPC/data/languages/g2.sinc @@ -0,0 +1,14 @@ + +define pcodeop tlbli; +define pcodeop tlbld; + +:tlbld B is $(NOTVLE) & OP=31 & BITS_21_25=0 & BITS_16_20=0 & B & XOP_1_10=978 & BIT_0=0 +{ + tlbld(B); +} + +:tlbli B is $(NOTVLE) & OP=31 & BITS_21_25=0 & BITS_16_20=0 & B & XOP_1_10=1010 & BIT_0=0 +{ + tlbli(B); +} + diff --git a/src/third-party/sleigh/processors/PowerPC/data/languages/lmwInstructions.sinc b/src/third-party/sleigh/processors/PowerPC/data/languages/lmwInstructions.sinc new file mode 100644 index 00000000..d213a8e3 --- /dev/null +++ b/src/third-party/sleigh/processors/PowerPC/data/languages/lmwInstructions.sinc @@ -0,0 +1,101 @@ +LDMR0: is lsmul=1 {} +LDMR0: is epsilon { loadReg(r0); } + +LDMR1: is lsmul=2 {} +LDMR1: is LDMR0 { build LDMR0; loadReg(r1); } + +LDMR2: is lsmul=3 {} +LDMR2: is LDMR1 { build LDMR1; loadReg(r2); } + +LDMR3: is lsmul=4 {} +LDMR3: is LDMR2 { build LDMR2; loadReg(r3); } + +LDMR4: is lsmul=5 {} +LDMR4: is LDMR3 { build LDMR3; loadReg(r4); } + +LDMR5: is lsmul=6 {} +LDMR5: is LDMR4 { build LDMR4; loadReg(r5); } + +LDMR6: is lsmul=7 {} +LDMR6: is LDMR5 { build LDMR5; loadReg(r6); } + +LDMR7: is lsmul=8 {} +LDMR7: is LDMR6 { build LDMR6; loadReg(r7); } + +LDMR8: is lsmul=9 {} +LDMR8: is LDMR7 { build LDMR7; loadReg(r8); } + +LDMR9: is lsmul=10 {} +LDMR9: is LDMR8 { build LDMR8; loadReg(r9); } + +LDMR10: is lsmul=11 {} +LDMR10: is LDMR9 { build LDMR9; loadReg(r10); } + +LDMR11: is lsmul=12 {} +LDMR11: is LDMR10 { 
build LDMR10; loadReg(r11); } + +LDMR12: is lsmul=13 {} +LDMR12: is LDMR11 { build LDMR11; loadReg(r12); } + +LDMR13: is lsmul=14 {} +LDMR13: is LDMR12 { build LDMR12; loadReg(r13); } + +LDMR14: is lsmul=15 {} +LDMR14: is LDMR13 { build LDMR13; loadReg(r14); } + +LDMR15: is lsmul=16 {} +LDMR15: is LDMR14 { build LDMR14; loadReg(r15); } + +LDMR16: is lsmul=17 {} +LDMR16: is LDMR15 { build LDMR15; loadReg(r16); } + +LDMR17: is lsmul=18 {} +LDMR17: is LDMR16 { build LDMR16; loadReg(r17); } + +LDMR18: is lsmul=19 {} +LDMR18: is LDMR17 { build LDMR17; loadReg(r18); } + +LDMR19: is lsmul=20 {} +LDMR19: is LDMR18 { build LDMR18; loadReg(r19); } + +LDMR20: is lsmul=21 {} +LDMR20: is LDMR19 { build LDMR19; loadReg(r20); } + +LDMR21: is lsmul=22 {} +LDMR21: is LDMR20 { build LDMR20; loadReg(r21); } + +LDMR22: is lsmul=23 {} +LDMR22: is LDMR21 { build LDMR21; loadReg(r22); } + +LDMR23: is lsmul=24 {} +LDMR23: is LDMR22 { build LDMR22; loadReg(r23); } + +LDMR24: is lsmul=25 {} +LDMR24: is LDMR23 { build LDMR23; loadReg(r24); } + +LDMR25: is lsmul=26 {} +LDMR25: is LDMR24 { build LDMR24; loadReg(r25); } + +LDMR26: is lsmul=27 {} +LDMR26: is LDMR25 { build LDMR25; loadReg(r26); } + +LDMR27: is lsmul=28 {} +LDMR27: is LDMR26 { build LDMR26; loadReg(r27); } + +LDMR28: is lsmul=29 {} +LDMR28: is LDMR27 { build LDMR27; loadReg(r28); } + +LDMR29: is lsmul=30 {} +LDMR29: is LDMR28 { build LDMR28; loadReg(r29); } + +LDMR30: is lsmul=31 {} +LDMR30: is LDMR29 { build LDMR29; loadReg(r30); } + +LDMR31: is LDMR30 { build LDMR30; loadReg(r31); } + +:lmw D,dPlusRaOrZeroAddress is $(NOTVLE) & OP=46 & D & BITS_21_25 & dPlusRaOrZeroAddress & LDMR31 [ lsmul = BITS_21_25; ] +{ + tea = dPlusRaOrZeroAddress; + build LDMR31; +} + diff --git a/src/third-party/sleigh/processors/PowerPC/data/languages/lswInstructions.sinc b/src/third-party/sleigh/processors/PowerPC/data/languages/lswInstructions.sinc new file mode 100644 index 00000000..4f3c0818 --- /dev/null +++ b/src/third-party/sleigh/processors/PowerPC/data/languages/lswInstructions.sinc @@ -0,0 +1,185 @@ +#lswi r0,0,7 0x7c 00 3c aa +#lswi r0,r2,7 0x7c 02 3c aa + +DYN_D1: regaddr is BITS_21_25 [ regaddr = ((BITS_21_25 + 1)&0x1f) * $(REGISTER_SIZE); ] { export *[register]:$(REGISTER_SIZE) regaddr; } +DYN_D2: regaddr is BITS_21_25 [ regaddr = ((BITS_21_25 + 2)&0x1f) * $(REGISTER_SIZE); ] { export *[register]:$(REGISTER_SIZE) regaddr; } +DYN_D3: regaddr is BITS_21_25 [ regaddr = ((BITS_21_25 + 3)&0x1f) * $(REGISTER_SIZE); ] { export *[register]:$(REGISTER_SIZE) regaddr; } +DYN_D4: regaddr is BITS_21_25 [ regaddr = ((BITS_21_25 + 4)&0x1f) * $(REGISTER_SIZE); ] { export *[register]:$(REGISTER_SIZE) regaddr; } +DYN_D5: regaddr is BITS_21_25 [ regaddr = ((BITS_21_25 + 5)&0x1f) * $(REGISTER_SIZE); ] { export *[register]:$(REGISTER_SIZE) regaddr; } +DYN_D6: regaddr is BITS_21_25 [ regaddr = ((BITS_21_25 + 6)&0x1f) * $(REGISTER_SIZE); ] { export *[register]:$(REGISTER_SIZE) regaddr; } +DYN_D7: regaddr is BITS_21_25 [ regaddr = ((BITS_21_25 + 7)&0x1f) * $(REGISTER_SIZE); ] { export *[register]:$(REGISTER_SIZE) regaddr; } + +:lswi D,RA_OR_ZERO,NB is OP=31 & D & RA_OR_ZERO & NB & BITS_13_15=0 & BH=0 & XOP_1_10=597 & BIT_0=0 + & DYN_D1 & DYN_D2 & DYN_D3 & DYN_D4 & DYN_D5 & DYN_D6 & DYN_D7 +{ + ea:$(REGISTER_SIZE) = RA_OR_ZERO; + loadRegister(D,ea); + loadRegister(DYN_D1,ea); + loadRegister(DYN_D2,ea); + loadRegister(DYN_D3,ea); + loadRegister(DYN_D4,ea); + loadRegister(DYN_D5,ea); + loadRegister(DYN_D6,ea); + loadRegister(DYN_D7,ea); +} + +:lswi D,RA_OR_ZERO,NB is OP=31 & D & RA_OR_ZERO 
& NB & BITS_13_15=0 & BH & XOP_1_10=597 & BIT_0=0 +{ + ea:$(REGISTER_SIZE) = RA_OR_ZERO; + sa:1 = BH; + loadRegisterPartial(D,ea,sa); +} + +:lswi D,RA_OR_ZERO,NB is OP=31 & D & RA_OR_ZERO & NB & BITS_13_15=1 & BH=0 & XOP_1_10=597 & BIT_0=0 +{ + ea:$(REGISTER_SIZE) = RA_OR_ZERO; + loadRegister(D,ea); +} + +:lswi D,RA_OR_ZERO,NB is OP=31 & D & RA_OR_ZERO & NB & BITS_13_15=1 & BH & XOP_1_10=597 & BIT_0=0 + & DYN_D1 +{ + ea:$(REGISTER_SIZE) = RA_OR_ZERO; + loadRegister(D,ea); + sa:1 = BH; + loadRegisterPartial(DYN_D1,ea,sa); +} + + +:lswi D,RA_OR_ZERO,NB is OP=31 & D & RA_OR_ZERO & NB & BITS_13_15=2 & BH=0 & XOP_1_10=597 & BIT_0=0 + & DYN_D1 +{ + ea:$(REGISTER_SIZE) = RA_OR_ZERO; + loadRegister(D,ea); + loadRegister(DYN_D1,ea); +} + +:lswi D,RA_OR_ZERO,NB is OP=31 & D & RA_OR_ZERO & NB & BITS_13_15=2 & BH & XOP_1_10=597 & BIT_0=0 + & DYN_D1 & DYN_D2 +{ + ea:$(REGISTER_SIZE) = RA_OR_ZERO; + loadRegister(D,ea); + loadRegister(DYN_D1,ea); + sa:1 = BH; + loadRegisterPartial(DYN_D2,ea,sa); +} + +:lswi D,RA_OR_ZERO,NB is OP=31 & D & RA_OR_ZERO & NB & BITS_13_15=3 & BH=0 & XOP_1_10=597 & BIT_0=0 + & DYN_D1 & DYN_D2 +{ + ea:$(REGISTER_SIZE) = RA_OR_ZERO; + loadRegister(D,ea); + loadRegister(DYN_D1,ea); + loadRegister(DYN_D2,ea); +} + +:lswi D,RA_OR_ZERO,NB is OP=31 & D & RA_OR_ZERO & NB & BITS_13_15=3 & BH & XOP_1_10=597 & BIT_0=0 + & DYN_D1 & DYN_D2 & DYN_D3 +{ + ea:$(REGISTER_SIZE) = RA_OR_ZERO; + loadRegister(D,ea); + loadRegister(DYN_D1,ea); + loadRegister(DYN_D2,ea); + sa:1 = BH; + loadRegisterPartial(DYN_D3,ea,sa); +} + +:lswi D,RA_OR_ZERO,NB is OP=31 & D & RA_OR_ZERO & NB & BITS_13_15=4 & BH=0 & XOP_1_10=597 & BIT_0=0 + & DYN_D1 & DYN_D2 & DYN_D3 +{ + ea:$(REGISTER_SIZE) = RA_OR_ZERO; + loadRegister(D,ea); + loadRegister(DYN_D1,ea); + loadRegister(DYN_D2,ea); + loadRegister(DYN_D3,ea); +} + +:lswi D,RA_OR_ZERO,NB is OP=31 & D & RA_OR_ZERO & NB & BITS_13_15=4 & BH & XOP_1_10=597 & BIT_0=0 + & DYN_D1 & DYN_D2 & DYN_D3 & DYN_D4 +{ + ea:$(REGISTER_SIZE) = RA_OR_ZERO; + loadRegister(D,ea); + loadRegister(DYN_D1,ea); + loadRegister(DYN_D2,ea); + loadRegister(DYN_D3,ea); + sa:1 = BH; + loadRegisterPartial(DYN_D4,ea,sa); +} + +:lswi D,RA_OR_ZERO,NB is OP=31 & D & RA_OR_ZERO & NB & BITS_13_15=5 & BH=0 & XOP_1_10=597 & BIT_0=0 + & DYN_D1 & DYN_D2 & DYN_D3 & DYN_D4 +{ + ea:$(REGISTER_SIZE) = RA_OR_ZERO; + loadRegister(D,ea); + loadRegister(DYN_D1,ea); + loadRegister(DYN_D2,ea); + loadRegister(DYN_D3,ea); + loadRegister(DYN_D4,ea); +} + +:lswi D,RA_OR_ZERO,NB is OP=31 & D & RA_OR_ZERO & NB & BITS_13_15=5 & BH & XOP_1_10=597 & BIT_0=0 + & DYN_D1 & DYN_D2 & DYN_D3 & DYN_D4 & DYN_D5 +{ + ea:$(REGISTER_SIZE) = RA_OR_ZERO; + loadRegister(D,ea); + loadRegister(DYN_D1,ea); + loadRegister(DYN_D2,ea); + loadRegister(DYN_D3,ea); + loadRegister(DYN_D4,ea); + sa:1 = BH; + loadRegisterPartial(DYN_D5,ea,sa); +} + +:lswi D,RA_OR_ZERO,NB is OP=31 & D & RA_OR_ZERO & NB & BITS_13_15=6 & BH=0 & XOP_1_10=597 & BIT_0=0 + & DYN_D1 & DYN_D2 & DYN_D3 & DYN_D4 & DYN_D5 +{ + ea:$(REGISTER_SIZE) = RA_OR_ZERO; + loadRegister(D,ea); + loadRegister(DYN_D1,ea); + loadRegister(DYN_D2,ea); + loadRegister(DYN_D3,ea); + loadRegister(DYN_D4,ea); + loadRegister(DYN_D5,ea); +} + +:lswi D,RA_OR_ZERO,NB is OP=31 & D & RA_OR_ZERO & NB & BITS_13_15=6 & BH & XOP_1_10=597 & BIT_0=0 + & DYN_D1 & DYN_D2 & DYN_D3 & DYN_D4 & DYN_D5 & DYN_D6 +{ + ea:$(REGISTER_SIZE) = RA_OR_ZERO; + loadRegister(D,ea); + loadRegister(DYN_D1,ea); + loadRegister(DYN_D2,ea); + loadRegister(DYN_D3,ea); + loadRegister(DYN_D4,ea); + loadRegister(DYN_D5,ea); + sa:1 = BH; + 
loadRegisterPartial(DYN_D6,ea,sa);
+}
+
+:lswi D,RA_OR_ZERO,NB is OP=31 & D & RA_OR_ZERO & NB & BITS_13_15=7 & BH=0 & XOP_1_10=597 & BIT_0=0
+    & DYN_D1 & DYN_D2 & DYN_D3 & DYN_D4 & DYN_D5 & DYN_D6
+{
+    ea:$(REGISTER_SIZE) = RA_OR_ZERO;
+    loadRegister(D,ea);
+    loadRegister(DYN_D1,ea);
+    loadRegister(DYN_D2,ea);
+    loadRegister(DYN_D3,ea);
+    loadRegister(DYN_D4,ea);
+    loadRegister(DYN_D5,ea);
+    loadRegister(DYN_D6,ea);
+}
+
+:lswi D,RA_OR_ZERO,NB is OP=31 & D & RA_OR_ZERO & NB & BITS_13_15=7 & BH & XOP_1_10=597 & BIT_0=0
+    & DYN_D1 & DYN_D2 & DYN_D3 & DYN_D4 & DYN_D5 & DYN_D6 & DYN_D7
+{
+    ea:$(REGISTER_SIZE) = RA_OR_ZERO;
+    loadRegister(D,ea);
+    loadRegister(DYN_D1,ea);
+    loadRegister(DYN_D2,ea);
+    loadRegister(DYN_D3,ea);
+    loadRegister(DYN_D4,ea);
+    loadRegister(DYN_D5,ea);
+    loadRegister(DYN_D6,ea);
+    sa:1 = BH;
+    loadRegisterPartial(DYN_D7,ea,sa);
+}
diff --git a/src/third-party/sleigh/processors/PowerPC/data/languages/old/oldPPC.lang b/src/third-party/sleigh/processors/PowerPC/data/languages/old/oldPPC.lang
new file mode 100644
index 00000000..0e242f5b
--- /dev/null
+++ b/src/third-party/sleigh/processors/PowerPC/data/languages/old/oldPPC.lang
@@ -0,0 +1,1196 @@
[XML body of this deprecated language definition (1,196 lines) was lost in extraction; only the language id "PowerPC:BE:32:DEPRECATED" and the name "PowerPC" survive.]
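Stepping back to the two string-load files above: both unroll PowerPC's string instructions rather than looping. The LDMR chain unrolls lmw one register per link via recursive `build`, and the lswi table case-splits NB into BITS_13_15 whole 4-byte registers plus a BH-byte tail handled by loadRegisterPartial, with the DYN_Dk sub-constructors computing the k-th destination register's offset in register space and wrapping past r31. A minimal C++ sketch of the architectural behaviour being modeled follows; it assumes big-endian byte order and concrete flat memory, and the function and parameter names are illustrative, not Maat or Ghidra code.

#include <cstdint>

// NB bytes starting at ea are packed left-justified into consecutive GPRs,
// beginning at rD and wrapping from r31 back to r0, mirroring the
// ((BITS_21_25 + k) & 0x1f) computation in the DYN_Dk sub-constructors.
void lswi_model(uint32_t gpr[32], const uint8_t* mem, uint64_t ea,
                unsigned rd, unsigned nb)
{
    if (nb == 0)
        nb = 32;                                  // NB=0 encodes a 32-byte transfer
    for (unsigned i = 0; i < nb; i += 4) {
        unsigned reg = (rd + i / 4) & 0x1f;       // wrap past r31
        unsigned chunk = (nb - i < 4) ? (nb - i) : 4;  // short tail = the BH leftover
        uint32_t word = 0;
        for (unsigned b = 0; b < chunk; ++b)
            word |= uint32_t(mem[ea + i + b]) << (24 - 8 * b); // left-justified
        gpr[reg] = word;
    }
}

Note how the NB=0 case lines up with the first constructor in the table (BITS_13_15=0 and BH=0 loading D plus DYN_D1..DYN_D7, i.e. eight full registers).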
diff --git a/src/third-party/sleigh/processors/PowerPC/data/languages/old/oldPPC.trans b/src/third-party/sleigh/processors/PowerPC/data/languages/old/oldPPC.trans
new file mode 100644
index 00000000..af53127d
--- /dev/null
+++ b/src/third-party/sleigh/processors/PowerPC/data/languages/old/oldPPC.trans
@@ -0,0 +1,7 @@
[XML translation spec lost in extraction; the surviving fragments "Sleigh-PowerPC 32-bit" and "PowerPC:BE:32:default" indicate it maps the deprecated language onto PowerPC:BE:32:default.]
diff --git a/src/third-party/sleigh/processors/PowerPC/data/languages/ppc.dwarf b/src/third-party/sleigh/processors/PowerPC/data/languages/ppc.dwarf
new file mode 100644
index 00000000..9bff0cce
--- /dev/null
+++ b/src/third-party/sleigh/processors/PowerPC/data/languages/ppc.dwarf
@@ -0,0 +1,19 @@
[XML DWARF register-mapping content (19 lines) lost in extraction.]
diff --git a/src/third-party/sleigh/processors/PowerPC/data/languages/ppc.ldefs b/src/third-party/sleigh/processors/PowerPC/data/languages/ppc.ldefs
new file mode 100644
index 00000000..a829a66c
--- /dev/null
+++ b/src/third-party/sleigh/processors/PowerPC/data/languages/ppc.ldefs
@@ -0,0 +1,303 @@
[XML language-definition markup (303 lines) lost in extraction. The surviving description strings list the language variants defined here:
 - PowerPC 32-bit big endian w/Altivec, G2
 - PowerPC 32-bit little endian w/Altivec, G2
 - PowerPC 64-bit big endian w/Altivec, G2
 - PowerPC 64-bit big endian w/Altivec and 32 bit addressing, G2
 - PowerPC 64-bit little endian w/Altivec and 32 bit addressing, G2
 - PowerPC 64-bit little endian w/Altivec, G2
 - PowerPC 4xx 32-bit big endian embedded core
 - PowerPC 4xx 32-bit little endian embedded core
 - Freescale MPC8280 32-bit big endian family (PowerQUICC-III)
 - PowerQUICC-III 32-bit big endian family
 - PowerQUICC-III 32-bit little endian family
 - Power ISA 3.0 Big Endian w/EVX and 32-bit Addressing
 - Power ISA 3.0 Little Endian w/EVX and 32-bit Addressing
 - Power ISA 3.0 Big Endian w/Altivec and 32-bit Addressing
 - Power ISA 3.0 Little Endian w/Altivec and 32-bit Addressing
 - Power ISA 3.0 Big Endian w/Altivec
 - Power ISA 3.0 Little Endian w/Altivec
 - Power ISA 3.0 Big Endian w/VLE, EVX and 32-bit Addressing
 - Power ISA 3.0 Big Endian w/VLE, Altivec and 32-bit Addressing]
diff --git a/src/third-party/sleigh/processors/PowerPC/data/languages/ppc_32.pspec b/src/third-party/sleigh/processors/PowerPC/data/languages/ppc_32.pspec
new file mode 100644
index 00000000..8c9194eb
--- /dev/null
+++ b/src/third-party/sleigh/processors/PowerPC/data/languages/ppc_32.pspec
@@ -0,0 +1,1223 @@
[XML processor-spec content (1,223 lines) lost in extraction.]
diff --git a/src/third-party/sleigh/processors/PowerPC/data/languages/ppc_32_4xx_be.slaspec b/src/third-party/sleigh/processors/PowerPC/data/languages/ppc_32_4xx_be.slaspec
new file mode 100644
index 00000000..1a370bee
--- /dev/null
+++ b/src/third-party/sleigh/processors/PowerPC/data/languages/ppc_32_4xx_be.slaspec
@@ -0,0 +1,13 @@
+# SLA specification file for IBM PowerPC 4xx series core
+
+@define ENDIAN "big"
+
+@define REGISTER_SIZE "4"
+
+@define EATRUNC "ea"
+
+@define CTR_OFFSET "32"
+
+@include "ppc_common.sinc"
+@include "4xx.sinc"
+
diff --git a/src/third-party/sleigh/processors/PowerPC/data/languages/ppc_32_4xx_le.slaspec b/src/third-party/sleigh/processors/PowerPC/data/languages/ppc_32_4xx_le.slaspec
new file mode 100644
index 00000000..91e7a934
--- /dev/null
+++ b/src/third-party/sleigh/processors/PowerPC/data/languages/ppc_32_4xx_le.slaspec
@@ -0,0 +1,13 @@
+# SLA specification file for IBM PowerPC 4xx series core
+
+@define ENDIAN "little"
+
+@define REGISTER_SIZE "4"
+
+@define EATRUNC "ea"
+
+@define CTR_OFFSET "32"
+
+@include "ppc_common.sinc"
+@include "4xx.sinc"
+
diff --git a/src/third-party/sleigh/processors/PowerPC/data/languages/ppc_32_be.cspec b/src/third-party/sleigh/processors/PowerPC/data/languages/ppc_32_be.cspec
new file mode 100644
index 00000000..7e90bd53
--- /dev/null
+++ b/src/third-party/sleigh/processors/PowerPC/data/languages/ppc_32_be.cspec
@@ -0,0 +1,122 @@
[XML compiler-spec content (122 lines) lost in extraction.]
diff --git a/src/third-party/sleigh/processors/PowerPC/data/languages/ppc_32_be.slaspec b/src/third-party/sleigh/processors/PowerPC/data/languages/ppc_32_be.slaspec
new file mode 100644
index 00000000..046f729e
--- /dev/null
+++ b/src/third-party/sleigh/processors/PowerPC/data/languages/ppc_32_be.slaspec
@@ -0,0 +1,14 @@
+# SLA specification file for PowerPC 32-bit big endian
+
+@define ENDIAN "big"
+
+@define REGISTER_SIZE "4"
+
+@define EATRUNC "ea"
+
+@define CTR_OFFSET "32"
+
+@include "ppc_common.sinc"
+@include "altivec.sinc"
+@include "g2.sinc"
+
diff --git a/src/third-party/sleigh/processors/PowerPC/data/languages/ppc_32_be_Mac.cspec b/src/third-party/sleigh/processors/PowerPC/data/languages/ppc_32_be_Mac.cspec
new file mode 100644
index 00000000..4029a5b6
--- /dev/null
+++ b/src/third-party/sleigh/processors/PowerPC/data/languages/ppc_32_be_Mac.cspec
@@ -0,0 +1,74 @@
[XML compiler-spec markup (74 lines) lost in extraction; the one surviving pcode fragment, "* (r1 + 0x14) = r2;", suggests an injected store of r2 into a save slot at 0x14(r1), the 32-bit analogue of the PPC64 r2Save injection further below.]
diff --git a/src/third-party/sleigh/processors/PowerPC/data/languages/ppc_32_le.cspec b/src/third-party/sleigh/processors/PowerPC/data/languages/ppc_32_le.cspec
new file mode 100644
index 00000000..d1699dc0
--- /dev/null
+++ b/src/third-party/sleigh/processors/PowerPC/data/languages/ppc_32_le.cspec
@@ -0,0 +1,121 @@
[XML compiler-spec content (121 lines) lost in extraction.]
diff --git a/src/third-party/sleigh/processors/PowerPC/data/languages/ppc_32_le.slaspec b/src/third-party/sleigh/processors/PowerPC/data/languages/ppc_32_le.slaspec
new file mode 100644
index 00000000..4cc253c0
--- /dev/null
+++ b/src/third-party/sleigh/processors/PowerPC/data/languages/ppc_32_le.slaspec
@@ -0,0 +1,11 @@
+# SLA specification file for PowerPC 32-bit little endian
+
+@define ENDIAN "little"
+
+@define REGISTER_SIZE "4"
+
+@define EATRUNC "ea"
+
+@include "ppc_common.sinc"
+@include "altivec.sinc"
+@include "g2.sinc"
diff --git a/src/third-party/sleigh/processors/PowerPC/data/languages/ppc_32_mpc8270.pspec b/src/third-party/sleigh/processors/PowerPC/data/languages/ppc_32_mpc8270.pspec
new file mode 100644
index 00000000..95fdbf33
--- /dev/null
+++ b/src/third-party/sleigh/processors/PowerPC/data/languages/ppc_32_mpc8270.pspec
@@ -0,0 +1,1156 @@
[XML processor-spec content (1,156 lines) lost in extraction.]
diff --git a/src/third-party/sleigh/processors/PowerPC/data/languages/ppc_32_quicciii_be.slaspec b/src/third-party/sleigh/processors/PowerPC/data/languages/ppc_32_quicciii_be.slaspec
new file mode 100644
index 00000000..d1636cf4
--- /dev/null
+++ b/src/third-party/sleigh/processors/PowerPC/data/languages/ppc_32_quicciii_be.slaspec
@@ -0,0 +1,14 @@
+# SLA specification file for IBM PowerPC 4xx series core
+
+@define ENDIAN "big"
+
+@define REGISTER_SIZE "4"
+
+@define EATRUNC "ea"
+
+@define CTR_OFFSET "32"
+
+@include "ppc_common.sinc"
+@include "quicciii.sinc"
+@include "evx.sinc"
+
diff --git a/src/third-party/sleigh/processors/PowerPC/data/languages/ppc_32_quicciii_le.slaspec b/src/third-party/sleigh/processors/PowerPC/data/languages/ppc_32_quicciii_le.slaspec
new file mode 100644
index 00000000..5f09364d
--- /dev/null
+++ b/src/third-party/sleigh/processors/PowerPC/data/languages/ppc_32_quicciii_le.slaspec
@@ -0,0 +1,14 @@
+# SLA specification file for IBM PowerPC 4xx series core
+
+@define ENDIAN "little"
+
+@define REGISTER_SIZE "4"
+
+@define EATRUNC "ea"
+
+@define CTR_OFFSET "32"
+
+@include "ppc_common.sinc"
+@include "quicciii.sinc"
+@include "evx.sinc"
+
diff --git a/src/third-party/sleigh/processors/PowerPC/data/languages/ppc_64.cspec b/src/third-party/sleigh/processors/PowerPC/data/languages/ppc_64.cspec
new file mode 100644
index 00000000..7220be77
--- /dev/null
+++ b/src/third-party/sleigh/processors/PowerPC/data/languages/ppc_64.cspec
@@ -0,0 +1,124 @@
[XML compiler-spec markup (124 lines) lost in extraction; the surviving uponReturn pcode injection is kept below.]
+      # Inject pcode when returning from a function call to place the r2Save
+      # value into 0x28(r1) which should be restored by the "ld r2,0x28(r1)"
+      # which immediately follows calls which comply with the PPC64 ABI spec.
+      local saveR2ptr = r1 + 0x28;
+      *:8 saveR2ptr = r2Save;
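The injection above pairs with the fake r2Save register defined later in ppc_common.sinc: at every call return, the decompiler pretends the callee wrote the caller's saved TOC pointer back into the ELF ABI save slot at 0x28(r1), so the "ld r2,0x28(r1)" that conventionally follows the call is seen to restore a known value. A sketch of the injected effect in C++ terms follows; the struct and function are hypothetical emulator-style state for illustration, not Maat's actual classes.

#include <cstdint>

struct Ppc64State {
    uint64_t r1;       // stack pointer
    uint64_t r2save;   // fake register holding the caller's TOC pointer (r2)
    uint8_t* mem;      // flat memory, for illustration only
};

void upon_return_inject(Ppc64State& st)
{
    uint64_t slot = st.r1 + 0x28;                             // local saveR2ptr = r1 + 0x28
    *reinterpret_cast<uint64_t*>(st.mem + slot) = st.r2save;  // *:8 saveR2ptr = r2Save
}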
diff --git a/src/third-party/sleigh/processors/PowerPC/data/languages/ppc_64.pspec b/src/third-party/sleigh/processors/PowerPC/data/languages/ppc_64.pspec
new file mode 100644
index 00000000..028581a0
--- /dev/null
+++ b/src/third-party/sleigh/processors/PowerPC/data/languages/ppc_64.pspec
@@ -0,0 +1,1220 @@
[XML processor-spec content (1,220 lines) lost in extraction.]
diff --git a/src/third-party/sleigh/processors/PowerPC/data/languages/ppc_64_32.cspec b/src/third-party/sleigh/processors/PowerPC/data/languages/ppc_64_32.cspec
new file mode 100644
index 00000000..109b980e
--- /dev/null
+++ b/src/third-party/sleigh/processors/PowerPC/data/languages/ppc_64_32.cspec
@@ -0,0 +1,129 @@
[XML compiler-spec content (129 lines) lost in extraction.]
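One define worth flagging in the slaspec that follows: the legacy 64-bit variants (ppc_64_be and, later, ppc_64_le) set EATRUNC to "ea:4", while the Power ISA variants use plain "ea". Since sleigh's ":4" suffix is a subpiece taking the low four bytes, the define presumably lets the address-calculation macros in ppc_common.sinc choose between using the full computed effective address and truncating it to its low 32 bits. A rough C++ analogue, offered as an interpretation rather than documented Ghidra behaviour:

#include <cstdint>

// "ea"   -> use the full 64-bit effective address
// "ea:4" -> keep only the low 32 bits, as sleigh's subpiece does
uint64_t effective_address(uint64_t base, int64_t disp, bool trunc_to_32)
{
    uint64_t ea = base + static_cast<uint64_t>(disp);
    return trunc_to_32 ? (ea & 0xffffffffull) : ea;
}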
diff --git a/src/third-party/sleigh/processors/PowerPC/data/languages/ppc_64_be.slaspec b/src/third-party/sleigh/processors/PowerPC/data/languages/ppc_64_be.slaspec
new file mode 100644
index 00000000..6cc821a3
--- /dev/null
+++ b/src/third-party/sleigh/processors/PowerPC/data/languages/ppc_64_be.slaspec
@@ -0,0 +1,11 @@
+# SLA specification file for PowerPC 64-bit big endian
+
+@define ENDIAN "big"
+
+@define REGISTER_SIZE "8"
+@define BIT_64 "64"
+@define EATRUNC "ea:4"
+
+@include "ppc_common.sinc"
+@include "altivec.sinc"
+@include "g2.sinc"
diff --git a/src/third-party/sleigh/processors/PowerPC/data/languages/ppc_64_be_Mac.cspec b/src/third-party/sleigh/processors/PowerPC/data/languages/ppc_64_be_Mac.cspec
new file mode 100644
index 00000000..d36edb1f
--- /dev/null
+++ b/src/third-party/sleigh/processors/PowerPC/data/languages/ppc_64_be_Mac.cspec
@@ -0,0 +1,109 @@
[XML compiler-spec content (109 lines) lost in extraction.]
diff --git a/src/third-party/sleigh/processors/PowerPC/data/languages/ppc_64_isa_altivec_be.slaspec b/src/third-party/sleigh/processors/PowerPC/data/languages/ppc_64_isa_altivec_be.slaspec
new file mode 100644
index 00000000..7e2c7dfe
--- /dev/null
+++ b/src/third-party/sleigh/processors/PowerPC/data/languages/ppc_64_isa_altivec_be.slaspec
@@ -0,0 +1,33 @@
+# SLA specification file for Power ISA Version 2.06 Revision B (July 23, 2010)
+# ISA (Instruction Set Architecture) a trademarked name for PowerPC specifications from IBM.
+
+@define ENDIAN "big"
+
+@define IS_ISA "1"
+@define NoLegacyIntegerMultiplyAccumulate "1"
+
+@define REGISTER_SIZE "8"
+@define BIT_64 "64"
+
+@define EATRUNC "ea"
+
+@define CTR_OFFSET "32"
+
+@include "ppc_common.sinc"
+@include "ppc_isa.sinc"
+
+@include "ppc_a2.sinc"
+@include "quicciii.sinc"
+@include "FPRC.sinc"
+
+# A given processor can be compliant with the PowerISA spec by including EITHER
+# the embedded vector instructions (EVX) OR the AltiVec instructions
+# However, these instruction sets overlap in their bit patterns, so Sleigh cannot support
+# both at the same time. We have two language variants for PowerISA
+# that specify which of these two vector specs is supported.
+#@include "evx.sinc"
+#@include "SPEF_SCR.sinc"
+#@include "SPE_EFSD.sinc"
+#@include "SPE_EFV.sinc"
+## OR
+@include "altivec.sinc"
diff --git a/src/third-party/sleigh/processors/PowerPC/data/languages/ppc_64_isa_altivec_le.slaspec b/src/third-party/sleigh/processors/PowerPC/data/languages/ppc_64_isa_altivec_le.slaspec
new file mode 100644
index 00000000..4bb1fe40
--- /dev/null
+++ b/src/third-party/sleigh/processors/PowerPC/data/languages/ppc_64_isa_altivec_le.slaspec
@@ -0,0 +1,34 @@
+# SLA specification file for Power ISA Version 2.06 Revision B (July 23, 2010)
+# ISA (Instruction Set Architecture) a trademarked name for PowerPC specifications from IBM.
+
+@define ENDIAN "little"
+
+@define IS_ISA "1"
+@define NoLegacyIntegerMultiplyAccumulate "1"
+
+@define REGISTER_SIZE "8"
+@define BIT_64 "64"
+
+@define EATRUNC "ea"
+
+@define CTR_OFFSET "32"
+
+@include "ppc_common.sinc"
+@include "ppc_isa.sinc"
+
+@include "ppc_a2.sinc"
+@include "quicciii.sinc"
+@include "FPRC.sinc"
+
+# A given processor can be compliant with the PowerISA spec by including EITHER
+# the embedded vector instructions (EVX) OR the AltiVec instructions
+# However, these instruction sets overlap in their bit patterns, so Sleigh cannot support
+# both at the same time. We have two language variants for PowerISA
+# that specify which of these two vector specs is supported.
+#@include "evx.sinc" +#@include "SPEF_SCR.sinc" +#@include "SPE_EFSD.sinc" +#@include "SPE_EFV.sinc" +# OR +@include "altivec.sinc" + diff --git a/src/third-party/sleigh/processors/PowerPC/data/languages/ppc_64_isa_altivec_vle_be.slaspec b/src/third-party/sleigh/processors/PowerPC/data/languages/ppc_64_isa_altivec_vle_be.slaspec new file mode 100644 index 00000000..9a39b197 --- /dev/null +++ b/src/third-party/sleigh/processors/PowerPC/data/languages/ppc_64_isa_altivec_vle_be.slaspec @@ -0,0 +1,35 @@ +# SLA specification file for Power ISA Version 2.06 Revision B (July 23, 2010) +# ISA (Instruction Set Architecture) a trademarked name for PowerPC specifications from IBM. + +@define ENDIAN "big" + +@define IS_ISA "1" +@define NoLegacyIntegerMultiplyAccumulate "1" + +@define REGISTER_SIZE "8" +@define BIT_64 "64" + +@define EATRUNC "ea" + +@define CTR_OFFSET "32" + +@include "ppc_common.sinc" +@include "ppc_isa.sinc" + +@include "ppc_a2.sinc" +@include "quicciii.sinc" +@include "FPRC.sinc" + +@include "ppc_vle.sinc" + +# A given processor can be compliant with the PowerISA spec by including EITHER +# the embedded vector instructions (EVX) OR the AltiVec instructions +# However, these instruction sets overlap in their bit patterns, so Sleigh cannot support +# both at the same time. We have two language variants for PowerISA +# that specify which of these two vector specs is supported. +#@include "evx.sinc" +#@include "SPEF_SCR.sinc" +#@include "SPE_EFSD.sinc" +#@include "SPE_EFV.sinc" +## OR +@include "altivec.sinc" diff --git a/src/third-party/sleigh/processors/PowerPC/data/languages/ppc_64_isa_be.slaspec b/src/third-party/sleigh/processors/PowerPC/data/languages/ppc_64_isa_be.slaspec new file mode 100644 index 00000000..8be67141 --- /dev/null +++ b/src/third-party/sleigh/processors/PowerPC/data/languages/ppc_64_isa_be.slaspec @@ -0,0 +1,32 @@ +# SLA specification file for Power ISA Version 2.06 Revision B (July 23, 2010) +# ISA (Instruction Set Architecture) a trademarked name for PowerPC specifications from IBM. + +@define ENDIAN "big" + +@define IS_ISA "1" +@define NoLegacyIntegerMultiplyAccumulate "1" + +@define REGISTER_SIZE "8" +@define BIT_64 "64" + +@define EATRUNC "ea" + +@define CTR_OFFSET "32" + +@include "ppc_common.sinc" +@include "ppc_isa.sinc" + +@include "quicciii.sinc" +@include "FPRC.sinc" + +# A given processor can be compliant with the PowerISA spec by including EITHER +# the embedded vector instructions (EVX) OR the AltiVec instructions +# However, these instruction sets overlap in their bit patterns, so Sleigh cannot support +# both at the same time. We have two language variants for PowerISA +# that specify which of these two vector specs is supported. +@include "evx.sinc" +@include "SPEF_SCR.sinc" +@include "SPE_EFSD.sinc" +@include "SPE_EFV.sinc" +# OR +#@include "altivec.sinc" diff --git a/src/third-party/sleigh/processors/PowerPC/data/languages/ppc_64_isa_le.slaspec b/src/third-party/sleigh/processors/PowerPC/data/languages/ppc_64_isa_le.slaspec new file mode 100644 index 00000000..ec7f1f50 --- /dev/null +++ b/src/third-party/sleigh/processors/PowerPC/data/languages/ppc_64_isa_le.slaspec @@ -0,0 +1,33 @@ +# SLA specification file for Power ISA Version 2.06 Revision B (July 23, 2010) +# ISA (Instruction Set Architecture) a trademarked name for PowerPC specifications from IBM. 
+ +@define ENDIAN "little" + +@define IS_ISA "1" +@define NoLegacyIntegerMultiplyAccumulate "1" + +@define REGISTER_SIZE "8" +@define BIT_64 "64" + +@define EATRUNC "ea" + +@define CTR_OFFSET "32" + +@include "ppc_common.sinc" +@include "ppc_isa.sinc" + +@include "quicciii.sinc" +@include "FPRC.sinc" + +# A given processor can be compliant with the PowerISA spec by including EITHER +# the embedded vector instructions (EVX) OR the AltiVec instructions +# However, these instruction sets overlap in their bit patterns, so Sleigh cannot support +# both at the same time. We have two language variants for PowerISA +# that specify which of these two vector specs is supported. +@include "evx.sinc" +@include "SPEF_SCR.sinc" +@include "SPE_EFSD.sinc" +@include "SPE_EFV.sinc" +# OR +#@include "altivec.sinc" + diff --git a/src/third-party/sleigh/processors/PowerPC/data/languages/ppc_64_isa_vle_be.slaspec b/src/third-party/sleigh/processors/PowerPC/data/languages/ppc_64_isa_vle_be.slaspec new file mode 100644 index 00000000..8a81dc5f --- /dev/null +++ b/src/third-party/sleigh/processors/PowerPC/data/languages/ppc_64_isa_vle_be.slaspec @@ -0,0 +1,34 @@ +# SLA specification file for Power ISA Version 2.06 Revision B (July 23, 2010) +# ISA (Instruction Set Architecture) a trademarked name for PowerPC specifications from IBM. + +@define ENDIAN "big" + +@define IS_ISA "1" +@define NoLegacyIntegerMultiplyAccumulate "1" + +@define REGISTER_SIZE "8" +@define BIT_64 "64" + +@define EATRUNC "ea" + +@define CTR_OFFSET "32" + +@include "ppc_common.sinc" +@include "ppc_isa.sinc" + +@include "quicciii.sinc" +@include "FPRC.sinc" + +@include "ppc_vle.sinc" + +# A given processor can be compliant with the PowerISA spec by including EITHER +# the embedded vector instructions (EVX) OR the AltiVec instructions +# However, these instruction sets overlap in their bit patterns, so Sleigh cannot support +# both at the same time. We have two language variants for PowerISA +# that specify which of these two vector specs is supported. 
+@include "evx.sinc" +@include "SPEF_SCR.sinc" +@include "SPE_EFSD.sinc" +@include "SPE_EFV.sinc" +# OR +#@include "altivec.sinc" diff --git a/src/third-party/sleigh/processors/PowerPC/data/languages/ppc_64_le.slaspec b/src/third-party/sleigh/processors/PowerPC/data/languages/ppc_64_le.slaspec new file mode 100644 index 00000000..67e5c1ce --- /dev/null +++ b/src/third-party/sleigh/processors/PowerPC/data/languages/ppc_64_le.slaspec @@ -0,0 +1,11 @@ +# SLA specification file for PowerPC 64-bit little endian + +@define ENDIAN "little" + +@define REGISTER_SIZE "8" +@define BIT_64 "64" +@define EATRUNC "ea:4" + +@include "ppc_common.sinc" +@include "altivec.sinc" +@include "g2.sinc" diff --git a/src/third-party/sleigh/processors/PowerPC/data/languages/ppc_a2.sinc b/src/third-party/sleigh/processors/PowerPC/data/languages/ppc_a2.sinc new file mode 100644 index 00000000..4de97739 --- /dev/null +++ b/src/third-party/sleigh/processors/PowerPC/data/languages/ppc_a2.sinc @@ -0,0 +1,113 @@ + +# binutils: a2.d 88: 00 00 02 00 attn +# binutils: power4_32.d 28: 00 00 02 00 attn +# binutils: power4.d +64: 00 00 02 00 attn +# binutils: power6.d 54: 00 00 02 00 attn +# "attn", X(0,256), X_MASK, POWER4|PPCA2, PPC476, {0} +define pcodeop attnOp; +:attn is $(NOTVLE) & OP=0 & XOP_1_10=256 & BITS_11_25=0 { attnOp(); } + +# binutils: a2.d 214: 7d 4b 01 a6 eratwe r10,r11,0 +# binutils: a2.d 218: 7d 4b 19 a6 eratwe r10,r11,3 +# {"eratwe", X(31,211), X_MASK, PPCA2, PPCNONE, {RS, RA, WS}}, +# WS=> { 0x7, 11, NULL, NULL, 0 }, +define pcodeop eratweOp; +:eratwe S,A is $(NOTVLE) & OP=31 & XOP_1_10=211 & S & A & BITS_11_13 & BITS_14_15 & BIT_0=0 { eratweOp(S,A); } + +# binutils: a2.d 200: 7d 4b 66 66 erativax r10,r11,r12 +# "erativax", X(31,819), X_MASK, PPCA2, PPCNONE, {RS, RA0, RB} +define pcodeop erativaxOp; +:erativax S,A,B is $(NOTVLE) & OP=31 & XOP_1_10=819 & S & A & B { erativaxOp(S,A,B); } + +# binutils: a2.d 1f4: 7c 0a 58 66 eratilx 0,r10,r11 +# binutils: a2.d 1f8: 7c 2a 58 66 eratilx 1,r10,r11 +# binutils: a2.d 1fc: 7c ea 58 66 eratilx 7,r10,r11 +# "eratilx", X(31,51), X_MASK, PPCA2, PPCNONE, {ERAT_T, RA, RB} +define pcodeop eratilxOp; +:eratilx BITS_21_23,A,B is $(NOTVLE) & OP=31 & XOP_1_10=51 & BITS_21_23 & A & B { eratilxOp(A,B); } + +# binutils: a2.d 210: 7d 4b 61 26 eratsx r10,r11,r12 +# "eratsx", XRC(31,147,0), X_MASK, PPCA2, PPCNONE, {RT, RA0, RB} +define pcodeop eratsxOp; +:eratsx TH,A,B is $(NOTVLE) & OP=31 & XOP_1_10=147 & Rc=0 & TH & A & B { eratsxOp(TH,A,B); } + +# binutils: a2.d 20c: 7d 4b 61 27 eratsx\. r10,r11,r12 +# "eratsx.", XRC(31,147,1), X_MASK, PPCA2, PPCNONE, {RT, RA0, RB} +define pcodeop eratsxXOp; +:eratsx. TH,A,B is $(NOTVLE) & OP=31 & XOP_1_10=147 & Rc=1 & TH & A & B { eratsxXOp(TH,A,B); } + +# "eratre", X(31,179), # binutils: a2.d 204: 7d 4b 01 66 eratre r10,r11,0 +# binutils: a2.d 208: 7d 4b 19 66 eratre r10,r11,3 +define pcodeop eratreOp; +:eratre TH,A,BITS_11_13 is $(NOTVLE) & OP=31 & XOP_1_10=179 & TH & A & BITS_11_13 { eratreOp(TH,A); } + +# binutils: a2.d 3e0: 7d 4b 63 2c icswx r10,r11,r12 +# "icswx", XRC(31,406,0), X_MASK, POWER7|PPCA2, PPCNONE, {RS, RA, RB} +define pcodeop icswxOp; +:icswx S,A,B is $(NOTVLE) & OP=31 & XOP_1_10=406 & Rc=0 & S & A & B { icswxOp(S,A,B); } + +# binutils: a2.d 3dc: 7d 4b 63 2d icswx\. r10,r11,r12 +# "icswx.", XRC(31,406,1), X_MASK, POWER7|PPCA2, PPCNONE, {RS, RA, RB} +define pcodeop icswxDotOp; +:icswx. 
S,A,B is $(NOTVLE) & OP=31 & XOP_1_10=406 & Rc=1 & S & A & B { icswxDotOp(S,A,B); } + +# binutils: 476.d 49c: 7c 85 02 06 mfdcrx r4,r5 +# binutils: a2.d 520: 7d 4b 02 06 mfdcrx r10,r11 +# binutils: booke.d 28: 7c 85 02 06 mfdcrx r4,r5 +# binutils: booke_xcoff.d 24: 7c 85 02 06 mfdcrx r4,r5 +# "mfdcrx", X(31,259), X_MASK, BOOKE|PPCA2|PPC476, TITAN, {S, A} +define pcodeop mfdcrxOp; +# :mfdcrx S,A is $(NOTVLE) & OP=31 & XOP_1_10=259 & S & A & BITS_11_15=0 { mfdcrxOp(S,A); } + +# binutils: a2.d 51c: 7d 4b 02 07 mfdcrx\. r10,r11 +# "mfdcrx", X(31,259), X_MASK, BOOKE|PPCA2|PPC476, TITAN, {RS, RA} +define pcodeop mfdcrxDotOp; +:mfdcrx. S,A is $(NOTVLE) & OP=31 & XOP_1_10=259 & Rc=1 & S & A & BITS_11_15=0 { mfdcrxDotOp(S,A); } + +# binutils: a2.d 564: 7d 6a 03 07 mtdcrx\. r10,r11 +define pcodeop mtdcrxDotOp; +:mtdcrx. A,S is $(NOTVLE) & OP=31 & XOP_1_10=387 & A & S & Rc=1 { mtdcrxDotOp(A,S); } + +# binutils: a2.d 884: 7c 00 01 6c wchkall +# binutils: a2.d 888: 7c 00 01 6c wchkall +# binutils: a2.d 88c: 7d 80 01 6c wchkall cr3 +# "wchkall", X(31,182), X_MASK, PPCA2, PPCNONE, {OBF} +define pcodeop wchkallOp; +:wchkall BITS_23_25 is $(NOTVLE) & OP=31 & XOP_1_10=182 & BITS_23_25 { wchkallOp(); } + +# binutils: a2.d 894: 7c 20 07 4c wclrall 1 +# "wclrall", X(31,934), XRARB_MASK, PPCA2, PPCNONE, {L} +define pcodeop wclrallOp; +:wclrall L is $(NOTVLE) & OP=31 & XOP_1_10=934 & L { wclrallOp(); } + +# binutils: a2.d 890: 7c 2a 5f 4c wclr 1,r10,r11 +# "wclr", X(31,934), X_MASK, PPCA2, PPCNONE, {L, RA0, RB} +define pcodeop wclrOp; +# :wclr L,A,B is $(NOTVLE) & OP=31 & XOP_1_10=934 & L & A & B { wclrOp(); } + +@ifdef IS_ISA +# binutils: 476.d 474: 7c 00 06 ac mbar +# binutils: 476.d 47c: 7c 20 06 ac mbar 1 +# "mbar", X(31,854), X_MASK, BOOKE|PPCA2|PPC476, PPCNONE, {MO} +define pcodeop mbarOp; +:mbar MO is OP=31 & XOP_1_10=854 & MO { mbarOp(); } +@endif + +# binutils: a2.d: 514: 7d 4a 3a 87 mfdcr\. r10,234 +:mfdcr. D, DCRN is $(NOTVLE) & OP=31 & D & DCRN & XOP_1_10=323 & BIT_0=1 +{ + D = DCRN; +} + +# binutils: a2.d: 55c: 7d 4a 3b 87 mtdcr\. 234,r10 +:mtdcr. 
DCRN, D is $(NOTVLE) & OP=31 & D & DCRN & XOP_1_10=451 & BIT_0=1 +{ + DCRN = D; +} + +# binutils: a2.d: 188: 7d 4b 61 fe dcbtstep r10,r11,r12 +# binutils: e500mc.d: a0: 7c 64 29 fe dcbtstep r3,r4,r5 +define pcodeop DataCacheBlockTouchForStoreByExternalPID; +:dcbtstep TH,A,B is OP=31 & TH & A & B & XOP_1_10=255 & BIT_0=0 { + DataCacheBlockTouchForStoreByExternalPID(TH,A,B); +} diff --git a/src/third-party/sleigh/processors/PowerPC/data/languages/ppc_common.sinc b/src/third-party/sleigh/processors/PowerPC/data/languages/ppc_common.sinc new file mode 100644 index 00000000..0a251411 --- /dev/null +++ b/src/third-party/sleigh/processors/PowerPC/data/languages/ppc_common.sinc @@ -0,0 +1,1995 @@ +# PowerPC assembly SLA spec (size agnostic) + +# version 1.0 + +define endian=$(ENDIAN); + +define alignment=2; + +# -size: How many bytes make up an address +define space ram type=ram_space size=$(REGISTER_SIZE) default; + +# -size: How many bytes do we need for register addressing +define space register type=register_space size=4; + +# General registers (some pcode that follows depends on these registers being at +# offset 0 + +define register offset=0 size=$(REGISTER_SIZE) [ + r0 r1 r2 r3 r4 r5 r6 r7 r8 r9 r10 r11 r12 r13 r14 r15 + r16 r17 r18 r19 r20 r21 r22 r23 r24 r25 r26 r27 r28 r29 r30 r31 ]; + +# XER flags +define register offset=0x400 size=1 [ xer_so xer_ov xer_ov32 xer_ca xer_ca32 xer_count ]; + +define register offset=0x500 size=1 [ fp_fx fp_fex fp_vx fp_ox + fp_ux fp_zx fp_xx fp_vxsnan + fp_vxisi fp_vxidi fp_vxzdz fp_vximz + fp_vxvc fp_fr fp_fi fp_c + fp_cc0 fp_cc1 fp_cc2 fp_cc3 + fp_reserve1 fp_vxsoft fp_vxsqrt fp_vxcvi + fp_ve fp_oe fp_ue fp_ze + fp_xe fp_ni fp_rn0 fp_rn1 ]; + +define register offset = 0x700 size =$(REGISTER_SIZE) [MSR]; +define register offset = 0x720 size=$(REGISTER_SIZE) [RESERVE_ADDRESS]; +define register offset = 0x728 size=1 [RESERVE]; +define register offset = 0x730 size=1 [RESERVE_LENGTH]; + +# Program Counter register: This register is not actually visible in the +# API for powerpc but it is needed to create a consistent model for the debugger +define register offset=0x780 size=$(REGISTER_SIZE) pc; + +@define SEG_REGISTER_BASE "0x800" +# Segment Registers +define register offset=$(SEG_REGISTER_BASE) size=4 [ sr0 sr1 sr2 sr3 sr4 sr5 sr6 sr7 sr8 sr9 sr10 sr11 sr12 sr13 sr14 sr15 ]; + +# Condition register flags +define register offset=0x900 size=1 [ cr0 cr1 cr2 cr3 cr4 cr5 cr6 cr7 ]; +define register offset=0x900 size=8 [ crall ]; + +define register offset=0x980 size=$(REGISTER_SIZE) [ tea ]; + +# Fake storage used to help preserve r2 across function calls within the decompiler (see appropriate cspec) +define register offset=0x988 size=$(REGISTER_SIZE) [ r2Save ]; + +# Special Purpose Registers are defined with generic names with the exception of XER, LR, CTR, SRR0, SRR1, TBL(r/w), TBU(r/w) +# These names may be replaced within register_data section within a PPC variant's pspec file +define register offset=0x1000 size=$(REGISTER_SIZE) + [ spr000 XER spr002 spr003 spr004 spr005 spr006 spr007 LR CTR spr00a spr00b spr00c spr00d spr00e spr00f + spr010 spr011 spr012 spr013 spr014 spr015 spr016 spr017 spr018 spr019 SRR0 SRR1 spr01c spr01d spr01e spr01f + spr020 spr021 spr022 spr023 spr024 spr025 spr026 spr027 spr028 spr029 spr02a spr02b spr02c spr02d spr02e spr02f + spr030 spr031 spr032 spr033 spr034 spr035 spr036 spr037 spr038 spr039 CSRR0 CSRR1 spr03c spr03d spr03e spr03f + spr040 spr041 spr042 spr043 spr044 spr045 spr046 spr047 spr048 spr049 spr04a spr04b spr04c spr04d 
spr04e spr04f + spr050 spr051 spr052 spr053 spr054 spr055 spr056 spr057 spr058 spr059 spr05a spr05b spr05c spr05d spr05e spr05f + spr060 spr061 spr062 spr063 spr064 spr065 spr066 spr067 spr068 spr069 spr06a spr06b spr06c spr06d spr06e spr06f + spr070 spr071 spr072 spr073 spr074 spr075 spr076 spr077 spr078 spr079 spr07a spr07b spr07c spr07d spr07e spr07f + spr080 spr081 spr082 spr083 spr084 spr085 spr086 spr087 spr088 spr089 spr08a spr08b spr08c spr08d spr08e spr08f + spr090 spr091 spr092 spr093 spr094 spr095 spr096 spr097 spr098 spr099 spr09a spr09b spr09c spr09d spr09e spr09f + spr0a0 spr0a1 spr0a2 spr0a3 spr0a4 spr0a5 spr0a6 spr0a7 spr0a8 spr0a9 spr0aa spr0ab spr0ac spr0ad spr0ae spr0af + spr0b0 spr0b1 spr0b2 spr0b3 spr0b4 spr0b5 spr0b6 spr0b7 spr0b8 spr0b9 spr0ba spr0bb spr0bc spr0bd spr0be spr0bf + spr0c0 spr0c1 spr0c2 spr0c3 spr0c4 spr0c5 spr0c6 spr0c7 spr0c8 spr0c9 spr0ca spr0cb spr0cc spr0cd spr0ce spr0cf + spr0d0 spr0d1 spr0d2 spr0d3 spr0d4 spr0d5 spr0d6 spr0d7 spr0d8 spr0d9 spr0da spr0db spr0dc spr0dd spr0de spr0df + spr0e0 spr0e1 spr0e2 spr0e3 spr0e4 spr0e5 spr0e6 spr0e7 spr0e8 spr0e9 spr0ea spr0eb spr0ec spr0ed spr0ee spr0ef + spr0f0 spr0f1 spr0f2 spr0f3 spr0f4 spr0f5 spr0f6 spr0f7 spr0f8 spr0f9 spr0fa spr0fb spr0fc spr0fd spr0fe spr0ff + spr100 spr101 spr102 spr103 spr104 spr105 spr106 spr107 spr108 spr109 spr10a spr10b TBLr TBUr spr10e spr10f + spr110 spr111 spr112 spr113 spr114 spr115 spr116 spr117 spr118 spr119 spr11a spr11b TBLw TBUw spr11e spr11f + spr120 spr121 spr122 spr123 spr124 spr125 spr126 spr127 spr128 spr129 spr12a spr12b spr12c spr12d spr12e spr12f + spr130 spr131 spr132 spr133 spr134 spr135 spr136 spr137 spr138 spr139 spr13a spr13b spr13c spr13d spr13e spr13f + spr140 spr141 spr142 spr143 spr144 spr145 spr146 spr147 spr148 spr149 spr14a spr14b spr14c spr14d spr14e spr14f + spr150 spr151 spr152 spr153 spr154 spr155 spr156 spr157 spr158 spr159 spr15a spr15b spr15c spr15d spr15e spr15f + spr160 spr161 spr162 spr163 spr164 spr165 spr166 spr167 spr168 spr169 spr16a spr16b spr16c spr16d spr16e spr16f + spr170 spr171 spr172 spr173 spr174 spr175 spr176 spr177 spr178 spr179 spr17a spr17b spr17c spr17d spr17e spr17f + spr180 spr181 spr182 spr183 spr184 spr185 spr186 spr187 spr188 spr189 spr18a spr18b spr18c spr18d spr18e spr18f + spr190 spr191 spr192 spr193 spr194 spr195 spr196 spr197 spr198 spr199 spr19a spr19b spr19c spr19d spr19e spr19f + spr1a0 spr1a1 spr1a2 spr1a3 spr1a4 spr1a5 spr1a6 spr1a7 spr1a8 spr1a9 spr1aa spr1ab spr1ac spr1ad spr1ae spr1af + spr1b0 spr1b1 spr1b2 spr1b3 spr1b4 spr1b5 spr1b6 spr1b7 spr1b8 spr1b9 spr1ba spr1bb spr1bc spr1bd spr1be spr1bf + spr1c0 spr1c1 spr1c2 spr1c3 spr1c4 spr1c5 spr1c6 spr1c7 spr1c8 spr1c9 spr1ca spr1cb spr1cc spr1cd spr1ce spr1cf + spr1d0 spr1d1 spr1d2 spr1d3 spr1d4 spr1d5 spr1d6 spr1d7 spr1d8 spr1d9 spr1da spr1db spr1dc spr1dd spr1de spr1df + spr1e0 spr1e1 spr1e2 spr1e3 spr1e4 spr1e5 spr1e6 spr1e7 spr1e8 spr1e9 spr1ea spr1eb spr1ec spr1ed spr1ee spr1ef + spr1f0 spr1f1 spr1f2 spr1f3 spr1f4 spr1f5 spr1f6 spr1f7 spr1f8 spr1f9 spr1fa spr1fb spr1fc spr1fd spr1fe spr1ff + spr200 spr201 spr202 spr203 spr204 spr205 spr206 spr207 spr208 spr209 spr20a spr20b spr20c spr20d spr20e spr20f + spr210 spr211 spr212 spr213 spr214 spr215 spr216 spr217 spr218 spr219 spr21a spr21b spr21c spr21d spr21e spr21f + spr220 spr221 spr222 spr223 spr224 spr225 spr226 spr227 spr228 spr229 spr22a spr22b spr22c spr22d spr22e spr22f + spr230 spr231 spr232 spr233 spr234 spr235 spr236 spr237 spr238 spr239 spr23a spr23b spr23c spr23d spr23e spr23f + spr240 
spr241 spr242 spr243 spr244 spr245 spr246 spr247 spr248 spr249 spr24a spr24b spr24c spr24d spr24e spr24f + spr250 spr251 spr252 spr253 spr254 spr255 spr256 spr257 spr258 spr259 spr25a spr25b spr25c spr25d spr25e spr25f + spr260 spr261 spr262 spr263 spr264 spr265 spr266 spr267 spr268 spr269 spr26a spr26b spr26c spr26d spr26e spr26f + spr270 spr271 spr272 spr273 spr274 spr275 spr276 spr277 spr278 spr279 spr27a spr27b spr27c spr27d spr27e spr27f + spr280 spr281 spr282 spr283 spr284 spr285 spr286 spr287 spr288 spr289 spr28a spr28b spr28c spr28d spr28e spr28f + spr290 spr291 spr292 spr293 spr294 spr295 spr296 spr297 spr298 spr299 spr29a spr29b spr29c spr29d spr29e spr29f + spr2a0 spr2a1 spr2a2 spr2a3 spr2a4 spr2a5 spr2a6 spr2a7 spr2a8 spr2a9 spr2aa spr2ab spr2ac spr2ad spr2ae spr2af + spr2b0 spr2b1 spr2b2 spr2b3 spr2b4 spr2b5 spr2b6 spr2b7 spr2b8 spr2b9 spr2ba spr2bb spr2bc spr2bd spr2be spr2bf + spr2c0 spr2c1 spr2c2 spr2c3 spr2c4 spr2c5 spr2c6 spr2c7 spr2c8 spr2c9 spr2ca spr2cb spr2cc spr2cd spr2ce spr2cf + spr2d0 spr2d1 spr2d2 spr2d3 spr2d4 spr2d5 spr2d6 spr2d7 spr2d8 spr2d9 spr2da spr2db spr2dc spr2dd spr2de spr2df + spr2e0 spr2e1 spr2e2 spr2e3 spr2e4 spr2e5 spr2e6 spr2e7 spr2e8 spr2e9 spr2ea spr2eb spr2ec spr2ed spr2ee spr2ef + spr2f0 spr2f1 spr2f2 spr2f3 spr2f4 spr2f5 spr2f6 spr2f7 spr2f8 spr2f9 spr2fa spr2fb spr2fc spr2fd spr2fe spr2ff + spr300 spr301 spr302 spr303 spr304 spr305 spr306 spr307 spr308 spr309 spr30a spr30b spr30c spr30d spr30e spr30f + spr310 spr311 spr312 spr313 spr314 spr315 spr316 spr317 spr318 spr319 spr31a spr31b spr31c spr31d spr31e spr31f + spr320 spr321 spr322 spr323 spr324 spr325 spr326 spr327 spr328 spr329 spr32a spr32b spr32c spr32d spr32e TAR + spr330 spr331 spr332 spr333 spr334 spr335 spr336 spr337 spr338 spr339 spr33a spr33b spr33c spr33d spr33e spr33f + spr340 spr341 spr342 spr343 spr344 spr345 spr346 spr347 spr348 spr349 spr34a spr34b spr34c spr34d spr34e spr34f + spr350 spr351 spr352 spr353 spr354 spr355 spr356 spr357 spr358 spr359 spr35a spr35b spr35c spr35d spr35e spr35f + spr360 spr361 spr362 spr363 spr364 spr365 spr366 spr367 spr368 spr369 spr36a spr36b spr36c spr36d spr36e spr36f + spr370 spr371 spr372 spr373 spr374 spr375 spr376 spr377 spr378 spr379 spr37a spr37b spr37c spr37d spr37e spr37f + spr380 spr381 spr382 spr383 spr384 spr385 spr386 spr387 spr388 spr389 spr38a spr38b spr38c spr38d spr38e spr38f + spr390 spr391 spr392 spr393 spr394 spr395 spr396 spr397 spr398 spr399 spr39a spr39b spr39c spr39d spr39e spr39f + spr3a0 spr3a1 spr3a2 spr3a3 spr3a4 spr3a5 spr3a6 spr3a7 spr3a8 spr3a9 spr3aa spr3ab spr3ac spr3ad spr3ae spr3af + spr3b0 spr3b1 spr3b2 spr3b3 spr3b4 spr3b5 spr3b6 spr3b7 spr3b8 spr3b9 spr3ba spr3bb spr3bc spr3bd spr3be spr3bf + spr3c0 spr3c1 spr3c2 spr3c3 spr3c4 spr3c5 spr3c6 spr3c7 spr3c8 spr3c9 spr3ca spr3cb spr3cc spr3cd spr3ce spr3cf + spr3d0 spr3d1 spr3d2 spr3d3 spr3d4 spr3d5 spr3d6 spr3d7 spr3d8 spr3d9 spr3da spr3db spr3dc spr3dd spr3de spr3df + spr3e0 spr3e1 spr3e2 spr3e3 spr3e4 spr3e5 spr3e6 spr3e7 spr3e8 spr3e9 spr3ea spr3eb spr3ec spr3ed spr3ee spr3ef + spr3f0 spr3f1 spr3f2 spr3f3 spr3f4 spr3f5 spr3f6 spr3f7 spr3f8 spr3f9 spr3fa spr3fb spr3fc spr3fd spr3fe spr3ff + ]; + + +# The floating point registers and the altivec vector registers OVERLAP to VSX registers +# This was not done correctly before and has now been fixed. Book 1, Chapter 7.2 has a +# very good diagram. 
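The overlap the comment above describes is realized by the define blocks that follow: each 8-byte fN sits inside the 16-byte vsN (N = 0..31), with two endian-conditional definitions placing it in the correct half, and the Altivec vrN names are later handled as sub-tables that export the matching upper VSX registers. A rough C++ picture of the big-endian placement, using hypothetical types rather than Maat or Ghidra code:

#include <cstdint>
#include <cstring>

struct VsxReg { uint8_t bytes[16]; };       // one of vs0..vs63

// In the big-endian define below, fN occupies the first eight bytes of vsN;
// the little-endian define flips the placement for the same register.
double read_fpr_be(const VsxReg& vs)
{
    double f;
    std::memcpy(&f, vs.bytes, sizeof f);    // leading 8 bytes hold the FP value
    return f;
}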
+ +# Support for Vector-Scalar Extension - i.e "VSX" + +define register offset=0x4000 size=16 + [ vs0 vs1 vs2 vs3 vs4 vs5 vs6 vs7 vs8 vs9 vs10 vs11 vs12 vs13 vs14 vs15 + vs16 vs17 vs18 vs19 vs20 vs21 vs22 vs23 vs24 vs25 vs26 vs27 vs28 vs29 vs30 vs31 + vs32 vs33 vs34 vs35 vs36 vs37 vs38 vs39 vs40 vs41 vs42 vs43 vs44 vs45 vs46 vs47 + vs48 vs49 vs50 vs51 vs52 vs53 vs54 vs55 vs56 vs57 vs58 vs59 vs60 vs61 vs62 vs63 + ]; +# Floating point registers +# These overlay the first 32 vsx regs with the gaps as indicated so fr0 is in vs0, fr1 is in vs1, etc. +# This also means we have to have 2 defs of this due to endian stuff. +@if ENDIAN == "big" +define register offset=0x4000 size=8 [ + f0 _ f1 _ f2 _ f3 _ f4 _ f5 _ f6 _ f7 _ f8 _ f9 _ f10 _ f11 _ f12 _ f13 _ f14 _ f15 _ + f16 _ f17 _ f18 _ f19 _ f20 _ f21 _ f22 _ f23 _ f24 _ f25 _ f26 _ f27 _ f28 _ f29 _ f30 _ f31 _ ]; + +@else +define register offset=0x4000 size=8 [ + _ f0 _ f1 _ f2 _ f3 _ f4 _ f5 _ f6 _ f7 _ f8 _ f9 _ f10 _ f11 _ f12 _ f13 _ f14 _ f15 + _ f16 _ f17 _ f18 _ f19 _ f20 _ f21 _ f22 _ f23 _ f24 _ f25 _ f26 _ f27 _ f28 _ f29 _ f30 _ f31 ]; +@endif + +# All the altivec regs need to start at offset 0x4200 +# Sleigh does not allow registers of the same size to overlay. This presents some issues as the normal +# Altivec registers overlay the top 32 VSX registers. What we have to do is use a sub-table to display +# the Altivec name, but export the matching VSX register. The original vrD, etc. tokens are now sub-tables. + +# Altivec vector registers (accessed by vrD vrA vrB vrS vrC) +# Altivec vector registers +#define register offset=0x4200 size=16 [ +# v0 v1 v2 v3 v4 v5 v6 v7 v8 v9 v10 v11 v12 v13 v14 v15 +# v16 v17 v18 v19 v20 v21 v22 v23 v24 v25 v26 v27 v28 v29 v30 v31 ]; + +@if ENDIAN == "big" + +# Create psydo sub-registers for the Altivec vector registers to allow better easier vector instructions by 8 byte subregisters +define register offset=0x4200 size=8 [ # 64 bit access to vrN registers (psydo-registers) (accessed by vrD_64_N vrA_64_N vrB_64_N vrS_64_N vrC_64_N) +vr0_64_0 vr0_64_1 +vr1_64_0 vr1_64_1 +vr2_64_0 vr2_64_1 +vr3_64_0 vr3_64_1 +vr4_64_0 vr4_64_1 +vr5_64_0 vr5_64_1 +vr6_64_0 vr6_64_1 +vr7_64_0 vr7_64_1 +vr8_64_0 vr8_64_1 +vr9_64_0 vr9_64_1 +vr10_64_0 vr10_64_1 +vr11_64_0 vr11_64_1 +vr12_64_0 vr12_64_1 +vr13_64_0 vr13_64_1 +vr14_64_0 vr14_64_1 +vr15_64_0 vr15_64_1 +vr16_64_0 vr16_64_1 +vr17_64_0 vr17_64_1 +vr18_64_0 vr18_64_1 +vr19_64_0 vr19_64_1 +vr20_64_0 vr20_64_1 +vr21_64_0 vr21_64_1 +vr22_64_0 vr22_64_1 +vr23_64_0 vr23_64_1 +vr24_64_0 vr24_64_1 +vr25_64_0 vr25_64_1 +vr26_64_0 vr26_64_1 +vr27_64_0 vr27_64_1 +vr28_64_0 vr28_64_1 +vr29_64_0 vr29_64_1 +vr30_64_0 vr30_64_1 +vr31_64_0 vr31_64_1 +]; + +# Create psydo sub-registers for the Altivec vector registers to allow better easier vector instructions by 4 byte subregisters +define register offset=0x4200 size=4 [ # 32 bit access to vrN registers (psydo-registers) (accessed by vrD_32_N vrA_32_N vrB_32_N vrS_32_N vrC_32_N) +vr0_32_0 vr0_32_1 vr0_32_2 vr0_32_3 +vr1_32_0 vr1_32_1 vr1_32_2 vr1_32_3 +vr2_32_0 vr2_32_1 vr2_32_2 vr2_32_3 +vr3_32_0 vr3_32_1 vr3_32_2 vr3_32_3 +vr4_32_0 vr4_32_1 vr4_32_2 vr4_32_3 +vr5_32_0 vr5_32_1 vr5_32_2 vr5_32_3 +vr6_32_0 vr6_32_1 vr6_32_2 vr6_32_3 +vr7_32_0 vr7_32_1 vr7_32_2 vr7_32_3 +vr8_32_0 vr8_32_1 vr8_32_2 vr8_32_3 +vr9_32_0 vr9_32_1 vr9_32_2 vr9_32_3 +vr10_32_0 vr10_32_1 vr10_32_2 vr10_32_3 +vr11_32_0 vr11_32_1 vr11_32_2 vr11_32_3 +vr12_32_0 vr12_32_1 vr12_32_2 vr12_32_3 +vr13_32_0 vr13_32_1 vr13_32_2 vr13_32_3 +vr14_32_0 vr14_32_1 
vr14_32_2 vr14_32_3 +vr15_32_0 vr15_32_1 vr15_32_2 vr15_32_3 +vr16_32_0 vr16_32_1 vr16_32_2 vr16_32_3 +vr17_32_0 vr17_32_1 vr17_32_2 vr17_32_3 +vr18_32_0 vr18_32_1 vr18_32_2 vr18_32_3 +vr19_32_0 vr19_32_1 vr19_32_2 vr19_32_3 +vr20_32_0 vr20_32_1 vr20_32_2 vr20_32_3 +vr21_32_0 vr21_32_1 vr21_32_2 vr21_32_3 +vr22_32_0 vr22_32_1 vr22_32_2 vr22_32_3 +vr23_32_0 vr23_32_1 vr23_32_2 vr23_32_3 +vr24_32_0 vr24_32_1 vr24_32_2 vr24_32_3 +vr25_32_0 vr25_32_1 vr25_32_2 vr25_32_3 +vr26_32_0 vr26_32_1 vr26_32_2 vr26_32_3 +vr27_32_0 vr27_32_1 vr27_32_2 vr27_32_3 +vr28_32_0 vr28_32_1 vr28_32_2 vr28_32_3 +vr29_32_0 vr29_32_1 vr29_32_2 vr29_32_3 +vr30_32_0 vr30_32_1 vr30_32_2 vr30_32_3 +vr31_32_0 vr31_32_1 vr31_32_2 vr31_32_3 +]; + +# Create psydo sub-registers for the Altivec vector registers to allow better easier vector instructions by 2 byte subregisters +define register offset=0x4200 size=2 [ # 16 bit access to vrN registers (psydo-registers) (accessed by vrD_16_N vrA_16_N vrB_16_N vrS_16_N vrC_16_N) +vr0_16_0 vr0_16_1 vr0_16_2 vr0_16_3 vr0_16_4 vr0_16_5 vr0_16_6 vr0_16_7 +vr1_16_0 vr1_16_1 vr1_16_2 vr1_16_3 vr1_16_4 vr1_16_5 vr1_16_6 vr1_16_7 +vr2_16_0 vr2_16_1 vr2_16_2 vr2_16_3 vr2_16_4 vr2_16_5 vr2_16_6 vr2_16_7 +vr3_16_0 vr3_16_1 vr3_16_2 vr3_16_3 vr3_16_4 vr3_16_5 vr3_16_6 vr3_16_7 +vr4_16_0 vr4_16_1 vr4_16_2 vr4_16_3 vr4_16_4 vr4_16_5 vr4_16_6 vr4_16_7 +vr5_16_0 vr5_16_1 vr5_16_2 vr5_16_3 vr5_16_4 vr5_16_5 vr5_16_6 vr5_16_7 +vr6_16_0 vr6_16_1 vr6_16_2 vr6_16_3 vr6_16_4 vr6_16_5 vr6_16_6 vr6_16_7 +vr7_16_0 vr7_16_1 vr7_16_2 vr7_16_3 vr7_16_4 vr7_16_5 vr7_16_6 vr7_16_7 +vr8_16_0 vr8_16_1 vr8_16_2 vr8_16_3 vr8_16_4 vr8_16_5 vr8_16_6 vr8_16_7 +vr9_16_0 vr9_16_1 vr9_16_2 vr9_16_3 vr9_16_4 vr9_16_5 vr9_16_6 vr9_16_7 +vr10_16_0 vr10_16_1 vr10_16_2 vr10_16_3 vr10_16_4 vr10_16_5 vr10_16_6 vr10_16_7 +vr11_16_0 vr11_16_1 vr11_16_2 vr11_16_3 vr11_16_4 vr11_16_5 vr11_16_6 vr11_16_7 +vr12_16_0 vr12_16_1 vr12_16_2 vr12_16_3 vr12_16_4 vr12_16_5 vr12_16_6 vr12_16_7 +vr13_16_0 vr13_16_1 vr13_16_2 vr13_16_3 vr13_16_4 vr13_16_5 vr13_16_6 vr13_16_7 +vr14_16_0 vr14_16_1 vr14_16_2 vr14_16_3 vr14_16_4 vr14_16_5 vr14_16_6 vr14_16_7 +vr15_16_0 vr15_16_1 vr15_16_2 vr15_16_3 vr15_16_4 vr15_16_5 vr15_16_6 vr15_16_7 +vr16_16_0 vr16_16_1 vr16_16_2 vr16_16_3 vr16_16_4 vr16_16_5 vr16_16_6 vr16_16_7 +vr17_16_0 vr17_16_1 vr17_16_2 vr17_16_3 vr17_16_4 vr17_16_5 vr17_16_6 vr17_16_7 +vr18_16_0 vr18_16_1 vr18_16_2 vr18_16_3 vr18_16_4 vr18_16_5 vr18_16_6 vr18_16_7 +vr19_16_0 vr19_16_1 vr19_16_2 vr19_16_3 vr19_16_4 vr19_16_5 vr19_16_6 vr19_16_7 +vr20_16_0 vr20_16_1 vr20_16_2 vr20_16_3 vr20_16_4 vr20_16_5 vr20_16_6 vr20_16_7 +vr21_16_0 vr21_16_1 vr21_16_2 vr21_16_3 vr21_16_4 vr21_16_5 vr21_16_6 vr21_16_7 +vr22_16_0 vr22_16_1 vr22_16_2 vr22_16_3 vr22_16_4 vr22_16_5 vr22_16_6 vr22_16_7 +vr23_16_0 vr23_16_1 vr23_16_2 vr23_16_3 vr23_16_4 vr23_16_5 vr23_16_6 vr23_16_7 +vr24_16_0 vr24_16_1 vr24_16_2 vr24_16_3 vr24_16_4 vr24_16_5 vr24_16_6 vr24_16_7 +vr25_16_0 vr25_16_1 vr25_16_2 vr25_16_3 vr25_16_4 vr25_16_5 vr25_16_6 vr25_16_7 +vr26_16_0 vr26_16_1 vr26_16_2 vr26_16_3 vr26_16_4 vr26_16_5 vr26_16_6 vr26_16_7 +vr27_16_0 vr27_16_1 vr27_16_2 vr27_16_3 vr27_16_4 vr27_16_5 vr27_16_6 vr27_16_7 +vr28_16_0 vr28_16_1 vr28_16_2 vr28_16_3 vr28_16_4 vr28_16_5 vr28_16_6 vr28_16_7 +vr29_16_0 vr29_16_1 vr29_16_2 vr29_16_3 vr29_16_4 vr29_16_5 vr29_16_6 vr29_16_7 +vr30_16_0 vr30_16_1 vr30_16_2 vr30_16_3 vr30_16_4 vr30_16_5 vr30_16_6 vr30_16_7 +vr31_16_0 vr31_16_1 vr31_16_2 vr31_16_3 vr31_16_4 vr31_16_5 vr31_16_6 vr31_16_7 +]; + +# Create psydo sub-registers for 
the Altivec vector registers to allow better easier vector instructions by 1 byte subregisters +define register offset=0x4200 size=1 [ # 8 bit access to vrN registers (psydo-registers) (accessed by vrD_8_N vrA_8_N vrB_8_N vrS_8_N vrC_8_N) +vr0_8_0 vr0_8_1 vr0_8_2 vr0_8_3 vr0_8_4 vr0_8_5 vr0_8_6 vr0_8_7 vr0_8_8 vr0_8_9 vr0_8_10 vr0_8_11 vr0_8_12 vr0_8_13 vr0_8_14 vr0_8_15 +vr1_8_0 vr1_8_1 vr1_8_2 vr1_8_3 vr1_8_4 vr1_8_5 vr1_8_6 vr1_8_7 vr1_8_8 vr1_8_9 vr1_8_10 vr1_8_11 vr1_8_12 vr1_8_13 vr1_8_14 vr1_8_15 +vr2_8_0 vr2_8_1 vr2_8_2 vr2_8_3 vr2_8_4 vr2_8_5 vr2_8_6 vr2_8_7 vr2_8_8 vr2_8_9 vr2_8_10 vr2_8_11 vr2_8_12 vr2_8_13 vr2_8_14 vr2_8_15 +vr3_8_0 vr3_8_1 vr3_8_2 vr3_8_3 vr3_8_4 vr3_8_5 vr3_8_6 vr3_8_7 vr3_8_8 vr3_8_9 vr3_8_10 vr3_8_11 vr3_8_12 vr3_8_13 vr3_8_14 vr3_8_15 +vr4_8_0 vr4_8_1 vr4_8_2 vr4_8_3 vr4_8_4 vr4_8_5 vr4_8_6 vr4_8_7 vr4_8_8 vr4_8_9 vr4_8_10 vr4_8_11 vr4_8_12 vr4_8_13 vr4_8_14 vr4_8_15 +vr5_8_0 vr5_8_1 vr5_8_2 vr5_8_3 vr5_8_4 vr5_8_5 vr5_8_6 vr5_8_7 vr5_8_8 vr5_8_9 vr5_8_10 vr5_8_11 vr5_8_12 vr5_8_13 vr5_8_14 vr5_8_15 +vr6_8_0 vr6_8_1 vr6_8_2 vr6_8_3 vr6_8_4 vr6_8_5 vr6_8_6 vr6_8_7 vr6_8_8 vr6_8_9 vr6_8_10 vr6_8_11 vr6_8_12 vr6_8_13 vr6_8_14 vr6_8_15 +vr7_8_0 vr7_8_1 vr7_8_2 vr7_8_3 vr7_8_4 vr7_8_5 vr7_8_6 vr7_8_7 vr7_8_8 vr7_8_9 vr7_8_10 vr7_8_11 vr7_8_12 vr7_8_13 vr7_8_14 vr7_8_15 +vr8_8_0 vr8_8_1 vr8_8_2 vr8_8_3 vr8_8_4 vr8_8_5 vr8_8_6 vr8_8_7 vr8_8_8 vr8_8_9 vr8_8_10 vr8_8_11 vr8_8_12 vr8_8_13 vr8_8_14 vr8_8_15 +vr9_8_0 vr9_8_1 vr9_8_2 vr9_8_3 vr9_8_4 vr9_8_5 vr9_8_6 vr9_8_7 vr9_8_8 vr9_8_9 vr9_8_10 vr9_8_11 vr9_8_12 vr9_8_13 vr9_8_14 vr9_8_15 +vr10_8_0 vr10_8_1 vr10_8_2 vr10_8_3 vr10_8_4 vr10_8_5 vr10_8_6 vr10_8_7 vr10_8_8 vr10_8_9 vr10_8_10 vr10_8_11 vr10_8_12 vr10_8_13 vr10_8_14 vr10_8_15 +vr11_8_0 vr11_8_1 vr11_8_2 vr11_8_3 vr11_8_4 vr11_8_5 vr11_8_6 vr11_8_7 vr11_8_8 vr11_8_9 vr11_8_10 vr11_8_11 vr11_8_12 vr11_8_13 vr11_8_14 vr11_8_15 +vr12_8_0 vr12_8_1 vr12_8_2 vr12_8_3 vr12_8_4 vr12_8_5 vr12_8_6 vr12_8_7 vr12_8_8 vr12_8_9 vr12_8_10 vr12_8_11 vr12_8_12 vr12_8_13 vr12_8_14 vr12_8_15 +vr13_8_0 vr13_8_1 vr13_8_2 vr13_8_3 vr13_8_4 vr13_8_5 vr13_8_6 vr13_8_7 vr13_8_8 vr13_8_9 vr13_8_10 vr13_8_11 vr13_8_12 vr13_8_13 vr13_8_14 vr13_8_15 +vr14_8_0 vr14_8_1 vr14_8_2 vr14_8_3 vr14_8_4 vr14_8_5 vr14_8_6 vr14_8_7 vr14_8_8 vr14_8_9 vr14_8_10 vr14_8_11 vr14_8_12 vr14_8_13 vr14_8_14 vr14_8_15 +vr15_8_0 vr15_8_1 vr15_8_2 vr15_8_3 vr15_8_4 vr15_8_5 vr15_8_6 vr15_8_7 vr15_8_8 vr15_8_9 vr15_8_10 vr15_8_11 vr15_8_12 vr15_8_13 vr15_8_14 vr15_8_15 +vr16_8_0 vr16_8_1 vr16_8_2 vr16_8_3 vr16_8_4 vr16_8_5 vr16_8_6 vr16_8_7 vr16_8_8 vr16_8_9 vr16_8_10 vr16_8_11 vr16_8_12 vr16_8_13 vr16_8_14 vr16_8_15 +vr17_8_0 vr17_8_1 vr17_8_2 vr17_8_3 vr17_8_4 vr17_8_5 vr17_8_6 vr17_8_7 vr17_8_8 vr17_8_9 vr17_8_10 vr17_8_11 vr17_8_12 vr17_8_13 vr17_8_14 vr17_8_15 +vr18_8_0 vr18_8_1 vr18_8_2 vr18_8_3 vr18_8_4 vr18_8_5 vr18_8_6 vr18_8_7 vr18_8_8 vr18_8_9 vr18_8_10 vr18_8_11 vr18_8_12 vr18_8_13 vr18_8_14 vr18_8_15 +vr19_8_0 vr19_8_1 vr19_8_2 vr19_8_3 vr19_8_4 vr19_8_5 vr19_8_6 vr19_8_7 vr19_8_8 vr19_8_9 vr19_8_10 vr19_8_11 vr19_8_12 vr19_8_13 vr19_8_14 vr19_8_15 +vr20_8_0 vr20_8_1 vr20_8_2 vr20_8_3 vr20_8_4 vr20_8_5 vr20_8_6 vr20_8_7 vr20_8_8 vr20_8_9 vr20_8_10 vr20_8_11 vr20_8_12 vr20_8_13 vr20_8_14 vr20_8_15 +vr21_8_0 vr21_8_1 vr21_8_2 vr21_8_3 vr21_8_4 vr21_8_5 vr21_8_6 vr21_8_7 vr21_8_8 vr21_8_9 vr21_8_10 vr21_8_11 vr21_8_12 vr21_8_13 vr21_8_14 vr21_8_15 +vr22_8_0 vr22_8_1 vr22_8_2 vr22_8_3 vr22_8_4 vr22_8_5 vr22_8_6 vr22_8_7 vr22_8_8 vr22_8_9 vr22_8_10 vr22_8_11 vr22_8_12 vr22_8_13 vr22_8_14 vr22_8_15 
+vr23_8_0 vr23_8_1 vr23_8_2 vr23_8_3 vr23_8_4 vr23_8_5 vr23_8_6 vr23_8_7 vr23_8_8 vr23_8_9 vr23_8_10 vr23_8_11 vr23_8_12 vr23_8_13 vr23_8_14 vr23_8_15 +vr24_8_0 vr24_8_1 vr24_8_2 vr24_8_3 vr24_8_4 vr24_8_5 vr24_8_6 vr24_8_7 vr24_8_8 vr24_8_9 vr24_8_10 vr24_8_11 vr24_8_12 vr24_8_13 vr24_8_14 vr24_8_15 +vr25_8_0 vr25_8_1 vr25_8_2 vr25_8_3 vr25_8_4 vr25_8_5 vr25_8_6 vr25_8_7 vr25_8_8 vr25_8_9 vr25_8_10 vr25_8_11 vr25_8_12 vr25_8_13 vr25_8_14 vr25_8_15 +vr26_8_0 vr26_8_1 vr26_8_2 vr26_8_3 vr26_8_4 vr26_8_5 vr26_8_6 vr26_8_7 vr26_8_8 vr26_8_9 vr26_8_10 vr26_8_11 vr26_8_12 vr26_8_13 vr26_8_14 vr26_8_15 +vr27_8_0 vr27_8_1 vr27_8_2 vr27_8_3 vr27_8_4 vr27_8_5 vr27_8_6 vr27_8_7 vr27_8_8 vr27_8_9 vr27_8_10 vr27_8_11 vr27_8_12 vr27_8_13 vr27_8_14 vr27_8_15 +vr28_8_0 vr28_8_1 vr28_8_2 vr28_8_3 vr28_8_4 vr28_8_5 vr28_8_6 vr28_8_7 vr28_8_8 vr28_8_9 vr28_8_10 vr28_8_11 vr28_8_12 vr28_8_13 vr28_8_14 vr28_8_15 +vr29_8_0 vr29_8_1 vr29_8_2 vr29_8_3 vr29_8_4 vr29_8_5 vr29_8_6 vr29_8_7 vr29_8_8 vr29_8_9 vr29_8_10 vr29_8_11 vr29_8_12 vr29_8_13 vr29_8_14 vr29_8_15 +vr30_8_0 vr30_8_1 vr30_8_2 vr30_8_3 vr30_8_4 vr30_8_5 vr30_8_6 vr30_8_7 vr30_8_8 vr30_8_9 vr30_8_10 vr30_8_11 vr30_8_12 vr30_8_13 vr30_8_14 vr30_8_15 +vr31_8_0 vr31_8_1 vr31_8_2 vr31_8_3 vr31_8_4 vr31_8_5 vr31_8_6 vr31_8_7 vr31_8_8 vr31_8_9 vr31_8_10 vr31_8_11 vr31_8_12 vr31_8_13 vr31_8_14 vr31_8_15 +]; +@else +define register offset=0x4200 size=8 [ # 64 bit access to vrN registers (pseudo-registers) (accessed by vrD_64_N vrA_64_N vrB_64_N vrS_64_N vrC_64_N) +vr0_64_1 vr0_64_0 +vr1_64_1 vr1_64_0 +vr2_64_1 vr2_64_0 +vr3_64_1 vr3_64_0 +vr4_64_1 vr4_64_0 +vr5_64_1 vr5_64_0 +vr6_64_1 vr6_64_0 +vr7_64_1 vr7_64_0 +vr8_64_1 vr8_64_0 +vr9_64_1 vr9_64_0 +vr10_64_1 vr10_64_0 +vr11_64_1 vr11_64_0 +vr12_64_1 vr12_64_0 +vr13_64_1 vr13_64_0 +vr14_64_1 vr14_64_0 +vr15_64_1 vr15_64_0 +vr16_64_1 vr16_64_0 +vr17_64_1 vr17_64_0 +vr18_64_1 vr18_64_0 +vr19_64_1 vr19_64_0 +vr20_64_1 vr20_64_0 +vr21_64_1 vr21_64_0 +vr22_64_1 vr22_64_0 +vr23_64_1 vr23_64_0 +vr24_64_1 vr24_64_0 +vr25_64_1 vr25_64_0 +vr26_64_1 vr26_64_0 +vr27_64_1 vr27_64_0 +vr28_64_1 vr28_64_0 +vr29_64_1 vr29_64_0 +vr30_64_1 vr30_64_0 +vr31_64_1 vr31_64_0 +]; + +define register offset=0x4200 size=4 [ # 32 bit access to vrN registers (pseudo-registers) (accessed by vrD_32_N vrA_32_N vrB_32_N vrS_32_N vrC_32_N) +vr0_32_3 vr0_32_2 vr0_32_1 vr0_32_0 +vr1_32_3 vr1_32_2 vr1_32_1 vr1_32_0 +vr2_32_3 vr2_32_2 vr2_32_1 vr2_32_0 +vr3_32_3 vr3_32_2 vr3_32_1 vr3_32_0 +vr4_32_3 vr4_32_2 vr4_32_1 vr4_32_0 +vr5_32_3 vr5_32_2 vr5_32_1 vr5_32_0 +vr6_32_3 vr6_32_2 vr6_32_1 vr6_32_0 +vr7_32_3 vr7_32_2 vr7_32_1 vr7_32_0 +vr8_32_3 vr8_32_2 vr8_32_1 vr8_32_0 +vr9_32_3 vr9_32_2 vr9_32_1 vr9_32_0 +vr10_32_3 vr10_32_2 vr10_32_1 vr10_32_0 +vr11_32_3 vr11_32_2 vr11_32_1 vr11_32_0 +vr12_32_3 vr12_32_2 vr12_32_1 vr12_32_0 +vr13_32_3 vr13_32_2 vr13_32_1 vr13_32_0 +vr14_32_3 vr14_32_2 vr14_32_1 vr14_32_0 +vr15_32_3 vr15_32_2 vr15_32_1 vr15_32_0 +vr16_32_3 vr16_32_2 vr16_32_1 vr16_32_0 +vr17_32_3 vr17_32_2 vr17_32_1 vr17_32_0 +vr18_32_3 vr18_32_2 vr18_32_1 vr18_32_0 +vr19_32_3 vr19_32_2 vr19_32_1 vr19_32_0 +vr20_32_3 vr20_32_2 vr20_32_1 vr20_32_0 +vr21_32_3 vr21_32_2 vr21_32_1 vr21_32_0 +vr22_32_3 vr22_32_2 vr22_32_1 vr22_32_0 +vr23_32_3 vr23_32_2 vr23_32_1 vr23_32_0 +vr24_32_3 vr24_32_2 vr24_32_1 vr24_32_0 +vr25_32_3 vr25_32_2 vr25_32_1 vr25_32_0 +vr26_32_3 vr26_32_2 vr26_32_1 vr26_32_0 +vr27_32_3 vr27_32_2 vr27_32_1 vr27_32_0 +vr28_32_3 vr28_32_2 vr28_32_1 vr28_32_0 +vr29_32_3 vr29_32_2 vr29_32_1 vr29_32_0 +vr30_32_3 vr30_32_2 vr30_32_1
vr30_32_0 +vr31_32_3 vr31_32_2 vr31_32_1 vr31_32_0 +]; + +# Create pseudo sub-registers for the Altivec vector registers to allow easier vector instruction implementation using 2 byte subregisters +define register offset=0x4200 size=2 [ # 16 bit access to vrN registers (pseudo-registers) (accessed by vrD_16_N vrA_16_N vrB_16_N vrS_16_N vrC_16_N) +vr0_16_7 vr0_16_6 vr0_16_5 vr0_16_4 vr0_16_3 vr0_16_2 vr0_16_1 vr0_16_0 +vr1_16_7 vr1_16_6 vr1_16_5 vr1_16_4 vr1_16_3 vr1_16_2 vr1_16_1 vr1_16_0 +vr2_16_7 vr2_16_6 vr2_16_5 vr2_16_4 vr2_16_3 vr2_16_2 vr2_16_1 vr2_16_0 +vr3_16_7 vr3_16_6 vr3_16_5 vr3_16_4 vr3_16_3 vr3_16_2 vr3_16_1 vr3_16_0 +vr4_16_7 vr4_16_6 vr4_16_5 vr4_16_4 vr4_16_3 vr4_16_2 vr4_16_1 vr4_16_0 +vr5_16_7 vr5_16_6 vr5_16_5 vr5_16_4 vr5_16_3 vr5_16_2 vr5_16_1 vr5_16_0 +vr6_16_7 vr6_16_6 vr6_16_5 vr6_16_4 vr6_16_3 vr6_16_2 vr6_16_1 vr6_16_0 +vr7_16_7 vr7_16_6 vr7_16_5 vr7_16_4 vr7_16_3 vr7_16_2 vr7_16_1 vr7_16_0 +vr8_16_7 vr8_16_6 vr8_16_5 vr8_16_4 vr8_16_3 vr8_16_2 vr8_16_1 vr8_16_0 +vr9_16_7 vr9_16_6 vr9_16_5 vr9_16_4 vr9_16_3 vr9_16_2 vr9_16_1 vr9_16_0 +vr10_16_7 vr10_16_6 vr10_16_5 vr10_16_4 vr10_16_3 vr10_16_2 vr10_16_1 vr10_16_0 +vr11_16_7 vr11_16_6 vr11_16_5 vr11_16_4 vr11_16_3 vr11_16_2 vr11_16_1 vr11_16_0 +vr12_16_7 vr12_16_6 vr12_16_5 vr12_16_4 vr12_16_3 vr12_16_2 vr12_16_1 vr12_16_0 +vr13_16_7 vr13_16_6 vr13_16_5 vr13_16_4 vr13_16_3 vr13_16_2 vr13_16_1 vr13_16_0 +vr14_16_7 vr14_16_6 vr14_16_5 vr14_16_4 vr14_16_3 vr14_16_2 vr14_16_1 vr14_16_0 +vr15_16_7 vr15_16_6 vr15_16_5 vr15_16_4 vr15_16_3 vr15_16_2 vr15_16_1 vr15_16_0 +vr16_16_7 vr16_16_6 vr16_16_5 vr16_16_4 vr16_16_3 vr16_16_2 vr16_16_1 vr16_16_0 +vr17_16_7 vr17_16_6 vr17_16_5 vr17_16_4 vr17_16_3 vr17_16_2 vr17_16_1 vr17_16_0 +vr18_16_7 vr18_16_6 vr18_16_5 vr18_16_4 vr18_16_3 vr18_16_2 vr18_16_1 vr18_16_0 +vr19_16_7 vr19_16_6 vr19_16_5 vr19_16_4 vr19_16_3 vr19_16_2 vr19_16_1 vr19_16_0 +vr20_16_7 vr20_16_6 vr20_16_5 vr20_16_4 vr20_16_3 vr20_16_2 vr20_16_1 vr20_16_0 +vr21_16_7 vr21_16_6 vr21_16_5 vr21_16_4 vr21_16_3 vr21_16_2 vr21_16_1 vr21_16_0 +vr22_16_7 vr22_16_6 vr22_16_5 vr22_16_4 vr22_16_3 vr22_16_2 vr22_16_1 vr22_16_0 +vr23_16_7 vr23_16_6 vr23_16_5 vr23_16_4 vr23_16_3 vr23_16_2 vr23_16_1 vr23_16_0 +vr24_16_7 vr24_16_6 vr24_16_5 vr24_16_4 vr24_16_3 vr24_16_2 vr24_16_1 vr24_16_0 +vr25_16_7 vr25_16_6 vr25_16_5 vr25_16_4 vr25_16_3 vr25_16_2 vr25_16_1 vr25_16_0 +vr26_16_7 vr26_16_6 vr26_16_5 vr26_16_4 vr26_16_3 vr26_16_2 vr26_16_1 vr26_16_0 +vr27_16_7 vr27_16_6 vr27_16_5 vr27_16_4 vr27_16_3 vr27_16_2 vr27_16_1 vr27_16_0 +vr28_16_7 vr28_16_6 vr28_16_5 vr28_16_4 vr28_16_3 vr28_16_2 vr28_16_1 vr28_16_0 +vr29_16_7 vr29_16_6 vr29_16_5 vr29_16_4 vr29_16_3 vr29_16_2 vr29_16_1 vr29_16_0 +vr30_16_7 vr30_16_6 vr30_16_5 vr30_16_4 vr30_16_3 vr30_16_2 vr30_16_1 vr30_16_0 +vr31_16_7 vr31_16_6 vr31_16_5 vr31_16_4 vr31_16_3 vr31_16_2 vr31_16_1 vr31_16_0 +]; + +# Create pseudo sub-registers for the Altivec vector registers to allow easier vector instruction implementation using 1 byte subregisters +define register offset=0x4200 size=1 [ # 8 bit access to vrN registers (pseudo-registers) (accessed by vrD_8_N vrA_8_N vrB_8_N vrS_8_N vrC_8_N) +vr0_8_15 vr0_8_14 vr0_8_13 vr0_8_12 vr0_8_11 vr0_8_10 vr0_8_9 vr0_8_8 vr0_8_7 vr0_8_6 vr0_8_5 vr0_8_4 vr0_8_3 vr0_8_2 vr0_8_1 vr0_8_0 +vr1_8_15 vr1_8_14 vr1_8_13 vr1_8_12 vr1_8_11 vr1_8_10 vr1_8_9 vr1_8_8 vr1_8_7 vr1_8_6 vr1_8_5 vr1_8_4 vr1_8_3 vr1_8_2 vr1_8_1 vr1_8_0 +vr2_8_15 vr2_8_14 vr2_8_13 vr2_8_12 vr2_8_11 vr2_8_10 vr2_8_9 vr2_8_8 vr2_8_7 vr2_8_6 vr2_8_5 vr2_8_4 vr2_8_3 vr2_8_2 vr2_8_1 vr2_8_0 +vr3_8_15 vr3_8_14
vr3_8_13 vr3_8_12 vr3_8_11 vr3_8_10 vr3_8_9 vr3_8_8 vr3_8_7 vr3_8_6 vr3_8_5 vr3_8_4 vr3_8_3 vr3_8_2 vr3_8_1 vr3_8_0 +vr4_8_15 vr4_8_14 vr4_8_13 vr4_8_12 vr4_8_11 vr4_8_10 vr4_8_9 vr4_8_8 vr4_8_7 vr4_8_6 vr4_8_5 vr4_8_4 vr4_8_3 vr4_8_2 vr4_8_1 vr4_8_0 +vr5_8_15 vr5_8_14 vr5_8_13 vr5_8_12 vr5_8_11 vr5_8_10 vr5_8_9 vr5_8_8 vr5_8_7 vr5_8_6 vr5_8_5 vr5_8_4 vr5_8_3 vr5_8_2 vr5_8_1 vr5_8_0 +vr6_8_15 vr6_8_14 vr6_8_13 vr6_8_12 vr6_8_11 vr6_8_10 vr6_8_9 vr6_8_8 vr6_8_7 vr6_8_6 vr6_8_5 vr6_8_4 vr6_8_3 vr6_8_2 vr6_8_1 vr6_8_0 +vr7_8_15 vr7_8_14 vr7_8_13 vr7_8_12 vr7_8_11 vr7_8_10 vr7_8_9 vr7_8_8 vr7_8_7 vr7_8_6 vr7_8_5 vr7_8_4 vr7_8_3 vr7_8_2 vr7_8_1 vr7_8_0 +vr8_8_15 vr8_8_14 vr8_8_13 vr8_8_12 vr8_8_11 vr8_8_10 vr8_8_9 vr8_8_8 vr8_8_7 vr8_8_6 vr8_8_5 vr8_8_4 vr8_8_3 vr8_8_2 vr8_8_1 vr8_8_0 +vr9_8_15 vr9_8_14 vr9_8_13 vr9_8_12 vr9_8_11 vr9_8_10 vr9_8_9 vr9_8_8 vr9_8_7 vr9_8_6 vr9_8_5 vr9_8_4 vr9_8_3 vr9_8_2 vr9_8_1 vr9_8_0 +vr10_8_15 vr10_8_14 vr10_8_13 vr10_8_12 vr10_8_11 vr10_8_10 vr10_8_9 vr10_8_8 vr10_8_7 vr10_8_6 vr10_8_5 vr10_8_4 vr10_8_3 vr10_8_2 vr10_8_1 vr10_8_0 +vr11_8_15 vr11_8_14 vr11_8_13 vr11_8_12 vr11_8_11 vr11_8_10 vr11_8_9 vr11_8_8 vr11_8_7 vr11_8_6 vr11_8_5 vr11_8_4 vr11_8_3 vr11_8_2 vr11_8_1 vr11_8_0 +vr12_8_15 vr12_8_14 vr12_8_13 vr12_8_12 vr12_8_11 vr12_8_10 vr12_8_9 vr12_8_8 vr12_8_7 vr12_8_6 vr12_8_5 vr12_8_4 vr12_8_3 vr12_8_2 vr12_8_1 vr12_8_0 +vr13_8_15 vr13_8_14 vr13_8_13 vr13_8_12 vr13_8_11 vr13_8_10 vr13_8_9 vr13_8_8 vr13_8_7 vr13_8_6 vr13_8_5 vr13_8_4 vr13_8_3 vr13_8_2 vr13_8_1 vr13_8_0 +vr14_8_15 vr14_8_14 vr14_8_13 vr14_8_12 vr14_8_11 vr14_8_10 vr14_8_9 vr14_8_8 vr14_8_7 vr14_8_6 vr14_8_5 vr14_8_4 vr14_8_3 vr14_8_2 vr14_8_1 vr14_8_0 +vr15_8_15 vr15_8_14 vr15_8_13 vr15_8_12 vr15_8_11 vr15_8_10 vr15_8_9 vr15_8_8 vr15_8_7 vr15_8_6 vr15_8_5 vr15_8_4 vr15_8_3 vr15_8_2 vr15_8_1 vr15_8_0 +vr16_8_15 vr16_8_14 vr16_8_13 vr16_8_12 vr16_8_11 vr16_8_10 vr16_8_9 vr16_8_8 vr16_8_7 vr16_8_6 vr16_8_5 vr16_8_4 vr16_8_3 vr16_8_2 vr16_8_1 vr16_8_0 +vr17_8_15 vr17_8_14 vr17_8_13 vr17_8_12 vr17_8_11 vr17_8_10 vr17_8_9 vr17_8_8 vr17_8_7 vr17_8_6 vr17_8_5 vr17_8_4 vr17_8_3 vr17_8_2 vr17_8_1 vr17_8_0 +vr18_8_15 vr18_8_14 vr18_8_13 vr18_8_12 vr18_8_11 vr18_8_10 vr18_8_9 vr18_8_8 vr18_8_7 vr18_8_6 vr18_8_5 vr18_8_4 vr18_8_3 vr18_8_2 vr18_8_1 vr18_8_0 +vr19_8_15 vr19_8_14 vr19_8_13 vr19_8_12 vr19_8_11 vr19_8_10 vr19_8_9 vr19_8_8 vr19_8_7 vr19_8_6 vr19_8_5 vr19_8_4 vr19_8_3 vr19_8_2 vr19_8_1 vr19_8_0 +vr20_8_15 vr20_8_14 vr20_8_13 vr20_8_12 vr20_8_11 vr20_8_10 vr20_8_9 vr20_8_8 vr20_8_7 vr20_8_6 vr20_8_5 vr20_8_4 vr20_8_3 vr20_8_2 vr20_8_1 vr20_8_0 +vr21_8_15 vr21_8_14 vr21_8_13 vr21_8_12 vr21_8_11 vr21_8_10 vr21_8_9 vr21_8_8 vr21_8_7 vr21_8_6 vr21_8_5 vr21_8_4 vr21_8_3 vr21_8_2 vr21_8_1 vr21_8_0 +vr22_8_15 vr22_8_14 vr22_8_13 vr22_8_12 vr22_8_11 vr22_8_10 vr22_8_9 vr22_8_8 vr22_8_7 vr22_8_6 vr22_8_5 vr22_8_4 vr22_8_3 vr22_8_2 vr22_8_1 vr22_8_0 +vr23_8_15 vr23_8_14 vr23_8_13 vr23_8_12 vr23_8_11 vr23_8_10 vr23_8_9 vr23_8_8 vr23_8_7 vr23_8_6 vr23_8_5 vr23_8_4 vr23_8_3 vr23_8_2 vr23_8_1 vr23_8_0 +vr24_8_15 vr24_8_14 vr24_8_13 vr24_8_12 vr24_8_11 vr24_8_10 vr24_8_9 vr24_8_8 vr24_8_7 vr24_8_6 vr24_8_5 vr24_8_4 vr24_8_3 vr24_8_2 vr24_8_1 vr24_8_0 +vr25_8_15 vr25_8_14 vr25_8_13 vr25_8_12 vr25_8_11 vr25_8_10 vr25_8_9 vr25_8_8 vr25_8_7 vr25_8_6 vr25_8_5 vr25_8_4 vr25_8_3 vr25_8_2 vr25_8_1 vr25_8_0 +vr26_8_15 vr26_8_14 vr26_8_13 vr26_8_12 vr26_8_11 vr26_8_10 vr26_8_9 vr26_8_8 vr26_8_7 vr26_8_6 vr26_8_5 vr26_8_4 vr26_8_3 vr26_8_2 vr26_8_1 vr26_8_0 +vr27_8_15 vr27_8_14 vr27_8_13 vr27_8_12 vr27_8_11 vr27_8_10 
vr27_8_9 vr27_8_8 vr27_8_7 vr27_8_6 vr27_8_5 vr27_8_4 vr27_8_3 vr27_8_2 vr27_8_1 vr27_8_0 +vr28_8_15 vr28_8_14 vr28_8_13 vr28_8_12 vr28_8_11 vr28_8_10 vr28_8_9 vr28_8_8 vr28_8_7 vr28_8_6 vr28_8_5 vr28_8_4 vr28_8_3 vr28_8_2 vr28_8_1 vr28_8_0 +vr29_8_15 vr29_8_14 vr29_8_13 vr29_8_12 vr29_8_11 vr29_8_10 vr29_8_9 vr29_8_8 vr29_8_7 vr29_8_6 vr29_8_5 vr29_8_4 vr29_8_3 vr29_8_2 vr29_8_1 vr29_8_0 +vr30_8_15 vr30_8_14 vr30_8_13 vr30_8_12 vr30_8_11 vr30_8_10 vr30_8_9 vr30_8_8 vr30_8_7 vr30_8_6 vr30_8_5 vr30_8_4 vr30_8_3 vr30_8_2 vr30_8_1 vr30_8_0 +vr31_8_15 vr31_8_14 vr31_8_13 vr31_8_12 vr31_8_11 vr31_8_10 vr31_8_9 vr31_8_8 vr31_8_7 vr31_8_6 vr31_8_5 vr31_8_4 vr31_8_3 vr31_8_2 vr31_8_1 vr31_8_0 +]; + +@endif +# Define context bits +define register offset=0x6000 size=4 contextreg; +define context contextreg + linkreg=(0,1) # 0 - no LR set, 1 - LR set (used to flag branch instructions to be treated as calls) + vle=(2,2) # Used to control inclusion/disassembly of vle instructions. '1' means use vle see NOTVLE/ISVLE @define below + # FIXME! while allowing vle context to flow is incorrect, the PowerPC disassembly action will not work at all without it + # and could easily flow the incorrect context when traversing between VLE and non-VLE sections. + + # transient context + lsmul=(3,7) noflow # Used for Load/store multiple parsing + regp=(8,12) noflow # Used in powerISA quad word instructions + regpset=(8,12) noflow # Used in powerISA quad word instructions +; + +@define NOTVLE "vle=0" +@define ISVLE "vle=1" + +# Define Device Control Registers (specific to IBM PowerPC Embedded Controller, see instructions mfdcr/mtdcr) +# Device Control Registers are defined with generic names +# These names may be replaced within register_data section within a PPC variant's pspec file +define register offset=0x7000 size=$(REGISTER_SIZE) + [ dcr000 dcr001 dcr002 dcr003 dcr004 dcr005 dcr006 dcr007 dcr008 dcr009 dcr00a dcr00b dcr00c dcr00d dcr00e dcr00f + dcr010 dcr011 dcr012 dcr013 dcr014 dcr015 dcr016 dcr017 dcr018 dcr019 dcr01a dcr01b dcr01c dcr01d dcr01e dcr01f + dcr020 dcr021 dcr022 dcr023 dcr024 dcr025 dcr026 dcr027 dcr028 dcr029 dcr02a dcr02b dcr02c dcr02d dcr02e dcr02f + dcr030 dcr031 dcr032 dcr033 dcr034 dcr035 dcr036 dcr037 dcr038 dcr039 dcr03a dcr03b dcr03c dcr03d dcr03e dcr03f + dcr040 dcr041 dcr042 dcr043 dcr044 dcr045 dcr046 dcr047 dcr048 dcr049 dcr04a dcr04b dcr04c dcr04d dcr04e dcr04f + dcr050 dcr051 dcr052 dcr053 dcr054 dcr055 dcr056 dcr057 dcr058 dcr059 dcr05a dcr05b dcr05c dcr05d dcr05e dcr05f + dcr060 dcr061 dcr062 dcr063 dcr064 dcr065 dcr066 dcr067 dcr068 dcr069 dcr06a dcr06b dcr06c dcr06d dcr06e dcr06f + dcr070 dcr071 dcr072 dcr073 dcr074 dcr075 dcr076 dcr077 dcr078 dcr079 dcr07a dcr07b dcr07c dcr07d dcr07e dcr07f + dcr080 dcr081 dcr082 dcr083 dcr084 dcr085 dcr086 dcr087 dcr088 dcr089 dcr08a dcr08b dcr08c dcr08d dcr08e dcr08f + dcr090 dcr091 dcr092 dcr093 dcr094 dcr095 dcr096 dcr097 dcr098 dcr099 dcr09a dcr09b dcr09c dcr09d dcr09e dcr09f + dcr0a0 dcr0a1 dcr0a2 dcr0a3 dcr0a4 dcr0a5 dcr0a6 dcr0a7 dcr0a8 dcr0a9 dcr0aa dcr0ab dcr0ac dcr0ad dcr0ae dcr0af + dcr0b0 dcr0b1 dcr0b2 dcr0b3 dcr0b4 dcr0b5 dcr0b6 dcr0b7 dcr0b8 dcr0b9 dcr0ba dcr0bb dcr0bc dcr0bd dcr0be dcr0bf + dcr0c0 dcr0c1 dcr0c2 dcr0c3 dcr0c4 dcr0c5 dcr0c6 dcr0c7 dcr0c8 dcr0c9 dcr0ca dcr0cb dcr0cc dcr0cd dcr0ce dcr0cf + dcr0d0 dcr0d1 dcr0d2 dcr0d3 dcr0d4 dcr0d5 dcr0d6 dcr0d7 dcr0d8 dcr0d9 dcr0da dcr0db dcr0dc dcr0dd dcr0de dcr0df + dcr0e0 dcr0e1 dcr0e2 dcr0e3 dcr0e4 dcr0e5 dcr0e6 dcr0e7 dcr0e8 dcr0e9 dcr0ea dcr0eb dcr0ec dcr0ed dcr0ee dcr0ef + 
dcr0f0 dcr0f1 dcr0f2 dcr0f3 dcr0f4 dcr0f5 dcr0f6 dcr0f7 dcr0f8 dcr0f9 dcr0fa dcr0fb dcr0fc dcr0fd dcr0fe dcr0ff + dcr100 dcr101 dcr102 dcr103 dcr104 dcr105 dcr106 dcr107 dcr108 dcr109 dcr10a dcr10b dcr10c dcr10d dcr10e dcr10f + dcr110 dcr111 dcr112 dcr113 dcr114 dcr115 dcr116 dcr117 dcr118 dcr119 dcr11a dcr11b dcr11c dcr11d dcr11e dcr11f + dcr120 dcr121 dcr122 dcr123 dcr124 dcr125 dcr126 dcr127 dcr128 dcr129 dcr12a dcr12b dcr12c dcr12d dcr12e dcr12f + dcr130 dcr131 dcr132 dcr133 dcr134 dcr135 dcr136 dcr137 dcr138 dcr139 dcr13a dcr13b dcr13c dcr13d dcr13e dcr13f + dcr140 dcr141 dcr142 dcr143 dcr144 dcr145 dcr146 dcr147 dcr148 dcr149 dcr14a dcr14b dcr14c dcr14d dcr14e dcr14f + dcr150 dcr151 dcr152 dcr153 dcr154 dcr155 dcr156 dcr157 dcr158 dcr159 dcr15a dcr15b dcr15c dcr15d dcr15e dcr15f + dcr160 dcr161 dcr162 dcr163 dcr164 dcr165 dcr166 dcr167 dcr168 dcr169 dcr16a dcr16b dcr16c dcr16d dcr16e dcr16f + dcr170 dcr171 dcr172 dcr173 dcr174 dcr175 dcr176 dcr177 dcr178 dcr179 dcr17a dcr17b dcr17c dcr17d dcr17e dcr17f + dcr180 dcr181 dcr182 dcr183 dcr184 dcr185 dcr186 dcr187 dcr188 dcr189 dcr18a dcr18b dcr18c dcr18d dcr18e dcr18f + dcr190 dcr191 dcr192 dcr193 dcr194 dcr195 dcr196 dcr197 dcr198 dcr199 dcr19a dcr19b dcr19c dcr19d dcr19e dcr19f + dcr1a0 dcr1a1 dcr1a2 dcr1a3 dcr1a4 dcr1a5 dcr1a6 dcr1a7 dcr1a8 dcr1a9 dcr1aa dcr1ab dcr1ac dcr1ad dcr1ae dcr1af + dcr1b0 dcr1b1 dcr1b2 dcr1b3 dcr1b4 dcr1b5 dcr1b6 dcr1b7 dcr1b8 dcr1b9 dcr1ba dcr1bb dcr1bc dcr1bd dcr1be dcr1bf + dcr1c0 dcr1c1 dcr1c2 dcr1c3 dcr1c4 dcr1c5 dcr1c6 dcr1c7 dcr1c8 dcr1c9 dcr1ca dcr1cb dcr1cc dcr1cd dcr1ce dcr1cf + dcr1d0 dcr1d1 dcr1d2 dcr1d3 dcr1d4 dcr1d5 dcr1d6 dcr1d7 dcr1d8 dcr1d9 dcr1da dcr1db dcr1dc dcr1dd dcr1de dcr1df + dcr1e0 dcr1e1 dcr1e2 dcr1e3 dcr1e4 dcr1e5 dcr1e6 dcr1e7 dcr1e8 dcr1e9 dcr1ea dcr1eb dcr1ec dcr1ed dcr1ee dcr1ef + dcr1f0 dcr1f1 dcr1f2 dcr1f3 dcr1f4 dcr1f5 dcr1f6 dcr1f7 dcr1f8 dcr1f9 dcr1fa dcr1fb dcr1fc dcr1fd dcr1fe dcr1ff + dcr200 dcr201 dcr202 dcr203 dcr204 dcr205 dcr206 dcr207 dcr208 dcr209 dcr20a dcr20b dcr20c dcr20d dcr20e dcr20f + dcr210 dcr211 dcr212 dcr213 dcr214 dcr215 dcr216 dcr217 dcr218 dcr219 dcr21a dcr21b dcr21c dcr21d dcr21e dcr21f + dcr220 dcr221 dcr222 dcr223 dcr224 dcr225 dcr226 dcr227 dcr228 dcr229 dcr22a dcr22b dcr22c dcr22d dcr22e dcr22f + dcr230 dcr231 dcr232 dcr233 dcr234 dcr235 dcr236 dcr237 dcr238 dcr239 dcr23a dcr23b dcr23c dcr23d dcr23e dcr23f + dcr240 dcr241 dcr242 dcr243 dcr244 dcr245 dcr246 dcr247 dcr248 dcr249 dcr24a dcr24b dcr24c dcr24d dcr24e dcr24f + dcr250 dcr251 dcr252 dcr253 dcr254 dcr255 dcr256 dcr257 dcr258 dcr259 dcr25a dcr25b dcr25c dcr25d dcr25e dcr25f + dcr260 dcr261 dcr262 dcr263 dcr264 dcr265 dcr266 dcr267 dcr268 dcr269 dcr26a dcr26b dcr26c dcr26d dcr26e dcr26f + dcr270 dcr271 dcr272 dcr273 dcr274 dcr275 dcr276 dcr277 dcr278 dcr279 dcr27a dcr27b dcr27c dcr27d dcr27e dcr27f + dcr280 dcr281 dcr282 dcr283 dcr284 dcr285 dcr286 dcr287 dcr288 dcr289 dcr28a dcr28b dcr28c dcr28d dcr28e dcr28f + dcr290 dcr291 dcr292 dcr293 dcr294 dcr295 dcr296 dcr297 dcr298 dcr299 dcr29a dcr29b dcr29c dcr29d dcr29e dcr29f + dcr2a0 dcr2a1 dcr2a2 dcr2a3 dcr2a4 dcr2a5 dcr2a6 dcr2a7 dcr2a8 dcr2a9 dcr2aa dcr2ab dcr2ac dcr2ad dcr2ae dcr2af + dcr2b0 dcr2b1 dcr2b2 dcr2b3 dcr2b4 dcr2b5 dcr2b6 dcr2b7 dcr2b8 dcr2b9 dcr2ba dcr2bb dcr2bc dcr2bd dcr2be dcr2bf + dcr2c0 dcr2c1 dcr2c2 dcr2c3 dcr2c4 dcr2c5 dcr2c6 dcr2c7 dcr2c8 dcr2c9 dcr2ca dcr2cb dcr2cc dcr2cd dcr2ce dcr2cf + dcr2d0 dcr2d1 dcr2d2 dcr2d3 dcr2d4 dcr2d5 dcr2d6 dcr2d7 dcr2d8 dcr2d9 dcr2da dcr2db dcr2dc dcr2dd dcr2de dcr2df + dcr2e0 dcr2e1 dcr2e2 
dcr2e3 dcr2e4 dcr2e5 dcr2e6 dcr2e7 dcr2e8 dcr2e9 dcr2ea dcr2eb dcr2ec dcr2ed dcr2ee dcr2ef + dcr2f0 dcr2f1 dcr2f2 dcr2f3 dcr2f4 dcr2f5 dcr2f6 dcr2f7 dcr2f8 dcr2f9 dcr2fa dcr2fb dcr2fc dcr2fd dcr2fe dcr2ff + dcr300 dcr301 dcr302 dcr303 dcr304 dcr305 dcr306 dcr307 dcr308 dcr309 dcr30a dcr30b dcr30c dcr30d dcr30e dcr30f + dcr310 dcr311 dcr312 dcr313 dcr314 dcr315 dcr316 dcr317 dcr318 dcr319 dcr31a dcr31b dcr31c dcr31d dcr31e dcr31f + dcr320 dcr321 dcr322 dcr323 dcr324 dcr325 dcr326 dcr327 dcr328 dcr329 dcr32a dcr32b dcr32c dcr32d dcr32e dcr32f + dcr330 dcr331 dcr332 dcr333 dcr334 dcr335 dcr336 dcr337 dcr338 dcr339 dcr33a dcr33b dcr33c dcr33d dcr33e dcr33f + dcr340 dcr341 dcr342 dcr343 dcr344 dcr345 dcr346 dcr347 dcr348 dcr349 dcr34a dcr34b dcr34c dcr34d dcr34e dcr34f + dcr350 dcr351 dcr352 dcr353 dcr354 dcr355 dcr356 dcr357 dcr358 dcr359 dcr35a dcr35b dcr35c dcr35d dcr35e dcr35f + dcr360 dcr361 dcr362 dcr363 dcr364 dcr365 dcr366 dcr367 dcr368 dcr369 dcr36a dcr36b dcr36c dcr36d dcr36e dcr36f + dcr370 dcr371 dcr372 dcr373 dcr374 dcr375 dcr376 dcr377 dcr378 dcr379 dcr37a dcr37b dcr37c dcr37d dcr37e dcr37f + dcr380 dcr381 dcr382 dcr383 dcr384 dcr385 dcr386 dcr387 dcr388 dcr389 dcr38a dcr38b dcr38c dcr38d dcr38e dcr38f + dcr390 dcr391 dcr392 dcr393 dcr394 dcr395 dcr396 dcr397 dcr398 dcr399 dcr39a dcr39b dcr39c dcr39d dcr39e dcr39f + dcr3a0 dcr3a1 dcr3a2 dcr3a3 dcr3a4 dcr3a5 dcr3a6 dcr3a7 dcr3a8 dcr3a9 dcr3aa dcr3ab dcr3ac dcr3ad dcr3ae dcr3af + dcr3b0 dcr3b1 dcr3b2 dcr3b3 dcr3b4 dcr3b5 dcr3b6 dcr3b7 dcr3b8 dcr3b9 dcr3ba dcr3bb dcr3bc dcr3bd dcr3be dcr3bf + dcr3c0 dcr3c1 dcr3c2 dcr3c3 dcr3c4 dcr3c5 dcr3c6 dcr3c7 dcr3c8 dcr3c9 dcr3ca dcr3cb dcr3cc dcr3cd dcr3ce dcr3cf + dcr3d0 dcr3d1 dcr3d2 dcr3d3 dcr3d4 dcr3d5 dcr3d6 dcr3d7 dcr3d8 dcr3d9 dcr3da dcr3db dcr3dc dcr3dd dcr3de dcr3df + dcr3e0 dcr3e1 dcr3e2 dcr3e3 dcr3e4 dcr3e5 dcr3e6 dcr3e7 dcr3e8 dcr3e9 dcr3ea dcr3eb dcr3ec dcr3ed dcr3ee dcr3ef + dcr3f0 dcr3f1 dcr3f2 dcr3f3 dcr3f4 dcr3f5 dcr3f6 dcr3f7 dcr3f8 dcr3f9 dcr3fa dcr3fb dcr3fc dcr3fd dcr3fe dcr3ff + ]; + +# ACC and SPEFSCR are part of the "EREF: A Reference for Motorola Book E and e500 Core" spec +# SPEFSCR is a repurposed spr200 +define register offset=0x10000 size=8 [ACC]; + +# OP=17 & BITS_21_25=0 & BITS_16_20=0(ok) & BITS_5_11=LEV & BITS_2_4=0 & BIT_1=1 & BIT_0=0 + +define token instr(32) + A=(16,20) + AA=(1,1) + A_BITS=(16,20) + A_BITSS=(16,20) signed + AX=(2,2) + B=(11,15) + B_BITS=(11,15) + BD=(2,15) signed + BF=(17,24) + BFA=(0,2) + BFA2=(18,20) + BF2=(23,25) + BH=(11,12) + BH_BITS=(11,12) + BH_RBE=(11,20) + BH_RET=(11,11) + BI_BITS=(16,20) + BI_CC=(16,17) + BI_CR=(18,20) + BIT_A=(25,25) + BIT_L=(21,21) + BIT_R=(21,21) + BIT_0=(0,0) + BIT_10=(10,10) + BIT_1=(1,1) + BIT_11=(11,11) + BIT_15=(15,15) + BIT_16=(16,16) + BIT_17=(17,17) + BIT_18=(18,18) + BIT_20=(20,20) + BIT_22=(22,22) + BIT_25=(25,25) + BIT_9=(9,9) + BIT_6=(6,6) + BITS_0_1=(0,1) + BITS_0_17=(0,17) + BITS_0_2=(0,2) + BITS_0_3=(0,3) + BITS_1_10=(1,10) + BITS_11_13=(11,13) + BITS_11_15=(11,15) + BITS_11_17=(11,17) + BITS_11_20=(11,20) + BITS_11_22=(11,22) + BITS_11_24=(11,24) + BITS_11_25=(11,25) + BITS_12_15=(12,15) + BITS_12_19=(12,19) + BITS_12_25=(12,25) + BITS_13_15=(13,15) + BITS_14_15=(14,15) + BITS_16_17=(12,15) + BITS_16_18=(16,18) + BITS_16_19=(16,19) + BITS_16_20=(16,20) + BITS_16_22=(16,22) + BITS_16_25=(16,25) + BITS_17_20=(17,20) + BITS_17_24=(17,24) + BITS_18_19=(18,19) + BITS_18_20=(18,20) + BITS_1_9=(1,9) + BITS_19_20=(19,20) + BITS_20_20=(20,20) + BITS_21_22=(21,22) + BITS_21_23=(21,23) + BITS_21_24=(7,10) +
BITS_21_25=(21,25) + BITS_21_28=(21,28) + BITS_22_24=(22,24) + BITS_22_25=(22,25) + BITS_22_26=(22,26) + BITS_2_25=(2,25) + BITS_23_24=(23,24) + BITS_23_25=(23,25) + BITS_2_4=(2,4) + BITS_24_25=(24,25) + BITS_3_7=(3,7) + BITS_4_5=(4,5) + BITS_6_10=(6,10) + BO_0=(25,25) + BO_1=(24,24) + BO=(21,25) + BO_2=(23,23) + BO_3=(22, 22) + BO_BITS=(21,25) + BX=(1,1) + C=(6,10) + COND_BRANCH_CTRL=(22,25) + CR_A=(18,20) + CR_A_CC=(16,17) + CR_B=(13,15) + CR_B_CC=(11,12) + CRBD=(21,25) + CRBR=(6,10) + CR_D=(23,25) + CR_D_CC=(21,22) + crfD=(23,25) + CRFD=(23,25) + CRFS=(18,20) + CRM0=(19,19) + CRM1=(18,18) + CRM=(12,19) + CRM2=(17,17) + CRM3=(16,16) + CRM4=(15,15) + CRM5=(14,14) + CRM6=(13,13) + CRM7=(12,12) + CR_X=(8,10) + CR_X_CC=(6,7) + CT=(21,25) + CT2=(21,24) + CX=(3,3) + D0=(6,15) signed + D1=(16,20) + D2=(0,0) + D=(21,25) + Dp=(21,25) + DC6=(6,6) + DCM=(10,15) + DCMX=(16,22) + DCRN=(11,20) + DGM=(10,15) + DM=(8,9) + DM2=(2,2) + DQ=(4,15) + DQs=(4,15) signed + DS=(2,15) + DSs=(2,15) signed + DX=(16,20) + DUI=(21,25) + DUIS=(11,20) + EX=(0,0) + fA=(16,20) + fB=(11,15) + fC=(6,10) + fD=(21,25) + FM0=(24,24) + FM1=(23,23) + FM=(17,24) + FM2=(22,22) + FM3=(21,21) + FM4=(20,20) + FM5=(19,19) + FM6=(18,18) + FM7=(17,17) + FNC=(11,15) + fS=(21,25) + fT=(21,25) + IMM=(11,15) + + EVUIMM=(11,15) + BU_UIMM=(16,20) + BU_SIMM=(16,20) + EVUIMM_8=(11,15) + EVUIMM_4=(11,15) + EVUIMM_2=(11,15) + + L= (21,22) + L2=(21,21) + L16=(16,17) + LEV=(5,11) + LI=(2,25) signed + LK=(0,0) + MBH=(5,5) + MBL=(6,10) + ME=(1,5) + MO=(21,25) + MSR_L=(16,16) + NB= (11,15) + O=(9,9) + OE=(10,10) + OP=(26,31) + PS=(9,9) + Rc=(0,0) + Rc2=(10,10) + RMC=(9,10) + + RA=(16,20) + RB=(11,15) + RS=(21,25) + RT=(21,25) + R0=(0,0) + R16=(16,16) + + S=(21,25) + SBE=(11,11) + SH16=(10,15) + SHB=(6,9) + SHH=(1,1) + SHL=(11,15) + SHW=(8,9) + S8IMM=(0,7) signed + S5IMM=(11,15) signed + SIMM=(0,15) signed + SIMM_DS=(2,15) signed + SIMM_SIGN=(15,15) + SIX=(11,14) + SP=(19,20) + SPRVAL=(11,20) + SR=(16,19) + ST=(15,15) + STRM=(21,22) + SX=(0,0) + SX3=(3,3) + T=(21,25) + TOA=(21,25) + TBR=(11,20) + TH=(21,25) + TMP_6_10=(21,25) + TO=(21,25) + TX=(0,0) + TX3=(3,3) + UI=(11,15) + UI_11_s8=(16,20) + UI_16_s8=(11,15) + UI_16_s16=(0,15) + UIMM8=(11,18) + UIMM=(0,15) + UIM=(16,17) + UIMB=(16,19) + UIMH=(16,18) + UIMW=(16,17) + UIMT=(16,21) + + vrAR=(16,20) # AltVect Vector register vrN selector (128 bit) + vrAD=(16,20) + + vrA_64_0=(16,20) # AltVect Vector register vrN selector (64 bit) + vrA_64_1=(16,20) + + vrA_32_0=(16,20) # AltVect Vector register vrN selector (32 bit) + vrA_32_1=(16,20) + vrA_32_2=(16,20) + vrA_32_3=(16,20) + + vrA_16_0=(16,20) # AltVect Vector register vrN selector (16 bit) + vrA_16_1=(16,20) + vrA_16_2=(16,20) + vrA_16_3=(16,20) + vrA_16_4=(16,20) + vrA_16_5=(16,20) + vrA_16_6=(16,20) + vrA_16_7=(16,20) + + vrA_8_0=(16,20) # AltVect Vector register vrN selector (8 bit) + vrA_8_1=(16,20) + vrA_8_2=(16,20) + vrA_8_3=(16,20) + vrA_8_4=(16,20) + vrA_8_5=(16,20) + vrA_8_6=(16,20) + vrA_8_7=(16,20) + vrA_8_8=(16,20) + vrA_8_9=(16,20) + vrA_8_10=(16,20) + vrA_8_11=(16,20) + vrA_8_12=(16,20) + vrA_8_13=(16,20) + vrA_8_14=(16,20) + vrA_8_15=(16,20) + + vrBR=(11,15) # AltVect Vector register vrN selector (128 bit) + vrBD=(11,15) + + vrB_64_0=(11,15) # AltVect Vector register vrN selector (64 bit) + vrB_64_1=(11,15) + + vrB_32_0=(11,15) # AltVect Vector register vrN selector (32 bit) + vrB_32_1=(11,15) + vrB_32_2=(11,15) + vrB_32_3=(11,15) + + vrB_16_0=(11,15) # AltVect Vector register vrN selector (16 bit) + vrB_16_1=(11,15) + vrB_16_2=(11,15) + 
vrB_16_3=(11,15) + vrB_16_4=(11,15) + vrB_16_5=(11,15) + vrB_16_6=(11,15) + vrB_16_7=(11,15) + + vrB_8_0=(11,15) # AltVect Vector register vrN selector (8 bit) + vrB_8_1=(11,15) + vrB_8_2=(11,15) + vrB_8_3=(11,15) + vrB_8_4=(11,15) + vrB_8_5=(11,15) + vrB_8_6=(11,15) + vrB_8_7=(11,15) + vrB_8_8=(11,15) + vrB_8_9=(11,15) + vrB_8_10=(11,15) + vrB_8_11=(11,15) + vrB_8_12=(11,15) + vrB_8_13=(11,15) + vrB_8_14=(11,15) + vrB_8_15=(11,15) + + + vrCR=(6,10) # AltVect Vector register vrN selector (128 bit) + vrCD=(6,10) + + vrC_64_0=(6,10) # AltVect Vector register vrN selector (64 bit) + vrC_64_1=(6,10) + + vrC_32_0=(6,10) # AltVect Vector register vrN selector (32 bit) + vrC_32_1=(6,10) + vrC_32_2=(6,10) + vrC_32_3=(6,10) + + vrC_16_0=(6,10) # AltVect Vector register vrN selector (16 bit) + vrC_16_1=(6,10) + vrC_16_2=(6,10) + vrC_16_3=(6,10) + vrC_16_4=(6,10) + vrC_16_5=(6,10) + vrC_16_6=(6,10) + vrC_16_7=(6,10) + + + vrC_8_0=(6,10) # AltVect Vector register vrN selector (8 bit) + vrC_8_1=(6,10) + vrC_8_2=(6,10) + vrC_8_3=(6,10) + vrC_8_4=(6,10) + vrC_8_5=(6,10) + vrC_8_6=(6,10) + vrC_8_7=(6,10) + vrC_8_8=(6,10) + vrC_8_9=(6,10) + vrC_8_10=(6,10) + vrC_8_11=(6,10) + vrC_8_12=(6,10) + vrC_8_13=(6,10) + vrC_8_14=(6,10) + vrC_8_15=(6,10) + + + vrDR=(21,25) # AltVect Vector register vrN selector (128 bit) + vrDD=(21,25) + + vrD_64_0=(21,25) # AltVect Vector register vrN selector (64 bit) + vrD_64_1=(21,25) + + vrD_32_0=(21,25) # AltVect Vector register vrN selector (32 bit) + vrD_32_1=(21,25) + vrD_32_2=(21,25) + vrD_32_3=(21,25) + + vrD_16_0=(21,25) # AltVect Vector register vrN selector (16 bit) + vrD_16_1=(21,25) + vrD_16_2=(21,25) + vrD_16_3=(21,25) + vrD_16_4=(21,25) + vrD_16_5=(21,25) + vrD_16_6=(21,25) + vrD_16_7=(21,25) + + vrD_8_0=(21,25) # AltVect Vector register vrN selector (8 bit) + vrD_8_1=(21,25) + vrD_8_2=(21,25) + vrD_8_3=(21,25) + vrD_8_4=(21,25) + vrD_8_5=(21,25) + vrD_8_6=(21,25) + vrD_8_7=(21,25) + vrD_8_8=(21,25) + vrD_8_9=(21,25) + vrD_8_10=(21,25) + vrD_8_11=(21,25) + vrD_8_12=(21,25) + vrD_8_13=(21,25) + vrD_8_14=(21,25) + vrD_8_15=(21,25) + + + vrSR=(21,25) # AltVect Vector register vrN selector (128 bit) + vrSD=(21,25) + + vrS_64_0=(21,25) # AltVect Vector register vrN selector (64 bit) + vrS_64_1=(21,25) + + vrS_32_0=(21,25) # AltVect Vector register vrN selector (32 bit) + vrS_32_1=(21,25) + vrS_32_2=(21,25) + vrS_32_3=(21,25) + + vrS_16_0=(21,25) # AltVect Vector register vrN selector (16 bit) + vrS_16_1=(21,25) + vrS_16_2=(21,25) + vrS_16_3=(21,25) + vrS_16_4=(21,25) + vrS_16_5=(21,25) + vrS_16_6=(21,25) + vrS_16_7=(21,25) + + vrS_8_0=(21,25) # AltVect Vector register vrN selector (8 bit) + vrS_8_1=(21,25) + vrS_8_2=(21,25) + vrS_8_3=(21,25) + vrS_8_4=(21,25) + vrS_8_5=(21,25) + vrS_8_6=(21,25) + vrS_8_7=(21,25) + vrS_8_8=(21,25) + vrS_8_9=(21,25) + vrS_8_10=(21,25) + vrS_8_11=(21,25) + vrS_8_12=(21,25) + vrS_8_13=(21,25) + vrS_8_14=(21,25) + vrS_8_15=(21,25) + + WC=(21,22) + + XOP_0_10=(0,10) + XOP_0_5=(0,5) + XOP_0_8=(0,8) + XOP_0_9=(0,9) + XOP_1_10=(1,10) + XOP_1_4=(1,4) + XOP_1_5=(1,5) + XOP_1_8=(1,8) + XOP_1_9=(1,9) + XOP_2_10=(2,10) + XOP_2_4=(2,4) + XOP_3_5=(3,5) + XOP_3_10=(3,10) + XOP_3_9=(3,9) + XOP_7_10=(7,10) +# support VSX args + Avsa=(16,20) + Avsb=(16,20) + Bvsa=(11,15) + Bvsb=(11,15) + Cvsa=(6,10) + Cvsb=(6,10) + Svsa=(21,25) + Svsb=(21,25) + Svsbx=(21,25) + Tvsa=(21,25) + Tvsb=(21,25) + Tvsbx=(21,25) + + BD15_VLE=(1,15) signed + BD24_VLE=(1,24) signed + BF_VLE=(21,22) + BI_CC_VLE=(16,17) + BI_CR_VLE=(18,19) + BO_VLE=(20,21) + + IMM8=(0,7) + 
IMM_0_10_VLE=(0,10) + IMM_11_15_VLE=(11,14) + IMM_16_20_VLE=(16,20) + IMM_21_25_VLE=(21,25) + SIMM_11_14_VLE=(11,14) signed + SIMM_21_25_VLE=(21,25) signed + SCL_VLE=(8,9) + + LEV_VLE=(11,15) + XOP_8_VLE=(8,15) + XOP_11_VLE=(11,15) + XOP_12_VLE=(12,15) + + XOP_VLE=(22,25) +; + +define token instrvle(16) + OP4_VLE=(12,15) + OP5_VLE=(11,15) + OP6_VLE=(10,15) + OP15_VLE=(1,15) + OP16_VLE=(0,15) + + OIM5_VLE=(4,8) + OIM7_VLE=(4,10) + SD4_VLE=(8,11) + UI7_VLE=(4,10) + UI5_VLE=(4,8) + XORR_VLE=(8,9) + XOR_VLE=(4,9) + + ARX_VLE=(0,3) + ARY_VLE=(4,7) + RY_VLE=(4,7) + RZ_VLE=(4,7) + RX_VLE=(0,3) + + BO16_VLE=(10,10) + BIT9_VLE=(9,9) + BIT8_VLE=(8,8) + BI16_VLE=(8,9) + BITS_8_9=(8,9) + BD8_VLE=(0,7) signed + + LK8_VLE=(8,8) + LK0_VLE=(0,0) +; + +EVUIMM_2_RAt: val^"("^A^")" is A & EVUIMM_2 [ val = EVUIMM_2*2; ] { tmp:4 = A+(EVUIMM_2*2); export tmp; } +EVUIMM_4_RAt: val^"("^A^")" is A & EVUIMM_4 [ val = EVUIMM_4*4; ] { tmp:4 = A+(EVUIMM_4*4); export tmp; } +EVUIMM_8_RAt: val^"("^A^")" is A & EVUIMM_8 [ val = EVUIMM_8*8; ] { tmp:4 = A+(EVUIMM_8*8); export tmp; } + +attach variables [ T ] + [ vs0 vs1 vs2 vs3 vs4 vs5 vs6 vs7 vs8 vs9 vs10 vs11 vs12 vs13 vs14 vs15 + vs16 vs17 vs18 vs19 vs20 vs21 vs22 vs23 vs24 vs25 vs26 vs27 vs28 vs29 vs30 vs31 + ]; + +attach variables [ RX_VLE RY_VLE RZ_VLE] + [ r0 r1 r2 r3 r4 r5 r6 r7 r24 r25 r26 r27 r28 r29 r30 r31]; + +attach variables [ ARX_VLE ARY_VLE] + [ r8 r9 r10 r11 r12 r13 r14 r15 r16 r17 r18 r19 r20 r21 r22 r23]; + +attach variables [ D A B C S TH RA RB RS RT regp] + [ r0 r1 r2 r3 r4 r5 r6 r7 r8 r9 r10 r11 r12 r13 r14 r15 + r16 r17 r18 r19 r20 r21 r22 r23 r24 r25 r26 r27 r28 r29 r30 r31 ]; + +attach variables [ BFA BI_CR CRFD CRFS CR_A CR_B CR_D CR_X ] + [cr0 cr1 cr2 cr3 cr4 cr5 cr6 cr7] ; + +attach variables [ BI_CR_VLE BF_VLE ] + [cr0 cr1 cr2 cr3 ] ; + +attach variables [ fD fB fA fC fS fT ] + [ f0 f1 f2 f3 f4 f5 f6 f7 + f8 f9 f10 f11 f12 f13 f14 f15 + f16 f17 f18 f19 f20 f21 f22 f23 + f24 f25 f26 f27 f28 f29 f30 f31 ]; + +attach variables [ CRBD CRBR ] + [ fp_fx fp_fex fp_vx fp_ox + fp_ux fp_zx fp_xx fp_vxsnan + fp_vxisi fp_vxidi fp_vxzdz fp_vximz + fp_vxvc fp_fr fp_fi fp_c + fp_cc0 fp_cc1 fp_cc2 fp_cc3 + fp_reserve1 fp_vxsoft fp_vxsqrt fp_vxcvi + fp_ve fp_oe fp_ue fp_ze + fp_xe fp_ni fp_rn0 fp_rn1 + ]; + +attach variables SR [ + sr0 sr1 sr2 sr3 sr4 sr5 sr6 sr7 sr8 sr9 sr10 sr11 sr12 sr13 sr14 sr15 ]; + +## +## Attach the spr register to the token SPRVAL made up of the bits sprL/sprH +## the low bits are shifted up, so the table is inverted and indexed by sprH,sprL +## This could have been done by computing sprVal = sprH * 32 + sprL but it would +## have resulted in multiple instructions instead of the original single prototype. +## Thus this massive inverted table. 
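+## As a worked check against the rows below: XER is SPR 1 (sprH=0, sprL=1), so the raw 10-bit SPRVAL field reads back as sprL*32 + sprH = 32, +## which is why XER appears as entry 32 of the list (row 1, column 0). Likewise LR (SPR 8) lands at entry 256, CTR (SPR 9) at entry 288, +## and TBLr (SPR 268 = 8*32 + 12) at entry 12*32 + 8 = 392.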
+attach variables SPRVAL [ + spr000 spr020 spr040 spr060 spr080 spr0a0 spr0c0 spr0e0 spr100 spr120 spr140 spr160 spr180 spr1a0 spr1c0 spr1e0 spr200 spr220 spr240 spr260 spr280 spr2a0 spr2c0 spr2e0 spr300 spr320 spr340 spr360 spr380 spr3a0 spr3c0 spr3e0 + XER spr021 spr041 spr061 spr081 spr0a1 spr0c1 spr0e1 spr101 spr121 spr141 spr161 spr181 spr1a1 spr1c1 spr1e1 spr201 spr221 spr241 spr261 spr281 spr2a1 spr2c1 spr2e1 spr301 spr321 spr341 spr361 spr381 spr3a1 spr3c1 spr3e1 + spr002 spr022 spr042 spr062 spr082 spr0a2 spr0c2 spr0e2 spr102 spr122 spr142 spr162 spr182 spr1a2 spr1c2 spr1e2 spr202 spr222 spr242 spr262 spr282 spr2a2 spr2c2 spr2e2 spr302 spr322 spr342 spr362 spr382 spr3a2 spr3c2 spr3e2 + spr003 spr023 spr043 spr063 spr083 spr0a3 spr0c3 spr0e3 spr103 spr123 spr143 spr163 spr183 spr1a3 spr1c3 spr1e3 spr203 spr223 spr243 spr263 spr283 spr2a3 spr2c3 spr2e3 spr303 spr323 spr343 spr363 spr383 spr3a3 spr3c3 spr3e3 + spr004 spr024 spr044 spr064 spr084 spr0a4 spr0c4 spr0e4 spr104 spr124 spr144 spr164 spr184 spr1a4 spr1c4 spr1e4 spr204 spr224 spr244 spr264 spr284 spr2a4 spr2c4 spr2e4 spr304 spr324 spr344 spr364 spr384 spr3a4 spr3c4 spr3e4 + spr005 spr025 spr045 spr065 spr085 spr0a5 spr0c5 spr0e5 spr105 spr125 spr145 spr165 spr185 spr1a5 spr1c5 spr1e5 spr205 spr225 spr245 spr265 spr285 spr2a5 spr2c5 spr2e5 spr305 spr325 spr345 spr365 spr385 spr3a5 spr3c5 spr3e5 + spr006 spr026 spr046 spr066 spr086 spr0a6 spr0c6 spr0e6 spr106 spr126 spr146 spr166 spr186 spr1a6 spr1c6 spr1e6 spr206 spr226 spr246 spr266 spr286 spr2a6 spr2c6 spr2e6 spr306 spr326 spr346 spr366 spr386 spr3a6 spr3c6 spr3e6 + spr007 spr027 spr047 spr067 spr087 spr0a7 spr0c7 spr0e7 spr107 spr127 spr147 spr167 spr187 spr1a7 spr1c7 spr1e7 spr207 spr227 spr247 spr267 spr287 spr2a7 spr2c7 spr2e7 spr307 spr327 spr347 spr367 spr387 spr3a7 spr3c7 spr3e7 + LR spr028 spr048 spr068 spr088 spr0a8 spr0c8 spr0e8 spr108 spr128 spr148 spr168 spr188 spr1a8 spr1c8 spr1e8 spr208 spr228 spr248 spr268 spr288 spr2a8 spr2c8 spr2e8 spr308 spr328 spr348 spr368 spr388 spr3a8 spr3c8 spr3e8 + CTR spr029 spr049 spr069 spr089 spr0a9 spr0c9 spr0e9 spr109 spr129 spr149 spr169 spr189 spr1a9 spr1c9 spr1e9 spr209 spr229 spr249 spr269 spr289 spr2a9 spr2c9 spr2e9 spr309 spr329 spr349 spr369 spr389 spr3a9 spr3c9 spr3e9 + spr00a spr02a spr04a spr06a spr08a spr0aa spr0ca spr0ea spr10a spr12a spr14a spr16a spr18a spr1aa spr1ca spr1ea spr20a spr22a spr24a spr26a spr28a spr2aa spr2ca spr2ea spr30a spr32a spr34a spr36a spr38a spr3aa spr3ca spr3ea + spr00b spr02b spr04b spr06b spr08b spr0ab spr0cb spr0eb spr10b spr12b spr14b spr16b spr18b spr1ab spr1cb spr1eb spr20b spr22b spr24b spr26b spr28b spr2ab spr2cb spr2eb spr30b spr32b spr34b spr36b spr38b spr3ab spr3cb spr3eb + spr00c spr02c spr04c spr06c spr08c spr0ac spr0cc spr0ec TBLr spr12c spr14c spr16c spr18c spr1ac spr1cc spr1ec spr20c spr22c spr24c spr26c spr28c spr2ac spr2cc spr2ec spr30c spr32c spr34c spr36c spr38c spr3ac spr3cc spr3ec + spr00d spr02d spr04d spr06d spr08d spr0ad spr0cd spr0ed TBUr spr12d spr14d spr16d spr18d spr1ad spr1cd spr1ed spr20d spr22d spr24d spr26d spr28d spr2ad spr2cd spr2ed spr30d spr32d spr34d spr36d spr38d spr3ad spr3cd spr3ed + spr00e spr02e spr04e spr06e spr08e spr0ae spr0ce spr0ee spr10e spr12e spr14e spr16e spr18e spr1ae spr1ce spr1ee spr20e spr22e spr24e spr26e spr28e spr2ae spr2ce spr2ee spr30e spr32e spr34e spr36e spr38e spr3ae spr3ce spr3ee + spr00f spr02f spr04f spr06f spr08f spr0af spr0cf spr0ef spr10f spr12f spr14f spr16f spr18f spr1af spr1cf spr1ef spr20f spr22f spr24f spr26f spr28f 
spr2af spr2cf spr2ef spr30f TAR spr34f spr36f spr38f spr3af spr3cf spr3ef + spr010 spr030 spr050 spr070 spr090 spr0b0 spr0d0 spr0f0 spr110 spr130 spr150 spr170 spr190 spr1b0 spr1d0 spr1f0 spr210 spr230 spr250 spr270 spr290 spr2b0 spr2d0 spr2f0 spr310 spr330 spr350 spr370 spr390 spr3b0 spr3d0 spr3f0 + spr011 spr031 spr051 spr071 spr091 spr0b1 spr0d1 spr0f1 spr111 spr131 spr151 spr171 spr191 spr1b1 spr1d1 spr1f1 spr211 spr231 spr251 spr271 spr291 spr2b1 spr2d1 spr2f1 spr311 spr331 spr351 spr371 spr391 spr3b1 spr3d1 spr3f1 + spr012 spr032 spr052 spr072 spr092 spr0b2 spr0d2 spr0f2 spr112 spr132 spr152 spr172 spr192 spr1b2 spr1d2 spr1f2 spr212 spr232 spr252 spr272 spr292 spr2b2 spr2d2 spr2f2 spr312 spr332 spr352 spr372 spr392 spr3b2 spr3d2 spr3f2 + spr013 spr033 spr053 spr073 spr093 spr0b3 spr0d3 spr0f3 spr113 spr133 spr153 spr173 spr193 spr1b3 spr1d3 spr1f3 spr213 spr233 spr253 spr273 spr293 spr2b3 spr2d3 spr2f3 spr313 spr333 spr353 spr373 spr393 spr3b3 spr3d3 spr3f3 + spr014 spr034 spr054 spr074 spr094 spr0b4 spr0d4 spr0f4 spr114 spr134 spr154 spr174 spr194 spr1b4 spr1d4 spr1f4 spr214 spr234 spr254 spr274 spr294 spr2b4 spr2d4 spr2f4 spr314 spr334 spr354 spr374 spr394 spr3b4 spr3d4 spr3f4 + spr015 spr035 spr055 spr075 spr095 spr0b5 spr0d5 spr0f5 spr115 spr135 spr155 spr175 spr195 spr1b5 spr1d5 spr1f5 spr215 spr235 spr255 spr275 spr295 spr2b5 spr2d5 spr2f5 spr315 spr335 spr355 spr375 spr395 spr3b5 spr3d5 spr3f5 + spr016 spr036 spr056 spr076 spr096 spr0b6 spr0d6 spr0f6 spr116 spr136 spr156 spr176 spr196 spr1b6 spr1d6 spr1f6 spr216 spr236 spr256 spr276 spr296 spr2b6 spr2d6 spr2f6 spr316 spr336 spr356 spr376 spr396 spr3b6 spr3d6 spr3f6 + spr017 spr037 spr057 spr077 spr097 spr0b7 spr0d7 spr0f7 spr117 spr137 spr157 spr177 spr197 spr1b7 spr1d7 spr1f7 spr217 spr237 spr257 spr277 spr297 spr2b7 spr2d7 spr2f7 spr317 spr337 spr357 spr377 spr397 spr3b7 spr3d7 spr3f7 + spr018 spr038 spr058 spr078 spr098 spr0b8 spr0d8 spr0f8 spr118 spr138 spr158 spr178 spr198 spr1b8 spr1d8 spr1f8 spr218 spr238 spr258 spr278 spr298 spr2b8 spr2d8 spr2f8 spr318 spr338 spr358 spr378 spr398 spr3b8 spr3d8 spr3f8 + spr019 spr039 spr059 spr079 spr099 spr0b9 spr0d9 spr0f9 spr119 spr139 spr159 spr179 spr199 spr1b9 spr1d9 spr1f9 spr219 spr239 spr259 spr279 spr299 spr2b9 spr2d9 spr2f9 spr319 spr339 spr359 spr379 spr399 spr3b9 spr3d9 spr3f9 + SRR0 CSRR0 spr05a spr07a spr09a spr0ba spr0da spr0fa spr11a spr13a spr15a spr17a spr19a spr1ba spr1da spr1fa spr21a spr23a spr25a spr27a spr29a spr2ba spr2da spr2fa spr31a spr33a spr35a spr37a spr39a spr3ba spr3da spr3fa + SRR1 CSRR1 spr05b spr07b spr09b spr0bb spr0db spr0fb spr11b spr13b spr15b spr17b spr19b spr1bb spr1db spr1fb spr21b spr23b spr25b spr27b spr29b spr2bb spr2db spr2fb spr31b spr33b spr35b spr37b spr39b spr3bb spr3db spr3fb + spr01c spr03c spr05c spr07c spr09c spr0bc spr0dc spr0fc TBLw spr13c spr15c spr17c spr19c spr1bc spr1dc spr1fc spr21c spr23c spr25c spr27c spr29c spr2bc spr2dc spr2fc spr31c spr33c spr35c spr37c spr39c spr3bc spr3dc spr3fc + spr01d spr03d spr05d spr07d spr09d spr0bd spr0dd spr0fd TBUw spr13d spr15d spr17d spr19d spr1bd spr1dd spr1fd spr21d spr23d spr25d spr27d spr29d spr2bd spr2dd spr2fd spr31d spr33d spr35d spr37d spr39d spr3bd spr3dd spr3fd + spr01e spr03e spr05e spr07e spr09e spr0be spr0de spr0fe spr11e spr13e spr15e spr17e spr19e spr1be spr1de spr1fe spr21e spr23e spr25e spr27e spr29e spr2be spr2de spr2fe spr31e spr33e spr35e spr37e spr39e spr3be spr3de spr3fe + spr01f spr03f spr05f spr07f spr09f spr0bf spr0df spr0ff spr11f spr13f spr15f spr17f spr19f spr1bf 
spr1df spr1ff spr21f spr23f spr25f spr27f spr29f spr2bf spr2df spr2ff spr31f spr33f spr35f spr37f spr39f spr3bf spr3df spr3ff +]; + +## +## Attach the dcr register to the token DCRN made up of the bits dcrnL/dcrnH +## the low bits are shifted up, so the table is inverted and indexed by dcrnH,dcrnL +## This could have been done by computing DCRN = dcrnH * 32 + dcrnL but it would +## have resulted in multiple instructions instead of the original single prototype. +## Thus this massive inverted table. +attach variables DCRN [ + dcr000 dcr020 dcr040 dcr060 dcr080 dcr0a0 dcr0c0 dcr0e0 dcr100 dcr120 dcr140 dcr160 dcr180 dcr1a0 dcr1c0 dcr1e0 dcr200 dcr220 dcr240 dcr260 dcr280 dcr2a0 dcr2c0 dcr2e0 dcr300 dcr320 dcr340 dcr360 dcr380 dcr3a0 dcr3c0 dcr3e0 + dcr001 dcr021 dcr041 dcr061 dcr081 dcr0a1 dcr0c1 dcr0e1 dcr101 dcr121 dcr141 dcr161 dcr181 dcr1a1 dcr1c1 dcr1e1 dcr201 dcr221 dcr241 dcr261 dcr281 dcr2a1 dcr2c1 dcr2e1 dcr301 dcr321 dcr341 dcr361 dcr381 dcr3a1 dcr3c1 dcr3e1 + dcr002 dcr022 dcr042 dcr062 dcr082 dcr0a2 dcr0c2 dcr0e2 dcr102 dcr122 dcr142 dcr162 dcr182 dcr1a2 dcr1c2 dcr1e2 dcr202 dcr222 dcr242 dcr262 dcr282 dcr2a2 dcr2c2 dcr2e2 dcr302 dcr322 dcr342 dcr362 dcr382 dcr3a2 dcr3c2 dcr3e2 + dcr003 dcr023 dcr043 dcr063 dcr083 dcr0a3 dcr0c3 dcr0e3 dcr103 dcr123 dcr143 dcr163 dcr183 dcr1a3 dcr1c3 dcr1e3 dcr203 dcr223 dcr243 dcr263 dcr283 dcr2a3 dcr2c3 dcr2e3 dcr303 dcr323 dcr343 dcr363 dcr383 dcr3a3 dcr3c3 dcr3e3 + dcr004 dcr024 dcr044 dcr064 dcr084 dcr0a4 dcr0c4 dcr0e4 dcr104 dcr124 dcr144 dcr164 dcr184 dcr1a4 dcr1c4 dcr1e4 dcr204 dcr224 dcr244 dcr264 dcr284 dcr2a4 dcr2c4 dcr2e4 dcr304 dcr324 dcr344 dcr364 dcr384 dcr3a4 dcr3c4 dcr3e4 + dcr005 dcr025 dcr045 dcr065 dcr085 dcr0a5 dcr0c5 dcr0e5 dcr105 dcr125 dcr145 dcr165 dcr185 dcr1a5 dcr1c5 dcr1e5 dcr205 dcr225 dcr245 dcr265 dcr285 dcr2a5 dcr2c5 dcr2e5 dcr305 dcr325 dcr345 dcr365 dcr385 dcr3a5 dcr3c5 dcr3e5 + dcr006 dcr026 dcr046 dcr066 dcr086 dcr0a6 dcr0c6 dcr0e6 dcr106 dcr126 dcr146 dcr166 dcr186 dcr1a6 dcr1c6 dcr1e6 dcr206 dcr226 dcr246 dcr266 dcr286 dcr2a6 dcr2c6 dcr2e6 dcr306 dcr326 dcr346 dcr366 dcr386 dcr3a6 dcr3c6 dcr3e6 + dcr007 dcr027 dcr047 dcr067 dcr087 dcr0a7 dcr0c7 dcr0e7 dcr107 dcr127 dcr147 dcr167 dcr187 dcr1a7 dcr1c7 dcr1e7 dcr207 dcr227 dcr247 dcr267 dcr287 dcr2a7 dcr2c7 dcr2e7 dcr307 dcr327 dcr347 dcr367 dcr387 dcr3a7 dcr3c7 dcr3e7 + dcr008 dcr028 dcr048 dcr068 dcr088 dcr0a8 dcr0c8 dcr0e8 dcr108 dcr128 dcr148 dcr168 dcr188 dcr1a8 dcr1c8 dcr1e8 dcr208 dcr228 dcr248 dcr268 dcr288 dcr2a8 dcr2c8 dcr2e8 dcr308 dcr328 dcr348 dcr368 dcr388 dcr3a8 dcr3c8 dcr3e8 + dcr009 dcr029 dcr049 dcr069 dcr089 dcr0a9 dcr0c9 dcr0e9 dcr109 dcr129 dcr149 dcr169 dcr189 dcr1a9 dcr1c9 dcr1e9 dcr209 dcr229 dcr249 dcr269 dcr289 dcr2a9 dcr2c9 dcr2e9 dcr309 dcr329 dcr349 dcr369 dcr389 dcr3a9 dcr3c9 dcr3e9 + dcr00a dcr02a dcr04a dcr06a dcr08a dcr0aa dcr0ca dcr0ea dcr10a dcr12a dcr14a dcr16a dcr18a dcr1aa dcr1ca dcr1ea dcr20a dcr22a dcr24a dcr26a dcr28a dcr2aa dcr2ca dcr2ea dcr30a dcr32a dcr34a dcr36a dcr38a dcr3aa dcr3ca dcr3ea + dcr00b dcr02b dcr04b dcr06b dcr08b dcr0ab dcr0cb dcr0eb dcr10b dcr12b dcr14b dcr16b dcr18b dcr1ab dcr1cb dcr1eb dcr20b dcr22b dcr24b dcr26b dcr28b dcr2ab dcr2cb dcr2eb dcr30b dcr32b dcr34b dcr36b dcr38b dcr3ab dcr3cb dcr3eb + dcr00c dcr02c dcr04c dcr06c dcr08c dcr0ac dcr0cc dcr0ec dcr10c dcr12c dcr14c dcr16c dcr18c dcr1ac dcr1cc dcr1ec dcr20c dcr22c dcr24c dcr26c dcr28c dcr2ac dcr2cc dcr2ec dcr30c dcr32c dcr34c dcr36c dcr38c dcr3ac dcr3cc dcr3ec + dcr00d dcr02d dcr04d dcr06d dcr08d dcr0ad dcr0cd dcr0ed dcr10d dcr12d dcr14d dcr16d 
dcr18d dcr1ad dcr1cd dcr1ed dcr20d dcr22d dcr24d dcr26d dcr28d dcr2ad dcr2cd dcr2ed dcr30d dcr32d dcr34d dcr36d dcr38d dcr3ad dcr3cd dcr3ed + dcr00e dcr02e dcr04e dcr06e dcr08e dcr0ae dcr0ce dcr0ee dcr10e dcr12e dcr14e dcr16e dcr18e dcr1ae dcr1ce dcr1ee dcr20e dcr22e dcr24e dcr26e dcr28e dcr2ae dcr2ce dcr2ee dcr30e dcr32e dcr34e dcr36e dcr38e dcr3ae dcr3ce dcr3ee + dcr00f dcr02f dcr04f dcr06f dcr08f dcr0af dcr0cf dcr0ef dcr10f dcr12f dcr14f dcr16f dcr18f dcr1af dcr1cf dcr1ef dcr20f dcr22f dcr24f dcr26f dcr28f dcr2af dcr2cf dcr2ef dcr30f dcr32f dcr34f dcr36f dcr38f dcr3af dcr3cf dcr3ef + dcr010 dcr030 dcr050 dcr070 dcr090 dcr0b0 dcr0d0 dcr0f0 dcr110 dcr130 dcr150 dcr170 dcr190 dcr1b0 dcr1d0 dcr1f0 dcr210 dcr230 dcr250 dcr270 dcr290 dcr2b0 dcr2d0 dcr2f0 dcr310 dcr330 dcr350 dcr370 dcr390 dcr3b0 dcr3d0 dcr3f0 + dcr011 dcr031 dcr051 dcr071 dcr091 dcr0b1 dcr0d1 dcr0f1 dcr111 dcr131 dcr151 dcr171 dcr191 dcr1b1 dcr1d1 dcr1f1 dcr211 dcr231 dcr251 dcr271 dcr291 dcr2b1 dcr2d1 dcr2f1 dcr311 dcr331 dcr351 dcr371 dcr391 dcr3b1 dcr3d1 dcr3f1 + dcr012 dcr032 dcr052 dcr072 dcr092 dcr0b2 dcr0d2 dcr0f2 dcr112 dcr132 dcr152 dcr172 dcr192 dcr1b2 dcr1d2 dcr1f2 dcr212 dcr232 dcr252 dcr272 dcr292 dcr2b2 dcr2d2 dcr2f2 dcr312 dcr332 dcr352 dcr372 dcr392 dcr3b2 dcr3d2 dcr3f2 + dcr013 dcr033 dcr053 dcr073 dcr093 dcr0b3 dcr0d3 dcr0f3 dcr113 dcr133 dcr153 dcr173 dcr193 dcr1b3 dcr1d3 dcr1f3 dcr213 dcr233 dcr253 dcr273 dcr293 dcr2b3 dcr2d3 dcr2f3 dcr313 dcr333 dcr353 dcr373 dcr393 dcr3b3 dcr3d3 dcr3f3 + dcr014 dcr034 dcr054 dcr074 dcr094 dcr0b4 dcr0d4 dcr0f4 dcr114 dcr134 dcr154 dcr174 dcr194 dcr1b4 dcr1d4 dcr1f4 dcr214 dcr234 dcr254 dcr274 dcr294 dcr2b4 dcr2d4 dcr2f4 dcr314 dcr334 dcr354 dcr374 dcr394 dcr3b4 dcr3d4 dcr3f4 + dcr015 dcr035 dcr055 dcr075 dcr095 dcr0b5 dcr0d5 dcr0f5 dcr115 dcr135 dcr155 dcr175 dcr195 dcr1b5 dcr1d5 dcr1f5 dcr215 dcr235 dcr255 dcr275 dcr295 dcr2b5 dcr2d5 dcr2f5 dcr315 dcr335 dcr355 dcr375 dcr395 dcr3b5 dcr3d5 dcr3f5 + dcr016 dcr036 dcr056 dcr076 dcr096 dcr0b6 dcr0d6 dcr0f6 dcr116 dcr136 dcr156 dcr176 dcr196 dcr1b6 dcr1d6 dcr1f6 dcr216 dcr236 dcr256 dcr276 dcr296 dcr2b6 dcr2d6 dcr2f6 dcr316 dcr336 dcr356 dcr376 dcr396 dcr3b6 dcr3d6 dcr3f6 + dcr017 dcr037 dcr057 dcr077 dcr097 dcr0b7 dcr0d7 dcr0f7 dcr117 dcr137 dcr157 dcr177 dcr197 dcr1b7 dcr1d7 dcr1f7 dcr217 dcr237 dcr257 dcr277 dcr297 dcr2b7 dcr2d7 dcr2f7 dcr317 dcr337 dcr357 dcr377 dcr397 dcr3b7 dcr3d7 dcr3f7 + dcr018 dcr038 dcr058 dcr078 dcr098 dcr0b8 dcr0d8 dcr0f8 dcr118 dcr138 dcr158 dcr178 dcr198 dcr1b8 dcr1d8 dcr1f8 dcr218 dcr238 dcr258 dcr278 dcr298 dcr2b8 dcr2d8 dcr2f8 dcr318 dcr338 dcr358 dcr378 dcr398 dcr3b8 dcr3d8 dcr3f8 + dcr019 dcr039 dcr059 dcr079 dcr099 dcr0b9 dcr0d9 dcr0f9 dcr119 dcr139 dcr159 dcr179 dcr199 dcr1b9 dcr1d9 dcr1f9 dcr219 dcr239 dcr259 dcr279 dcr299 dcr2b9 dcr2d9 dcr2f9 dcr319 dcr339 dcr359 dcr379 dcr399 dcr3b9 dcr3d9 dcr3f9 + dcr01a dcr03a dcr05a dcr07a dcr09a dcr0ba dcr0da dcr0fa dcr11a dcr13a dcr15a dcr17a dcr19a dcr1ba dcr1da dcr1fa dcr21a dcr23a dcr25a dcr27a dcr29a dcr2ba dcr2da dcr2fa dcr31a dcr33a dcr35a dcr37a dcr39a dcr3ba dcr3da dcr3fa + dcr01b dcr03b dcr05b dcr07b dcr09b dcr0bb dcr0db dcr0fb dcr11b dcr13b dcr15b dcr17b dcr19b dcr1bb dcr1db dcr1fb dcr21b dcr23b dcr25b dcr27b dcr29b dcr2bb dcr2db dcr2fb dcr31b dcr33b dcr35b dcr37b dcr39b dcr3bb dcr3db dcr3fb + dcr01c dcr03c dcr05c dcr07c dcr09c dcr0bc dcr0dc dcr0fc dcr11c dcr13c dcr15c dcr17c dcr19c dcr1bc dcr1dc dcr1fc dcr21c dcr23c dcr25c dcr27c dcr29c dcr2bc dcr2dc dcr2fc dcr31c dcr33c dcr35c dcr37c dcr39c dcr3bc dcr3dc dcr3fc + dcr01d dcr03d dcr05d 
dcr07d dcr09d dcr0bd dcr0dd dcr0fd dcr11d dcr13d dcr15d dcr17d dcr19d dcr1bd dcr1dd dcr1fd dcr21d dcr23d dcr25d dcr27d dcr29d dcr2bd dcr2dd dcr2fd dcr31d dcr33d dcr35d dcr37d dcr39d dcr3bd dcr3dd dcr3fd + dcr01e dcr03e dcr05e dcr07e dcr09e dcr0be dcr0de dcr0fe dcr11e dcr13e dcr15e dcr17e dcr19e dcr1be dcr1de dcr1fe dcr21e dcr23e dcr25e dcr27e dcr29e dcr2be dcr2de dcr2fe dcr31e dcr33e dcr35e dcr37e dcr39e dcr3be dcr3de dcr3fe + dcr01f dcr03f dcr05f dcr07f dcr09f dcr0bf dcr0df dcr0ff dcr11f dcr13f dcr15f dcr17f dcr19f dcr1bf dcr1df dcr1ff dcr21f dcr23f dcr25f dcr27f dcr29f dcr2bf dcr2df dcr2ff dcr31f dcr33f dcr35f dcr37f dcr39f dcr3bf dcr3df dcr3ff +]; + +attach variables [vrDR vrAR vrBR vrSR vrCR] + [ vs32 vs33 vs34 vs35 vs36 vs37 vs38 vs39 vs40 vs41 vs42 vs43 vs44 vs45 vs46 vs47 + vs48 vs49 vs50 vs51 vs52 vs53 vs54 vs55 vs56 vs57 vs58 vs59 vs60 vs61 vs62 vs63 ]; + +## These attaches are for the Altivec instructions +attach names [ vrDD vrAD vrBD vrSD vrCD] + [ v0 v1 v2 v3 v4 v5 v6 v7 v8 v9 v10 v11 v12 v13 v14 v15 + v16 v17 v18 v19 v20 v21 v22 v23 v24 v25 v26 v27 v28 v29 v30 v31 ]; + +vrD: vrDD is vrDD & vrDR { export vrDR; } +vrA: vrAD is vrAD & vrAR { export vrAR; } +vrB: vrBD is vrBD & vrBR { export vrBR; } +vrC: vrCD is vrCD & vrCR { export vrCR; } +vrS: vrSD is vrSD & vrSR { export vrSR; } + +# AltVect Vector vrD sub-piece selectors + +# AltVect Vector vrD sub-piece selectors for size 64 +attach variables vrD_64_0 [vr0_64_0 vr1_64_0 vr2_64_0 vr3_64_0 vr4_64_0 vr5_64_0 vr6_64_0 vr7_64_0 vr8_64_0 vr9_64_0 vr10_64_0 vr11_64_0 vr12_64_0 vr13_64_0 vr14_64_0 vr15_64_0 vr16_64_0 vr17_64_0 vr18_64_0 vr19_64_0 vr20_64_0 vr21_64_0 vr22_64_0 vr23_64_0 vr24_64_0 vr25_64_0 vr26_64_0 vr27_64_0 vr28_64_0 vr29_64_0 vr30_64_0 vr31_64_0 ]; +attach variables vrD_64_1 [vr0_64_1 vr1_64_1 vr2_64_1 vr3_64_1 vr4_64_1 vr5_64_1 vr6_64_1 vr7_64_1 vr8_64_1 vr9_64_1 vr10_64_1 vr11_64_1 vr12_64_1 vr13_64_1 vr14_64_1 vr15_64_1 vr16_64_1 vr17_64_1 vr18_64_1 vr19_64_1 vr20_64_1 vr21_64_1 vr22_64_1 vr23_64_1 vr24_64_1 vr25_64_1 vr26_64_1 vr27_64_1 vr28_64_1 vr29_64_1 vr30_64_1 vr31_64_1 ]; + +# AltVect Vector vrD sub-piece selectors for size 32 +attach variables vrD_32_0 [vr0_32_0 vr1_32_0 vr2_32_0 vr3_32_0 vr4_32_0 vr5_32_0 vr6_32_0 vr7_32_0 vr8_32_0 vr9_32_0 vr10_32_0 vr11_32_0 vr12_32_0 vr13_32_0 vr14_32_0 vr15_32_0 vr16_32_0 vr17_32_0 vr18_32_0 vr19_32_0 vr20_32_0 vr21_32_0 vr22_32_0 vr23_32_0 vr24_32_0 vr25_32_0 vr26_32_0 vr27_32_0 vr28_32_0 vr29_32_0 vr30_32_0 vr31_32_0 ]; +attach variables vrD_32_1 [vr0_32_1 vr1_32_1 vr2_32_1 vr3_32_1 vr4_32_1 vr5_32_1 vr6_32_1 vr7_32_1 vr8_32_1 vr9_32_1 vr10_32_1 vr11_32_1 vr12_32_1 vr13_32_1 vr14_32_1 vr15_32_1 vr16_32_1 vr17_32_1 vr18_32_1 vr19_32_1 vr20_32_1 vr21_32_1 vr22_32_1 vr23_32_1 vr24_32_1 vr25_32_1 vr26_32_1 vr27_32_1 vr28_32_1 vr29_32_1 vr30_32_1 vr31_32_1 ]; +attach variables vrD_32_2 [vr0_32_2 vr1_32_2 vr2_32_2 vr3_32_2 vr4_32_2 vr5_32_2 vr6_32_2 vr7_32_2 vr8_32_2 vr9_32_2 vr10_32_2 vr11_32_2 vr12_32_2 vr13_32_2 vr14_32_2 vr15_32_2 vr16_32_2 vr17_32_2 vr18_32_2 vr19_32_2 vr20_32_2 vr21_32_2 vr22_32_2 vr23_32_2 vr24_32_2 vr25_32_2 vr26_32_2 vr27_32_2 vr28_32_2 vr29_32_2 vr30_32_2 vr31_32_2 ]; +attach variables vrD_32_3 [vr0_32_3 vr1_32_3 vr2_32_3 vr3_32_3 vr4_32_3 vr5_32_3 vr6_32_3 vr7_32_3 vr8_32_3 vr9_32_3 vr10_32_3 vr11_32_3 vr12_32_3 vr13_32_3 vr14_32_3 vr15_32_3 vr16_32_3 vr17_32_3 vr18_32_3 vr19_32_3 vr20_32_3 vr21_32_3 vr22_32_3 vr23_32_3 vr24_32_3 vr25_32_3 vr26_32_3 vr27_32_3 vr28_32_3 vr29_32_3 vr30_32_3 vr31_32_3 ]; + +# AltVect Vector vrD sub-piece 
selectors for size 16 +attach variables vrD_16_0 [vr0_16_0 vr1_16_0 vr2_16_0 vr3_16_0 vr4_16_0 vr5_16_0 vr6_16_0 vr7_16_0 vr8_16_0 vr9_16_0 vr10_16_0 vr11_16_0 vr12_16_0 vr13_16_0 vr14_16_0 vr15_16_0 vr16_16_0 vr17_16_0 vr18_16_0 vr19_16_0 vr20_16_0 vr21_16_0 vr22_16_0 vr23_16_0 vr24_16_0 vr25_16_0 vr26_16_0 vr27_16_0 vr28_16_0 vr29_16_0 vr30_16_0 vr31_16_0 ]; +attach variables vrD_16_1 [vr0_16_1 vr1_16_1 vr2_16_1 vr3_16_1 vr4_16_1 vr5_16_1 vr6_16_1 vr7_16_1 vr8_16_1 vr9_16_1 vr10_16_1 vr11_16_1 vr12_16_1 vr13_16_1 vr14_16_1 vr15_16_1 vr16_16_1 vr17_16_1 vr18_16_1 vr19_16_1 vr20_16_1 vr21_16_1 vr22_16_1 vr23_16_1 vr24_16_1 vr25_16_1 vr26_16_1 vr27_16_1 vr28_16_1 vr29_16_1 vr30_16_1 vr31_16_1 ]; +attach variables vrD_16_2 [vr0_16_2 vr1_16_2 vr2_16_2 vr3_16_2 vr4_16_2 vr5_16_2 vr6_16_2 vr7_16_2 vr8_16_2 vr9_16_2 vr10_16_2 vr11_16_2 vr12_16_2 vr13_16_2 vr14_16_2 vr15_16_2 vr16_16_2 vr17_16_2 vr18_16_2 vr19_16_2 vr20_16_2 vr21_16_2 vr22_16_2 vr23_16_2 vr24_16_2 vr25_16_2 vr26_16_2 vr27_16_2 vr28_16_2 vr29_16_2 vr30_16_2 vr31_16_2 ]; +attach variables vrD_16_3 [vr0_16_3 vr1_16_3 vr2_16_3 vr3_16_3 vr4_16_3 vr5_16_3 vr6_16_3 vr7_16_3 vr8_16_3 vr9_16_3 vr10_16_3 vr11_16_3 vr12_16_3 vr13_16_3 vr14_16_3 vr15_16_3 vr16_16_3 vr17_16_3 vr18_16_3 vr19_16_3 vr20_16_3 vr21_16_3 vr22_16_3 vr23_16_3 vr24_16_3 vr25_16_3 vr26_16_3 vr27_16_3 vr28_16_3 vr29_16_3 vr30_16_3 vr31_16_3 ]; +attach variables vrD_16_4 [vr0_16_4 vr1_16_4 vr2_16_4 vr3_16_4 vr4_16_4 vr5_16_4 vr6_16_4 vr7_16_4 vr8_16_4 vr9_16_4 vr10_16_4 vr11_16_4 vr12_16_4 vr13_16_4 vr14_16_4 vr15_16_4 vr16_16_4 vr17_16_4 vr18_16_4 vr19_16_4 vr20_16_4 vr21_16_4 vr22_16_4 vr23_16_4 vr24_16_4 vr25_16_4 vr26_16_4 vr27_16_4 vr28_16_4 vr29_16_4 vr30_16_4 vr31_16_4 ]; +attach variables vrD_16_5 [vr0_16_5 vr1_16_5 vr2_16_5 vr3_16_5 vr4_16_5 vr5_16_5 vr6_16_5 vr7_16_5 vr8_16_5 vr9_16_5 vr10_16_5 vr11_16_5 vr12_16_5 vr13_16_5 vr14_16_5 vr15_16_5 vr16_16_5 vr17_16_5 vr18_16_5 vr19_16_5 vr20_16_5 vr21_16_5 vr22_16_5 vr23_16_5 vr24_16_5 vr25_16_5 vr26_16_5 vr27_16_5 vr28_16_5 vr29_16_5 vr30_16_5 vr31_16_5 ]; +attach variables vrD_16_6 [vr0_16_6 vr1_16_6 vr2_16_6 vr3_16_6 vr4_16_6 vr5_16_6 vr6_16_6 vr7_16_6 vr8_16_6 vr9_16_6 vr10_16_6 vr11_16_6 vr12_16_6 vr13_16_6 vr14_16_6 vr15_16_6 vr16_16_6 vr17_16_6 vr18_16_6 vr19_16_6 vr20_16_6 vr21_16_6 vr22_16_6 vr23_16_6 vr24_16_6 vr25_16_6 vr26_16_6 vr27_16_6 vr28_16_6 vr29_16_6 vr30_16_6 vr31_16_6 ]; +attach variables vrD_16_7 [vr0_16_7 vr1_16_7 vr2_16_7 vr3_16_7 vr4_16_7 vr5_16_7 vr6_16_7 vr7_16_7 vr8_16_7 vr9_16_7 vr10_16_7 vr11_16_7 vr12_16_7 vr13_16_7 vr14_16_7 vr15_16_7 vr16_16_7 vr17_16_7 vr18_16_7 vr19_16_7 vr20_16_7 vr21_16_7 vr22_16_7 vr23_16_7 vr24_16_7 vr25_16_7 vr26_16_7 vr27_16_7 vr28_16_7 vr29_16_7 vr30_16_7 vr31_16_7 ]; + +# AltVect Vector vrD sub-piece selectors for size 8 +attach variables vrD_8_0 [vr0_8_0 vr1_8_0 vr2_8_0 vr3_8_0 vr4_8_0 vr5_8_0 vr6_8_0 vr7_8_0 vr8_8_0 vr9_8_0 vr10_8_0 vr11_8_0 vr12_8_0 vr13_8_0 vr14_8_0 vr15_8_0 vr16_8_0 vr17_8_0 vr18_8_0 vr19_8_0 vr20_8_0 vr21_8_0 vr22_8_0 vr23_8_0 vr24_8_0 vr25_8_0 vr26_8_0 vr27_8_0 vr28_8_0 vr29_8_0 vr30_8_0 vr31_8_0 ]; +attach variables vrD_8_1 [vr0_8_1 vr1_8_1 vr2_8_1 vr3_8_1 vr4_8_1 vr5_8_1 vr6_8_1 vr7_8_1 vr8_8_1 vr9_8_1 vr10_8_1 vr11_8_1 vr12_8_1 vr13_8_1 vr14_8_1 vr15_8_1 vr16_8_1 vr17_8_1 vr18_8_1 vr19_8_1 vr20_8_1 vr21_8_1 vr22_8_1 vr23_8_1 vr24_8_1 vr25_8_1 vr26_8_1 vr27_8_1 vr28_8_1 vr29_8_1 vr30_8_1 vr31_8_1 ]; +attach variables vrD_8_2 [vr0_8_2 vr1_8_2 vr2_8_2 vr3_8_2 vr4_8_2 vr5_8_2 vr6_8_2 vr7_8_2 vr8_8_2 vr9_8_2 vr10_8_2 vr11_8_2 vr12_8_2 
vr13_8_2 vr14_8_2 vr15_8_2 vr16_8_2 vr17_8_2 vr18_8_2 vr19_8_2 vr20_8_2 vr21_8_2 vr22_8_2 vr23_8_2 vr24_8_2 vr25_8_2 vr26_8_2 vr27_8_2 vr28_8_2 vr29_8_2 vr30_8_2 vr31_8_2 ]; +attach variables vrD_8_3 [vr0_8_3 vr1_8_3 vr2_8_3 vr3_8_3 vr4_8_3 vr5_8_3 vr6_8_3 vr7_8_3 vr8_8_3 vr9_8_3 vr10_8_3 vr11_8_3 vr12_8_3 vr13_8_3 vr14_8_3 vr15_8_3 vr16_8_3 vr17_8_3 vr18_8_3 vr19_8_3 vr20_8_3 vr21_8_3 vr22_8_3 vr23_8_3 vr24_8_3 vr25_8_3 vr26_8_3 vr27_8_3 vr28_8_3 vr29_8_3 vr30_8_3 vr31_8_3 ]; +attach variables vrD_8_4 [vr0_8_4 vr1_8_4 vr2_8_4 vr3_8_4 vr4_8_4 vr5_8_4 vr6_8_4 vr7_8_4 vr8_8_4 vr9_8_4 vr10_8_4 vr11_8_4 vr12_8_4 vr13_8_4 vr14_8_4 vr15_8_4 vr16_8_4 vr17_8_4 vr18_8_4 vr19_8_4 vr20_8_4 vr21_8_4 vr22_8_4 vr23_8_4 vr24_8_4 vr25_8_4 vr26_8_4 vr27_8_4 vr28_8_4 vr29_8_4 vr30_8_4 vr31_8_4 ]; +attach variables vrD_8_5 [vr0_8_5 vr1_8_5 vr2_8_5 vr3_8_5 vr4_8_5 vr5_8_5 vr6_8_5 vr7_8_5 vr8_8_5 vr9_8_5 vr10_8_5 vr11_8_5 vr12_8_5 vr13_8_5 vr14_8_5 vr15_8_5 vr16_8_5 vr17_8_5 vr18_8_5 vr19_8_5 vr20_8_5 vr21_8_5 vr22_8_5 vr23_8_5 vr24_8_5 vr25_8_5 vr26_8_5 vr27_8_5 vr28_8_5 vr29_8_5 vr30_8_5 vr31_8_5 ]; +attach variables vrD_8_6 [vr0_8_6 vr1_8_6 vr2_8_6 vr3_8_6 vr4_8_6 vr5_8_6 vr6_8_6 vr7_8_6 vr8_8_6 vr9_8_6 vr10_8_6 vr11_8_6 vr12_8_6 vr13_8_6 vr14_8_6 vr15_8_6 vr16_8_6 vr17_8_6 vr18_8_6 vr19_8_6 vr20_8_6 vr21_8_6 vr22_8_6 vr23_8_6 vr24_8_6 vr25_8_6 vr26_8_6 vr27_8_6 vr28_8_6 vr29_8_6 vr30_8_6 vr31_8_6 ]; +attach variables vrD_8_7 [vr0_8_7 vr1_8_7 vr2_8_7 vr3_8_7 vr4_8_7 vr5_8_7 vr6_8_7 vr7_8_7 vr8_8_7 vr9_8_7 vr10_8_7 vr11_8_7 vr12_8_7 vr13_8_7 vr14_8_7 vr15_8_7 vr16_8_7 vr17_8_7 vr18_8_7 vr19_8_7 vr20_8_7 vr21_8_7 vr22_8_7 vr23_8_7 vr24_8_7 vr25_8_7 vr26_8_7 vr27_8_7 vr28_8_7 vr29_8_7 vr30_8_7 vr31_8_7 ]; +attach variables vrD_8_8 [vr0_8_8 vr1_8_8 vr2_8_8 vr3_8_8 vr4_8_8 vr5_8_8 vr6_8_8 vr7_8_8 vr8_8_8 vr9_8_8 vr10_8_8 vr11_8_8 vr12_8_8 vr13_8_8 vr14_8_8 vr15_8_8 vr16_8_8 vr17_8_8 vr18_8_8 vr19_8_8 vr20_8_8 vr21_8_8 vr22_8_8 vr23_8_8 vr24_8_8 vr25_8_8 vr26_8_8 vr27_8_8 vr28_8_8 vr29_8_8 vr30_8_8 vr31_8_8 ]; +attach variables vrD_8_9 [vr0_8_9 vr1_8_9 vr2_8_9 vr3_8_9 vr4_8_9 vr5_8_9 vr6_8_9 vr7_8_9 vr8_8_9 vr9_8_9 vr10_8_9 vr11_8_9 vr12_8_9 vr13_8_9 vr14_8_9 vr15_8_9 vr16_8_9 vr17_8_9 vr18_8_9 vr19_8_9 vr20_8_9 vr21_8_9 vr22_8_9 vr23_8_9 vr24_8_9 vr25_8_9 vr26_8_9 vr27_8_9 vr28_8_9 vr29_8_9 vr30_8_9 vr31_8_9 ]; +attach variables vrD_8_10 [vr0_8_10 vr1_8_10 vr2_8_10 vr3_8_10 vr4_8_10 vr5_8_10 vr6_8_10 vr7_8_10 vr8_8_10 vr9_8_10 vr10_8_10 vr11_8_10 vr12_8_10 vr13_8_10 vr14_8_10 vr15_8_10 vr16_8_10 vr17_8_10 vr18_8_10 vr19_8_10 vr20_8_10 vr21_8_10 vr22_8_10 vr23_8_10 vr24_8_10 vr25_8_10 vr26_8_10 vr27_8_10 vr28_8_10 vr29_8_10 vr30_8_10 vr31_8_10 ]; +attach variables vrD_8_11 [vr0_8_11 vr1_8_11 vr2_8_11 vr3_8_11 vr4_8_11 vr5_8_11 vr6_8_11 vr7_8_11 vr8_8_11 vr9_8_11 vr10_8_11 vr11_8_11 vr12_8_11 vr13_8_11 vr14_8_11 vr15_8_11 vr16_8_11 vr17_8_11 vr18_8_11 vr19_8_11 vr20_8_11 vr21_8_11 vr22_8_11 vr23_8_11 vr24_8_11 vr25_8_11 vr26_8_11 vr27_8_11 vr28_8_11 vr29_8_11 vr30_8_11 vr31_8_11 ]; +attach variables vrD_8_12 [vr0_8_12 vr1_8_12 vr2_8_12 vr3_8_12 vr4_8_12 vr5_8_12 vr6_8_12 vr7_8_12 vr8_8_12 vr9_8_12 vr10_8_12 vr11_8_12 vr12_8_12 vr13_8_12 vr14_8_12 vr15_8_12 vr16_8_12 vr17_8_12 vr18_8_12 vr19_8_12 vr20_8_12 vr21_8_12 vr22_8_12 vr23_8_12 vr24_8_12 vr25_8_12 vr26_8_12 vr27_8_12 vr28_8_12 vr29_8_12 vr30_8_12 vr31_8_12 ]; +attach variables vrD_8_13 [vr0_8_13 vr1_8_13 vr2_8_13 vr3_8_13 vr4_8_13 vr5_8_13 vr6_8_13 vr7_8_13 vr8_8_13 vr9_8_13 vr10_8_13 vr11_8_13 vr12_8_13 vr13_8_13 vr14_8_13 vr15_8_13 vr16_8_13 vr17_8_13 
vr18_8_13 vr19_8_13 vr20_8_13 vr21_8_13 vr22_8_13 vr23_8_13 vr24_8_13 vr25_8_13 vr26_8_13 vr27_8_13 vr28_8_13 vr29_8_13 vr30_8_13 vr31_8_13 ]; +attach variables vrD_8_14 [vr0_8_14 vr1_8_14 vr2_8_14 vr3_8_14 vr4_8_14 vr5_8_14 vr6_8_14 vr7_8_14 vr8_8_14 vr9_8_14 vr10_8_14 vr11_8_14 vr12_8_14 vr13_8_14 vr14_8_14 vr15_8_14 vr16_8_14 vr17_8_14 vr18_8_14 vr19_8_14 vr20_8_14 vr21_8_14 vr22_8_14 vr23_8_14 vr24_8_14 vr25_8_14 vr26_8_14 vr27_8_14 vr28_8_14 vr29_8_14 vr30_8_14 vr31_8_14 ]; +attach variables vrD_8_15 [vr0_8_15 vr1_8_15 vr2_8_15 vr3_8_15 vr4_8_15 vr5_8_15 vr6_8_15 vr7_8_15 vr8_8_15 vr9_8_15 vr10_8_15 vr11_8_15 vr12_8_15 vr13_8_15 vr14_8_15 vr15_8_15 vr16_8_15 vr17_8_15 vr18_8_15 vr19_8_15 vr20_8_15 vr21_8_15 vr22_8_15 vr23_8_15 vr24_8_15 vr25_8_15 vr26_8_15 vr27_8_15 vr28_8_15 vr29_8_15 vr30_8_15 vr31_8_15 ]; + + +# AltVect Vector vrA sub-piece selectors + +# AltVect Vector vrA sub-piece selectors for size 64 +attach variables vrA_64_0 [vr0_64_0 vr1_64_0 vr2_64_0 vr3_64_0 vr4_64_0 vr5_64_0 vr6_64_0 vr7_64_0 vr8_64_0 vr9_64_0 vr10_64_0 vr11_64_0 vr12_64_0 vr13_64_0 vr14_64_0 vr15_64_0 vr16_64_0 vr17_64_0 vr18_64_0 vr19_64_0 vr20_64_0 vr21_64_0 vr22_64_0 vr23_64_0 vr24_64_0 vr25_64_0 vr26_64_0 vr27_64_0 vr28_64_0 vr29_64_0 vr30_64_0 vr31_64_0 ]; +attach variables vrA_64_1 [vr0_64_1 vr1_64_1 vr2_64_1 vr3_64_1 vr4_64_1 vr5_64_1 vr6_64_1 vr7_64_1 vr8_64_1 vr9_64_1 vr10_64_1 vr11_64_1 vr12_64_1 vr13_64_1 vr14_64_1 vr15_64_1 vr16_64_1 vr17_64_1 vr18_64_1 vr19_64_1 vr20_64_1 vr21_64_1 vr22_64_1 vr23_64_1 vr24_64_1 vr25_64_1 vr26_64_1 vr27_64_1 vr28_64_1 vr29_64_1 vr30_64_1 vr31_64_1 ]; + +# AltVect Vector vrA sub-piece selectors for size 32 +attach variables vrA_32_0 [vr0_32_0 vr1_32_0 vr2_32_0 vr3_32_0 vr4_32_0 vr5_32_0 vr6_32_0 vr7_32_0 vr8_32_0 vr9_32_0 vr10_32_0 vr11_32_0 vr12_32_0 vr13_32_0 vr14_32_0 vr15_32_0 vr16_32_0 vr17_32_0 vr18_32_0 vr19_32_0 vr20_32_0 vr21_32_0 vr22_32_0 vr23_32_0 vr24_32_0 vr25_32_0 vr26_32_0 vr27_32_0 vr28_32_0 vr29_32_0 vr30_32_0 vr31_32_0 ]; +attach variables vrA_32_1 [vr0_32_1 vr1_32_1 vr2_32_1 vr3_32_1 vr4_32_1 vr5_32_1 vr6_32_1 vr7_32_1 vr8_32_1 vr9_32_1 vr10_32_1 vr11_32_1 vr12_32_1 vr13_32_1 vr14_32_1 vr15_32_1 vr16_32_1 vr17_32_1 vr18_32_1 vr19_32_1 vr20_32_1 vr21_32_1 vr22_32_1 vr23_32_1 vr24_32_1 vr25_32_1 vr26_32_1 vr27_32_1 vr28_32_1 vr29_32_1 vr30_32_1 vr31_32_1 ]; +attach variables vrA_32_2 [vr0_32_2 vr1_32_2 vr2_32_2 vr3_32_2 vr4_32_2 vr5_32_2 vr6_32_2 vr7_32_2 vr8_32_2 vr9_32_2 vr10_32_2 vr11_32_2 vr12_32_2 vr13_32_2 vr14_32_2 vr15_32_2 vr16_32_2 vr17_32_2 vr18_32_2 vr19_32_2 vr20_32_2 vr21_32_2 vr22_32_2 vr23_32_2 vr24_32_2 vr25_32_2 vr26_32_2 vr27_32_2 vr28_32_2 vr29_32_2 vr30_32_2 vr31_32_2 ]; +attach variables vrA_32_3 [vr0_32_3 vr1_32_3 vr2_32_3 vr3_32_3 vr4_32_3 vr5_32_3 vr6_32_3 vr7_32_3 vr8_32_3 vr9_32_3 vr10_32_3 vr11_32_3 vr12_32_3 vr13_32_3 vr14_32_3 vr15_32_3 vr16_32_3 vr17_32_3 vr18_32_3 vr19_32_3 vr20_32_3 vr21_32_3 vr22_32_3 vr23_32_3 vr24_32_3 vr25_32_3 vr26_32_3 vr27_32_3 vr28_32_3 vr29_32_3 vr30_32_3 vr31_32_3 ]; + +# AltVect Vector vrA sub-piece selectors for size 16 +attach variables vrA_16_0 [vr0_16_0 vr1_16_0 vr2_16_0 vr3_16_0 vr4_16_0 vr5_16_0 vr6_16_0 vr7_16_0 vr8_16_0 vr9_16_0 vr10_16_0 vr11_16_0 vr12_16_0 vr13_16_0 vr14_16_0 vr15_16_0 vr16_16_0 vr17_16_0 vr18_16_0 vr19_16_0 vr20_16_0 vr21_16_0 vr22_16_0 vr23_16_0 vr24_16_0 vr25_16_0 vr26_16_0 vr27_16_0 vr28_16_0 vr29_16_0 vr30_16_0 vr31_16_0 ]; +attach variables vrA_16_1 [vr0_16_1 vr1_16_1 vr2_16_1 vr3_16_1 vr4_16_1 vr5_16_1 vr6_16_1 vr7_16_1 vr8_16_1 vr9_16_1 vr10_16_1 
vr11_16_1 vr12_16_1 vr13_16_1 vr14_16_1 vr15_16_1 vr16_16_1 vr17_16_1 vr18_16_1 vr19_16_1 vr20_16_1 vr21_16_1 vr22_16_1 vr23_16_1 vr24_16_1 vr25_16_1 vr26_16_1 vr27_16_1 vr28_16_1 vr29_16_1 vr30_16_1 vr31_16_1 ]; +attach variables vrA_16_2 [vr0_16_2 vr1_16_2 vr2_16_2 vr3_16_2 vr4_16_2 vr5_16_2 vr6_16_2 vr7_16_2 vr8_16_2 vr9_16_2 vr10_16_2 vr11_16_2 vr12_16_2 vr13_16_2 vr14_16_2 vr15_16_2 vr16_16_2 vr17_16_2 vr18_16_2 vr19_16_2 vr20_16_2 vr21_16_2 vr22_16_2 vr23_16_2 vr24_16_2 vr25_16_2 vr26_16_2 vr27_16_2 vr28_16_2 vr29_16_2 vr30_16_2 vr31_16_2 ]; +attach variables vrA_16_3 [vr0_16_3 vr1_16_3 vr2_16_3 vr3_16_3 vr4_16_3 vr5_16_3 vr6_16_3 vr7_16_3 vr8_16_3 vr9_16_3 vr10_16_3 vr11_16_3 vr12_16_3 vr13_16_3 vr14_16_3 vr15_16_3 vr16_16_3 vr17_16_3 vr18_16_3 vr19_16_3 vr20_16_3 vr21_16_3 vr22_16_3 vr23_16_3 vr24_16_3 vr25_16_3 vr26_16_3 vr27_16_3 vr28_16_3 vr29_16_3 vr30_16_3 vr31_16_3 ]; +attach variables vrA_16_4 [vr0_16_4 vr1_16_4 vr2_16_4 vr3_16_4 vr4_16_4 vr5_16_4 vr6_16_4 vr7_16_4 vr8_16_4 vr9_16_4 vr10_16_4 vr11_16_4 vr12_16_4 vr13_16_4 vr14_16_4 vr15_16_4 vr16_16_4 vr17_16_4 vr18_16_4 vr19_16_4 vr20_16_4 vr21_16_4 vr22_16_4 vr23_16_4 vr24_16_4 vr25_16_4 vr26_16_4 vr27_16_4 vr28_16_4 vr29_16_4 vr30_16_4 vr31_16_4 ]; +attach variables vrA_16_5 [vr0_16_5 vr1_16_5 vr2_16_5 vr3_16_5 vr4_16_5 vr5_16_5 vr6_16_5 vr7_16_5 vr8_16_5 vr9_16_5 vr10_16_5 vr11_16_5 vr12_16_5 vr13_16_5 vr14_16_5 vr15_16_5 vr16_16_5 vr17_16_5 vr18_16_5 vr19_16_5 vr20_16_5 vr21_16_5 vr22_16_5 vr23_16_5 vr24_16_5 vr25_16_5 vr26_16_5 vr27_16_5 vr28_16_5 vr29_16_5 vr30_16_5 vr31_16_5 ]; +attach variables vrA_16_6 [vr0_16_6 vr1_16_6 vr2_16_6 vr3_16_6 vr4_16_6 vr5_16_6 vr6_16_6 vr7_16_6 vr8_16_6 vr9_16_6 vr10_16_6 vr11_16_6 vr12_16_6 vr13_16_6 vr14_16_6 vr15_16_6 vr16_16_6 vr17_16_6 vr18_16_6 vr19_16_6 vr20_16_6 vr21_16_6 vr22_16_6 vr23_16_6 vr24_16_6 vr25_16_6 vr26_16_6 vr27_16_6 vr28_16_6 vr29_16_6 vr30_16_6 vr31_16_6 ]; +attach variables vrA_16_7 [vr0_16_7 vr1_16_7 vr2_16_7 vr3_16_7 vr4_16_7 vr5_16_7 vr6_16_7 vr7_16_7 vr8_16_7 vr9_16_7 vr10_16_7 vr11_16_7 vr12_16_7 vr13_16_7 vr14_16_7 vr15_16_7 vr16_16_7 vr17_16_7 vr18_16_7 vr19_16_7 vr20_16_7 vr21_16_7 vr22_16_7 vr23_16_7 vr24_16_7 vr25_16_7 vr26_16_7 vr27_16_7 vr28_16_7 vr29_16_7 vr30_16_7 vr31_16_7 ]; + +# AltVect Vector vrA sub-piece selectors for size 8 +attach variables vrA_8_0 [vr0_8_0 vr1_8_0 vr2_8_0 vr3_8_0 vr4_8_0 vr5_8_0 vr6_8_0 vr7_8_0 vr8_8_0 vr9_8_0 vr10_8_0 vr11_8_0 vr12_8_0 vr13_8_0 vr14_8_0 vr15_8_0 vr16_8_0 vr17_8_0 vr18_8_0 vr19_8_0 vr20_8_0 vr21_8_0 vr22_8_0 vr23_8_0 vr24_8_0 vr25_8_0 vr26_8_0 vr27_8_0 vr28_8_0 vr29_8_0 vr30_8_0 vr31_8_0 ]; +attach variables vrA_8_1 [vr0_8_1 vr1_8_1 vr2_8_1 vr3_8_1 vr4_8_1 vr5_8_1 vr6_8_1 vr7_8_1 vr8_8_1 vr9_8_1 vr10_8_1 vr11_8_1 vr12_8_1 vr13_8_1 vr14_8_1 vr15_8_1 vr16_8_1 vr17_8_1 vr18_8_1 vr19_8_1 vr20_8_1 vr21_8_1 vr22_8_1 vr23_8_1 vr24_8_1 vr25_8_1 vr26_8_1 vr27_8_1 vr28_8_1 vr29_8_1 vr30_8_1 vr31_8_1 ]; +attach variables vrA_8_2 [vr0_8_2 vr1_8_2 vr2_8_2 vr3_8_2 vr4_8_2 vr5_8_2 vr6_8_2 vr7_8_2 vr8_8_2 vr9_8_2 vr10_8_2 vr11_8_2 vr12_8_2 vr13_8_2 vr14_8_2 vr15_8_2 vr16_8_2 vr17_8_2 vr18_8_2 vr19_8_2 vr20_8_2 vr21_8_2 vr22_8_2 vr23_8_2 vr24_8_2 vr25_8_2 vr26_8_2 vr27_8_2 vr28_8_2 vr29_8_2 vr30_8_2 vr31_8_2 ]; +attach variables vrA_8_3 [vr0_8_3 vr1_8_3 vr2_8_3 vr3_8_3 vr4_8_3 vr5_8_3 vr6_8_3 vr7_8_3 vr8_8_3 vr9_8_3 vr10_8_3 vr11_8_3 vr12_8_3 vr13_8_3 vr14_8_3 vr15_8_3 vr16_8_3 vr17_8_3 vr18_8_3 vr19_8_3 vr20_8_3 vr21_8_3 vr22_8_3 vr23_8_3 vr24_8_3 vr25_8_3 vr26_8_3 vr27_8_3 vr28_8_3 vr29_8_3 vr30_8_3 vr31_8_3 ]; +attach 
variables vrA_8_4 [vr0_8_4 vr1_8_4 vr2_8_4 vr3_8_4 vr4_8_4 vr5_8_4 vr6_8_4 vr7_8_4 vr8_8_4 vr9_8_4 vr10_8_4 vr11_8_4 vr12_8_4 vr13_8_4 vr14_8_4 vr15_8_4 vr16_8_4 vr17_8_4 vr18_8_4 vr19_8_4 vr20_8_4 vr21_8_4 vr22_8_4 vr23_8_4 vr24_8_4 vr25_8_4 vr26_8_4 vr27_8_4 vr28_8_4 vr29_8_4 vr30_8_4 vr31_8_4 ]; +attach variables vrA_8_5 [vr0_8_5 vr1_8_5 vr2_8_5 vr3_8_5 vr4_8_5 vr5_8_5 vr6_8_5 vr7_8_5 vr8_8_5 vr9_8_5 vr10_8_5 vr11_8_5 vr12_8_5 vr13_8_5 vr14_8_5 vr15_8_5 vr16_8_5 vr17_8_5 vr18_8_5 vr19_8_5 vr20_8_5 vr21_8_5 vr22_8_5 vr23_8_5 vr24_8_5 vr25_8_5 vr26_8_5 vr27_8_5 vr28_8_5 vr29_8_5 vr30_8_5 vr31_8_5 ]; +attach variables vrA_8_6 [vr0_8_6 vr1_8_6 vr2_8_6 vr3_8_6 vr4_8_6 vr5_8_6 vr6_8_6 vr7_8_6 vr8_8_6 vr9_8_6 vr10_8_6 vr11_8_6 vr12_8_6 vr13_8_6 vr14_8_6 vr15_8_6 vr16_8_6 vr17_8_6 vr18_8_6 vr19_8_6 vr20_8_6 vr21_8_6 vr22_8_6 vr23_8_6 vr24_8_6 vr25_8_6 vr26_8_6 vr27_8_6 vr28_8_6 vr29_8_6 vr30_8_6 vr31_8_6 ]; +attach variables vrA_8_7 [vr0_8_7 vr1_8_7 vr2_8_7 vr3_8_7 vr4_8_7 vr5_8_7 vr6_8_7 vr7_8_7 vr8_8_7 vr9_8_7 vr10_8_7 vr11_8_7 vr12_8_7 vr13_8_7 vr14_8_7 vr15_8_7 vr16_8_7 vr17_8_7 vr18_8_7 vr19_8_7 vr20_8_7 vr21_8_7 vr22_8_7 vr23_8_7 vr24_8_7 vr25_8_7 vr26_8_7 vr27_8_7 vr28_8_7 vr29_8_7 vr30_8_7 vr31_8_7 ]; +attach variables vrA_8_8 [vr0_8_8 vr1_8_8 vr2_8_8 vr3_8_8 vr4_8_8 vr5_8_8 vr6_8_8 vr7_8_8 vr8_8_8 vr9_8_8 vr10_8_8 vr11_8_8 vr12_8_8 vr13_8_8 vr14_8_8 vr15_8_8 vr16_8_8 vr17_8_8 vr18_8_8 vr19_8_8 vr20_8_8 vr21_8_8 vr22_8_8 vr23_8_8 vr24_8_8 vr25_8_8 vr26_8_8 vr27_8_8 vr28_8_8 vr29_8_8 vr30_8_8 vr31_8_8 ]; +attach variables vrA_8_9 [vr0_8_9 vr1_8_9 vr2_8_9 vr3_8_9 vr4_8_9 vr5_8_9 vr6_8_9 vr7_8_9 vr8_8_9 vr9_8_9 vr10_8_9 vr11_8_9 vr12_8_9 vr13_8_9 vr14_8_9 vr15_8_9 vr16_8_9 vr17_8_9 vr18_8_9 vr19_8_9 vr20_8_9 vr21_8_9 vr22_8_9 vr23_8_9 vr24_8_9 vr25_8_9 vr26_8_9 vr27_8_9 vr28_8_9 vr29_8_9 vr30_8_9 vr31_8_9 ]; +attach variables vrA_8_10 [vr0_8_10 vr1_8_10 vr2_8_10 vr3_8_10 vr4_8_10 vr5_8_10 vr6_8_10 vr7_8_10 vr8_8_10 vr9_8_10 vr10_8_10 vr11_8_10 vr12_8_10 vr13_8_10 vr14_8_10 vr15_8_10 vr16_8_10 vr17_8_10 vr18_8_10 vr19_8_10 vr20_8_10 vr21_8_10 vr22_8_10 vr23_8_10 vr24_8_10 vr25_8_10 vr26_8_10 vr27_8_10 vr28_8_10 vr29_8_10 vr30_8_10 vr31_8_10 ]; +attach variables vrA_8_11 [vr0_8_11 vr1_8_11 vr2_8_11 vr3_8_11 vr4_8_11 vr5_8_11 vr6_8_11 vr7_8_11 vr8_8_11 vr9_8_11 vr10_8_11 vr11_8_11 vr12_8_11 vr13_8_11 vr14_8_11 vr15_8_11 vr16_8_11 vr17_8_11 vr18_8_11 vr19_8_11 vr20_8_11 vr21_8_11 vr22_8_11 vr23_8_11 vr24_8_11 vr25_8_11 vr26_8_11 vr27_8_11 vr28_8_11 vr29_8_11 vr30_8_11 vr31_8_11 ]; +attach variables vrA_8_12 [vr0_8_12 vr1_8_12 vr2_8_12 vr3_8_12 vr4_8_12 vr5_8_12 vr6_8_12 vr7_8_12 vr8_8_12 vr9_8_12 vr10_8_12 vr11_8_12 vr12_8_12 vr13_8_12 vr14_8_12 vr15_8_12 vr16_8_12 vr17_8_12 vr18_8_12 vr19_8_12 vr20_8_12 vr21_8_12 vr22_8_12 vr23_8_12 vr24_8_12 vr25_8_12 vr26_8_12 vr27_8_12 vr28_8_12 vr29_8_12 vr30_8_12 vr31_8_12 ]; +attach variables vrA_8_13 [vr0_8_13 vr1_8_13 vr2_8_13 vr3_8_13 vr4_8_13 vr5_8_13 vr6_8_13 vr7_8_13 vr8_8_13 vr9_8_13 vr10_8_13 vr11_8_13 vr12_8_13 vr13_8_13 vr14_8_13 vr15_8_13 vr16_8_13 vr17_8_13 vr18_8_13 vr19_8_13 vr20_8_13 vr21_8_13 vr22_8_13 vr23_8_13 vr24_8_13 vr25_8_13 vr26_8_13 vr27_8_13 vr28_8_13 vr29_8_13 vr30_8_13 vr31_8_13 ]; +attach variables vrA_8_14 [vr0_8_14 vr1_8_14 vr2_8_14 vr3_8_14 vr4_8_14 vr5_8_14 vr6_8_14 vr7_8_14 vr8_8_14 vr9_8_14 vr10_8_14 vr11_8_14 vr12_8_14 vr13_8_14 vr14_8_14 vr15_8_14 vr16_8_14 vr17_8_14 vr18_8_14 vr19_8_14 vr20_8_14 vr21_8_14 vr22_8_14 vr23_8_14 vr24_8_14 vr25_8_14 vr26_8_14 vr27_8_14 vr28_8_14 vr29_8_14 vr30_8_14 vr31_8_14 ]; +attach 
variables vrA_8_15 [vr0_8_15 vr1_8_15 vr2_8_15 vr3_8_15 vr4_8_15 vr5_8_15 vr6_8_15 vr7_8_15 vr8_8_15 vr9_8_15 vr10_8_15 vr11_8_15 vr12_8_15 vr13_8_15 vr14_8_15 vr15_8_15 vr16_8_15 vr17_8_15 vr18_8_15 vr19_8_15 vr20_8_15 vr21_8_15 vr22_8_15 vr23_8_15 vr24_8_15 vr25_8_15 vr26_8_15 vr27_8_15 vr28_8_15 vr29_8_15 vr30_8_15 vr31_8_15 ]; + + +# AltVect Vector vrB sub-piece selectors + +# AltVect Vector vrB sub-piece selectors for size 64 +attach variables vrB_64_0 [vr0_64_0 vr1_64_0 vr2_64_0 vr3_64_0 vr4_64_0 vr5_64_0 vr6_64_0 vr7_64_0 vr8_64_0 vr9_64_0 vr10_64_0 vr11_64_0 vr12_64_0 vr13_64_0 vr14_64_0 vr15_64_0 vr16_64_0 vr17_64_0 vr18_64_0 vr19_64_0 vr20_64_0 vr21_64_0 vr22_64_0 vr23_64_0 vr24_64_0 vr25_64_0 vr26_64_0 vr27_64_0 vr28_64_0 vr29_64_0 vr30_64_0 vr31_64_0 ]; +attach variables vrB_64_1 [vr0_64_1 vr1_64_1 vr2_64_1 vr3_64_1 vr4_64_1 vr5_64_1 vr6_64_1 vr7_64_1 vr8_64_1 vr9_64_1 vr10_64_1 vr11_64_1 vr12_64_1 vr13_64_1 vr14_64_1 vr15_64_1 vr16_64_1 vr17_64_1 vr18_64_1 vr19_64_1 vr20_64_1 vr21_64_1 vr22_64_1 vr23_64_1 vr24_64_1 vr25_64_1 vr26_64_1 vr27_64_1 vr28_64_1 vr29_64_1 vr30_64_1 vr31_64_1 ]; + +# AltVect Vector vrB sub-piece selectors for size 32 +attach variables vrB_32_0 [vr0_32_0 vr1_32_0 vr2_32_0 vr3_32_0 vr4_32_0 vr5_32_0 vr6_32_0 vr7_32_0 vr8_32_0 vr9_32_0 vr10_32_0 vr11_32_0 vr12_32_0 vr13_32_0 vr14_32_0 vr15_32_0 vr16_32_0 vr17_32_0 vr18_32_0 vr19_32_0 vr20_32_0 vr21_32_0 vr22_32_0 vr23_32_0 vr24_32_0 vr25_32_0 vr26_32_0 vr27_32_0 vr28_32_0 vr29_32_0 vr30_32_0 vr31_32_0 ]; +attach variables vrB_32_1 [vr0_32_1 vr1_32_1 vr2_32_1 vr3_32_1 vr4_32_1 vr5_32_1 vr6_32_1 vr7_32_1 vr8_32_1 vr9_32_1 vr10_32_1 vr11_32_1 vr12_32_1 vr13_32_1 vr14_32_1 vr15_32_1 vr16_32_1 vr17_32_1 vr18_32_1 vr19_32_1 vr20_32_1 vr21_32_1 vr22_32_1 vr23_32_1 vr24_32_1 vr25_32_1 vr26_32_1 vr27_32_1 vr28_32_1 vr29_32_1 vr30_32_1 vr31_32_1 ]; +attach variables vrB_32_2 [vr0_32_2 vr1_32_2 vr2_32_2 vr3_32_2 vr4_32_2 vr5_32_2 vr6_32_2 vr7_32_2 vr8_32_2 vr9_32_2 vr10_32_2 vr11_32_2 vr12_32_2 vr13_32_2 vr14_32_2 vr15_32_2 vr16_32_2 vr17_32_2 vr18_32_2 vr19_32_2 vr20_32_2 vr21_32_2 vr22_32_2 vr23_32_2 vr24_32_2 vr25_32_2 vr26_32_2 vr27_32_2 vr28_32_2 vr29_32_2 vr30_32_2 vr31_32_2 ]; +attach variables vrB_32_3 [vr0_32_3 vr1_32_3 vr2_32_3 vr3_32_3 vr4_32_3 vr5_32_3 vr6_32_3 vr7_32_3 vr8_32_3 vr9_32_3 vr10_32_3 vr11_32_3 vr12_32_3 vr13_32_3 vr14_32_3 vr15_32_3 vr16_32_3 vr17_32_3 vr18_32_3 vr19_32_3 vr20_32_3 vr21_32_3 vr22_32_3 vr23_32_3 vr24_32_3 vr25_32_3 vr26_32_3 vr27_32_3 vr28_32_3 vr29_32_3 vr30_32_3 vr31_32_3 ]; + +# AltVect Vector vrB sub-piece selectors for size 16 +attach variables vrB_16_0 [vr0_16_0 vr1_16_0 vr2_16_0 vr3_16_0 vr4_16_0 vr5_16_0 vr6_16_0 vr7_16_0 vr8_16_0 vr9_16_0 vr10_16_0 vr11_16_0 vr12_16_0 vr13_16_0 vr14_16_0 vr15_16_0 vr16_16_0 vr17_16_0 vr18_16_0 vr19_16_0 vr20_16_0 vr21_16_0 vr22_16_0 vr23_16_0 vr24_16_0 vr25_16_0 vr26_16_0 vr27_16_0 vr28_16_0 vr29_16_0 vr30_16_0 vr31_16_0 ]; +attach variables vrB_16_1 [vr0_16_1 vr1_16_1 vr2_16_1 vr3_16_1 vr4_16_1 vr5_16_1 vr6_16_1 vr7_16_1 vr8_16_1 vr9_16_1 vr10_16_1 vr11_16_1 vr12_16_1 vr13_16_1 vr14_16_1 vr15_16_1 vr16_16_1 vr17_16_1 vr18_16_1 vr19_16_1 vr20_16_1 vr21_16_1 vr22_16_1 vr23_16_1 vr24_16_1 vr25_16_1 vr26_16_1 vr27_16_1 vr28_16_1 vr29_16_1 vr30_16_1 vr31_16_1 ]; +attach variables vrB_16_2 [vr0_16_2 vr1_16_2 vr2_16_2 vr3_16_2 vr4_16_2 vr5_16_2 vr6_16_2 vr7_16_2 vr8_16_2 vr9_16_2 vr10_16_2 vr11_16_2 vr12_16_2 vr13_16_2 vr14_16_2 vr15_16_2 vr16_16_2 vr17_16_2 vr18_16_2 vr19_16_2 vr20_16_2 vr21_16_2 vr22_16_2 vr23_16_2 vr24_16_2 vr25_16_2 
vr26_16_2 vr27_16_2 vr28_16_2 vr29_16_2 vr30_16_2 vr31_16_2 ]; +attach variables vrB_16_3 [vr0_16_3 vr1_16_3 vr2_16_3 vr3_16_3 vr4_16_3 vr5_16_3 vr6_16_3 vr7_16_3 vr8_16_3 vr9_16_3 vr10_16_3 vr11_16_3 vr12_16_3 vr13_16_3 vr14_16_3 vr15_16_3 vr16_16_3 vr17_16_3 vr18_16_3 vr19_16_3 vr20_16_3 vr21_16_3 vr22_16_3 vr23_16_3 vr24_16_3 vr25_16_3 vr26_16_3 vr27_16_3 vr28_16_3 vr29_16_3 vr30_16_3 vr31_16_3 ]; +attach variables vrB_16_4 [vr0_16_4 vr1_16_4 vr2_16_4 vr3_16_4 vr4_16_4 vr5_16_4 vr6_16_4 vr7_16_4 vr8_16_4 vr9_16_4 vr10_16_4 vr11_16_4 vr12_16_4 vr13_16_4 vr14_16_4 vr15_16_4 vr16_16_4 vr17_16_4 vr18_16_4 vr19_16_4 vr20_16_4 vr21_16_4 vr22_16_4 vr23_16_4 vr24_16_4 vr25_16_4 vr26_16_4 vr27_16_4 vr28_16_4 vr29_16_4 vr30_16_4 vr31_16_4 ]; +attach variables vrB_16_5 [vr0_16_5 vr1_16_5 vr2_16_5 vr3_16_5 vr4_16_5 vr5_16_5 vr6_16_5 vr7_16_5 vr8_16_5 vr9_16_5 vr10_16_5 vr11_16_5 vr12_16_5 vr13_16_5 vr14_16_5 vr15_16_5 vr16_16_5 vr17_16_5 vr18_16_5 vr19_16_5 vr20_16_5 vr21_16_5 vr22_16_5 vr23_16_5 vr24_16_5 vr25_16_5 vr26_16_5 vr27_16_5 vr28_16_5 vr29_16_5 vr30_16_5 vr31_16_5 ]; +attach variables vrB_16_6 [vr0_16_6 vr1_16_6 vr2_16_6 vr3_16_6 vr4_16_6 vr5_16_6 vr6_16_6 vr7_16_6 vr8_16_6 vr9_16_6 vr10_16_6 vr11_16_6 vr12_16_6 vr13_16_6 vr14_16_6 vr15_16_6 vr16_16_6 vr17_16_6 vr18_16_6 vr19_16_6 vr20_16_6 vr21_16_6 vr22_16_6 vr23_16_6 vr24_16_6 vr25_16_6 vr26_16_6 vr27_16_6 vr28_16_6 vr29_16_6 vr30_16_6 vr31_16_6 ]; +attach variables vrB_16_7 [vr0_16_7 vr1_16_7 vr2_16_7 vr3_16_7 vr4_16_7 vr5_16_7 vr6_16_7 vr7_16_7 vr8_16_7 vr9_16_7 vr10_16_7 vr11_16_7 vr12_16_7 vr13_16_7 vr14_16_7 vr15_16_7 vr16_16_7 vr17_16_7 vr18_16_7 vr19_16_7 vr20_16_7 vr21_16_7 vr22_16_7 vr23_16_7 vr24_16_7 vr25_16_7 vr26_16_7 vr27_16_7 vr28_16_7 vr29_16_7 vr30_16_7 vr31_16_7 ]; + +# AltVect Vector vrB sub-piece selectors for size 8 +attach variables vrB_8_0 [vr0_8_0 vr1_8_0 vr2_8_0 vr3_8_0 vr4_8_0 vr5_8_0 vr6_8_0 vr7_8_0 vr8_8_0 vr9_8_0 vr10_8_0 vr11_8_0 vr12_8_0 vr13_8_0 vr14_8_0 vr15_8_0 vr16_8_0 vr17_8_0 vr18_8_0 vr19_8_0 vr20_8_0 vr21_8_0 vr22_8_0 vr23_8_0 vr24_8_0 vr25_8_0 vr26_8_0 vr27_8_0 vr28_8_0 vr29_8_0 vr30_8_0 vr31_8_0 ]; +attach variables vrB_8_1 [vr0_8_1 vr1_8_1 vr2_8_1 vr3_8_1 vr4_8_1 vr5_8_1 vr6_8_1 vr7_8_1 vr8_8_1 vr9_8_1 vr10_8_1 vr11_8_1 vr12_8_1 vr13_8_1 vr14_8_1 vr15_8_1 vr16_8_1 vr17_8_1 vr18_8_1 vr19_8_1 vr20_8_1 vr21_8_1 vr22_8_1 vr23_8_1 vr24_8_1 vr25_8_1 vr26_8_1 vr27_8_1 vr28_8_1 vr29_8_1 vr30_8_1 vr31_8_1 ]; +attach variables vrB_8_2 [vr0_8_2 vr1_8_2 vr2_8_2 vr3_8_2 vr4_8_2 vr5_8_2 vr6_8_2 vr7_8_2 vr8_8_2 vr9_8_2 vr10_8_2 vr11_8_2 vr12_8_2 vr13_8_2 vr14_8_2 vr15_8_2 vr16_8_2 vr17_8_2 vr18_8_2 vr19_8_2 vr20_8_2 vr21_8_2 vr22_8_2 vr23_8_2 vr24_8_2 vr25_8_2 vr26_8_2 vr27_8_2 vr28_8_2 vr29_8_2 vr30_8_2 vr31_8_2 ]; +attach variables vrB_8_3 [vr0_8_3 vr1_8_3 vr2_8_3 vr3_8_3 vr4_8_3 vr5_8_3 vr6_8_3 vr7_8_3 vr8_8_3 vr9_8_3 vr10_8_3 vr11_8_3 vr12_8_3 vr13_8_3 vr14_8_3 vr15_8_3 vr16_8_3 vr17_8_3 vr18_8_3 vr19_8_3 vr20_8_3 vr21_8_3 vr22_8_3 vr23_8_3 vr24_8_3 vr25_8_3 vr26_8_3 vr27_8_3 vr28_8_3 vr29_8_3 vr30_8_3 vr31_8_3 ]; +attach variables vrB_8_4 [vr0_8_4 vr1_8_4 vr2_8_4 vr3_8_4 vr4_8_4 vr5_8_4 vr6_8_4 vr7_8_4 vr8_8_4 vr9_8_4 vr10_8_4 vr11_8_4 vr12_8_4 vr13_8_4 vr14_8_4 vr15_8_4 vr16_8_4 vr17_8_4 vr18_8_4 vr19_8_4 vr20_8_4 vr21_8_4 vr22_8_4 vr23_8_4 vr24_8_4 vr25_8_4 vr26_8_4 vr27_8_4 vr28_8_4 vr29_8_4 vr30_8_4 vr31_8_4 ]; +attach variables vrB_8_5 [vr0_8_5 vr1_8_5 vr2_8_5 vr3_8_5 vr4_8_5 vr5_8_5 vr6_8_5 vr7_8_5 vr8_8_5 vr9_8_5 vr10_8_5 vr11_8_5 vr12_8_5 vr13_8_5 vr14_8_5 vr15_8_5 vr16_8_5 vr17_8_5 vr18_8_5 
vr19_8_5 vr20_8_5 vr21_8_5 vr22_8_5 vr23_8_5 vr24_8_5 vr25_8_5 vr26_8_5 vr27_8_5 vr28_8_5 vr29_8_5 vr30_8_5 vr31_8_5 ]; +attach variables vrB_8_6 [vr0_8_6 vr1_8_6 vr2_8_6 vr3_8_6 vr4_8_6 vr5_8_6 vr6_8_6 vr7_8_6 vr8_8_6 vr9_8_6 vr10_8_6 vr11_8_6 vr12_8_6 vr13_8_6 vr14_8_6 vr15_8_6 vr16_8_6 vr17_8_6 vr18_8_6 vr19_8_6 vr20_8_6 vr21_8_6 vr22_8_6 vr23_8_6 vr24_8_6 vr25_8_6 vr26_8_6 vr27_8_6 vr28_8_6 vr29_8_6 vr30_8_6 vr31_8_6 ]; +attach variables vrB_8_7 [vr0_8_7 vr1_8_7 vr2_8_7 vr3_8_7 vr4_8_7 vr5_8_7 vr6_8_7 vr7_8_7 vr8_8_7 vr9_8_7 vr10_8_7 vr11_8_7 vr12_8_7 vr13_8_7 vr14_8_7 vr15_8_7 vr16_8_7 vr17_8_7 vr18_8_7 vr19_8_7 vr20_8_7 vr21_8_7 vr22_8_7 vr23_8_7 vr24_8_7 vr25_8_7 vr26_8_7 vr27_8_7 vr28_8_7 vr29_8_7 vr30_8_7 vr31_8_7 ]; +attach variables vrB_8_8 [vr0_8_8 vr1_8_8 vr2_8_8 vr3_8_8 vr4_8_8 vr5_8_8 vr6_8_8 vr7_8_8 vr8_8_8 vr9_8_8 vr10_8_8 vr11_8_8 vr12_8_8 vr13_8_8 vr14_8_8 vr15_8_8 vr16_8_8 vr17_8_8 vr18_8_8 vr19_8_8 vr20_8_8 vr21_8_8 vr22_8_8 vr23_8_8 vr24_8_8 vr25_8_8 vr26_8_8 vr27_8_8 vr28_8_8 vr29_8_8 vr30_8_8 vr31_8_8 ]; +attach variables vrB_8_9 [vr0_8_9 vr1_8_9 vr2_8_9 vr3_8_9 vr4_8_9 vr5_8_9 vr6_8_9 vr7_8_9 vr8_8_9 vr9_8_9 vr10_8_9 vr11_8_9 vr12_8_9 vr13_8_9 vr14_8_9 vr15_8_9 vr16_8_9 vr17_8_9 vr18_8_9 vr19_8_9 vr20_8_9 vr21_8_9 vr22_8_9 vr23_8_9 vr24_8_9 vr25_8_9 vr26_8_9 vr27_8_9 vr28_8_9 vr29_8_9 vr30_8_9 vr31_8_9 ]; +attach variables vrB_8_10 [vr0_8_10 vr1_8_10 vr2_8_10 vr3_8_10 vr4_8_10 vr5_8_10 vr6_8_10 vr7_8_10 vr8_8_10 vr9_8_10 vr10_8_10 vr11_8_10 vr12_8_10 vr13_8_10 vr14_8_10 vr15_8_10 vr16_8_10 vr17_8_10 vr18_8_10 vr19_8_10 vr20_8_10 vr21_8_10 vr22_8_10 vr23_8_10 vr24_8_10 vr25_8_10 vr26_8_10 vr27_8_10 vr28_8_10 vr29_8_10 vr30_8_10 vr31_8_10 ]; +attach variables vrB_8_11 [vr0_8_11 vr1_8_11 vr2_8_11 vr3_8_11 vr4_8_11 vr5_8_11 vr6_8_11 vr7_8_11 vr8_8_11 vr9_8_11 vr10_8_11 vr11_8_11 vr12_8_11 vr13_8_11 vr14_8_11 vr15_8_11 vr16_8_11 vr17_8_11 vr18_8_11 vr19_8_11 vr20_8_11 vr21_8_11 vr22_8_11 vr23_8_11 vr24_8_11 vr25_8_11 vr26_8_11 vr27_8_11 vr28_8_11 vr29_8_11 vr30_8_11 vr31_8_11 ]; +attach variables vrB_8_12 [vr0_8_12 vr1_8_12 vr2_8_12 vr3_8_12 vr4_8_12 vr5_8_12 vr6_8_12 vr7_8_12 vr8_8_12 vr9_8_12 vr10_8_12 vr11_8_12 vr12_8_12 vr13_8_12 vr14_8_12 vr15_8_12 vr16_8_12 vr17_8_12 vr18_8_12 vr19_8_12 vr20_8_12 vr21_8_12 vr22_8_12 vr23_8_12 vr24_8_12 vr25_8_12 vr26_8_12 vr27_8_12 vr28_8_12 vr29_8_12 vr30_8_12 vr31_8_12 ]; +attach variables vrB_8_13 [vr0_8_13 vr1_8_13 vr2_8_13 vr3_8_13 vr4_8_13 vr5_8_13 vr6_8_13 vr7_8_13 vr8_8_13 vr9_8_13 vr10_8_13 vr11_8_13 vr12_8_13 vr13_8_13 vr14_8_13 vr15_8_13 vr16_8_13 vr17_8_13 vr18_8_13 vr19_8_13 vr20_8_13 vr21_8_13 vr22_8_13 vr23_8_13 vr24_8_13 vr25_8_13 vr26_8_13 vr27_8_13 vr28_8_13 vr29_8_13 vr30_8_13 vr31_8_13 ]; +attach variables vrB_8_14 [vr0_8_14 vr1_8_14 vr2_8_14 vr3_8_14 vr4_8_14 vr5_8_14 vr6_8_14 vr7_8_14 vr8_8_14 vr9_8_14 vr10_8_14 vr11_8_14 vr12_8_14 vr13_8_14 vr14_8_14 vr15_8_14 vr16_8_14 vr17_8_14 vr18_8_14 vr19_8_14 vr20_8_14 vr21_8_14 vr22_8_14 vr23_8_14 vr24_8_14 vr25_8_14 vr26_8_14 vr27_8_14 vr28_8_14 vr29_8_14 vr30_8_14 vr31_8_14 ]; +attach variables vrB_8_15 [vr0_8_15 vr1_8_15 vr2_8_15 vr3_8_15 vr4_8_15 vr5_8_15 vr6_8_15 vr7_8_15 vr8_8_15 vr9_8_15 vr10_8_15 vr11_8_15 vr12_8_15 vr13_8_15 vr14_8_15 vr15_8_15 vr16_8_15 vr17_8_15 vr18_8_15 vr19_8_15 vr20_8_15 vr21_8_15 vr22_8_15 vr23_8_15 vr24_8_15 vr25_8_15 vr26_8_15 vr27_8_15 vr28_8_15 vr29_8_15 vr30_8_15 vr31_8_15 ]; + + +# AltVect Vector vrS sub-piece selectors + +# AltVect Vector vrS sub-piece selectors for size 64 +attach variables vrS_64_0 [vr0_64_0 vr1_64_0 vr2_64_0 
vr3_64_0 vr4_64_0 vr5_64_0 vr6_64_0 vr7_64_0 vr8_64_0 vr9_64_0 vr10_64_0 vr11_64_0 vr12_64_0 vr13_64_0 vr14_64_0 vr15_64_0 vr16_64_0 vr17_64_0 vr18_64_0 vr19_64_0 vr20_64_0 vr21_64_0 vr22_64_0 vr23_64_0 vr24_64_0 vr25_64_0 vr26_64_0 vr27_64_0 vr28_64_0 vr29_64_0 vr30_64_0 vr31_64_0 ]; +attach variables vrS_64_1 [vr0_64_1 vr1_64_1 vr2_64_1 vr3_64_1 vr4_64_1 vr5_64_1 vr6_64_1 vr7_64_1 vr8_64_1 vr9_64_1 vr10_64_1 vr11_64_1 vr12_64_1 vr13_64_1 vr14_64_1 vr15_64_1 vr16_64_1 vr17_64_1 vr18_64_1 vr19_64_1 vr20_64_1 vr21_64_1 vr22_64_1 vr23_64_1 vr24_64_1 vr25_64_1 vr26_64_1 vr27_64_1 vr28_64_1 vr29_64_1 vr30_64_1 vr31_64_1 ]; + +# AltVect Vector vrS sub-piece selectors for size 32 +attach variables vrS_32_0 [vr0_32_0 vr1_32_0 vr2_32_0 vr3_32_0 vr4_32_0 vr5_32_0 vr6_32_0 vr7_32_0 vr8_32_0 vr9_32_0 vr10_32_0 vr11_32_0 vr12_32_0 vr13_32_0 vr14_32_0 vr15_32_0 vr16_32_0 vr17_32_0 vr18_32_0 vr19_32_0 vr20_32_0 vr21_32_0 vr22_32_0 vr23_32_0 vr24_32_0 vr25_32_0 vr26_32_0 vr27_32_0 vr28_32_0 vr29_32_0 vr30_32_0 vr31_32_0 ]; +attach variables vrS_32_1 [vr0_32_1 vr1_32_1 vr2_32_1 vr3_32_1 vr4_32_1 vr5_32_1 vr6_32_1 vr7_32_1 vr8_32_1 vr9_32_1 vr10_32_1 vr11_32_1 vr12_32_1 vr13_32_1 vr14_32_1 vr15_32_1 vr16_32_1 vr17_32_1 vr18_32_1 vr19_32_1 vr20_32_1 vr21_32_1 vr22_32_1 vr23_32_1 vr24_32_1 vr25_32_1 vr26_32_1 vr27_32_1 vr28_32_1 vr29_32_1 vr30_32_1 vr31_32_1 ]; +attach variables vrS_32_2 [vr0_32_2 vr1_32_2 vr2_32_2 vr3_32_2 vr4_32_2 vr5_32_2 vr6_32_2 vr7_32_2 vr8_32_2 vr9_32_2 vr10_32_2 vr11_32_2 vr12_32_2 vr13_32_2 vr14_32_2 vr15_32_2 vr16_32_2 vr17_32_2 vr18_32_2 vr19_32_2 vr20_32_2 vr21_32_2 vr22_32_2 vr23_32_2 vr24_32_2 vr25_32_2 vr26_32_2 vr27_32_2 vr28_32_2 vr29_32_2 vr30_32_2 vr31_32_2 ]; +attach variables vrS_32_3 [vr0_32_3 vr1_32_3 vr2_32_3 vr3_32_3 vr4_32_3 vr5_32_3 vr6_32_3 vr7_32_3 vr8_32_3 vr9_32_3 vr10_32_3 vr11_32_3 vr12_32_3 vr13_32_3 vr14_32_3 vr15_32_3 vr16_32_3 vr17_32_3 vr18_32_3 vr19_32_3 vr20_32_3 vr21_32_3 vr22_32_3 vr23_32_3 vr24_32_3 vr25_32_3 vr26_32_3 vr27_32_3 vr28_32_3 vr29_32_3 vr30_32_3 vr31_32_3 ]; + +# AltVect Vector vrS sub-piece selectors for size 16 +attach variables vrS_16_0 [vr0_16_0 vr1_16_0 vr2_16_0 vr3_16_0 vr4_16_0 vr5_16_0 vr6_16_0 vr7_16_0 vr8_16_0 vr9_16_0 vr10_16_0 vr11_16_0 vr12_16_0 vr13_16_0 vr14_16_0 vr15_16_0 vr16_16_0 vr17_16_0 vr18_16_0 vr19_16_0 vr20_16_0 vr21_16_0 vr22_16_0 vr23_16_0 vr24_16_0 vr25_16_0 vr26_16_0 vr27_16_0 vr28_16_0 vr29_16_0 vr30_16_0 vr31_16_0 ]; +attach variables vrS_16_1 [vr0_16_1 vr1_16_1 vr2_16_1 vr3_16_1 vr4_16_1 vr5_16_1 vr6_16_1 vr7_16_1 vr8_16_1 vr9_16_1 vr10_16_1 vr11_16_1 vr12_16_1 vr13_16_1 vr14_16_1 vr15_16_1 vr16_16_1 vr17_16_1 vr18_16_1 vr19_16_1 vr20_16_1 vr21_16_1 vr22_16_1 vr23_16_1 vr24_16_1 vr25_16_1 vr26_16_1 vr27_16_1 vr28_16_1 vr29_16_1 vr30_16_1 vr31_16_1 ]; +attach variables vrS_16_2 [vr0_16_2 vr1_16_2 vr2_16_2 vr3_16_2 vr4_16_2 vr5_16_2 vr6_16_2 vr7_16_2 vr8_16_2 vr9_16_2 vr10_16_2 vr11_16_2 vr12_16_2 vr13_16_2 vr14_16_2 vr15_16_2 vr16_16_2 vr17_16_2 vr18_16_2 vr19_16_2 vr20_16_2 vr21_16_2 vr22_16_2 vr23_16_2 vr24_16_2 vr25_16_2 vr26_16_2 vr27_16_2 vr28_16_2 vr29_16_2 vr30_16_2 vr31_16_2 ]; +attach variables vrS_16_3 [vr0_16_3 vr1_16_3 vr2_16_3 vr3_16_3 vr4_16_3 vr5_16_3 vr6_16_3 vr7_16_3 vr8_16_3 vr9_16_3 vr10_16_3 vr11_16_3 vr12_16_3 vr13_16_3 vr14_16_3 vr15_16_3 vr16_16_3 vr17_16_3 vr18_16_3 vr19_16_3 vr20_16_3 vr21_16_3 vr22_16_3 vr23_16_3 vr24_16_3 vr25_16_3 vr26_16_3 vr27_16_3 vr28_16_3 vr29_16_3 vr30_16_3 vr31_16_3 ]; +attach variables vrS_16_4 [vr0_16_4 vr1_16_4 vr2_16_4 vr3_16_4 vr4_16_4 vr5_16_4 
vr6_16_4 vr7_16_4 vr8_16_4 vr9_16_4 vr10_16_4 vr11_16_4 vr12_16_4 vr13_16_4 vr14_16_4 vr15_16_4 vr16_16_4 vr17_16_4 vr18_16_4 vr19_16_4 vr20_16_4 vr21_16_4 vr22_16_4 vr23_16_4 vr24_16_4 vr25_16_4 vr26_16_4 vr27_16_4 vr28_16_4 vr29_16_4 vr30_16_4 vr31_16_4 ]; +attach variables vrS_16_5 [vr0_16_5 vr1_16_5 vr2_16_5 vr3_16_5 vr4_16_5 vr5_16_5 vr6_16_5 vr7_16_5 vr8_16_5 vr9_16_5 vr10_16_5 vr11_16_5 vr12_16_5 vr13_16_5 vr14_16_5 vr15_16_5 vr16_16_5 vr17_16_5 vr18_16_5 vr19_16_5 vr20_16_5 vr21_16_5 vr22_16_5 vr23_16_5 vr24_16_5 vr25_16_5 vr26_16_5 vr27_16_5 vr28_16_5 vr29_16_5 vr30_16_5 vr31_16_5 ]; +attach variables vrS_16_6 [vr0_16_6 vr1_16_6 vr2_16_6 vr3_16_6 vr4_16_6 vr5_16_6 vr6_16_6 vr7_16_6 vr8_16_6 vr9_16_6 vr10_16_6 vr11_16_6 vr12_16_6 vr13_16_6 vr14_16_6 vr15_16_6 vr16_16_6 vr17_16_6 vr18_16_6 vr19_16_6 vr20_16_6 vr21_16_6 vr22_16_6 vr23_16_6 vr24_16_6 vr25_16_6 vr26_16_6 vr27_16_6 vr28_16_6 vr29_16_6 vr30_16_6 vr31_16_6 ]; +attach variables vrS_16_7 [vr0_16_7 vr1_16_7 vr2_16_7 vr3_16_7 vr4_16_7 vr5_16_7 vr6_16_7 vr7_16_7 vr8_16_7 vr9_16_7 vr10_16_7 vr11_16_7 vr12_16_7 vr13_16_7 vr14_16_7 vr15_16_7 vr16_16_7 vr17_16_7 vr18_16_7 vr19_16_7 vr20_16_7 vr21_16_7 vr22_16_7 vr23_16_7 vr24_16_7 vr25_16_7 vr26_16_7 vr27_16_7 vr28_16_7 vr29_16_7 vr30_16_7 vr31_16_7 ]; + +# AltVect Vector vrS sub-piece selectors for size 8 +attach variables vrS_8_0 [vr0_8_0 vr1_8_0 vr2_8_0 vr3_8_0 vr4_8_0 vr5_8_0 vr6_8_0 vr7_8_0 vr8_8_0 vr9_8_0 vr10_8_0 vr11_8_0 vr12_8_0 vr13_8_0 vr14_8_0 vr15_8_0 vr16_8_0 vr17_8_0 vr18_8_0 vr19_8_0 vr20_8_0 vr21_8_0 vr22_8_0 vr23_8_0 vr24_8_0 vr25_8_0 vr26_8_0 vr27_8_0 vr28_8_0 vr29_8_0 vr30_8_0 vr31_8_0 ]; +attach variables vrS_8_1 [vr0_8_1 vr1_8_1 vr2_8_1 vr3_8_1 vr4_8_1 vr5_8_1 vr6_8_1 vr7_8_1 vr8_8_1 vr9_8_1 vr10_8_1 vr11_8_1 vr12_8_1 vr13_8_1 vr14_8_1 vr15_8_1 vr16_8_1 vr17_8_1 vr18_8_1 vr19_8_1 vr20_8_1 vr21_8_1 vr22_8_1 vr23_8_1 vr24_8_1 vr25_8_1 vr26_8_1 vr27_8_1 vr28_8_1 vr29_8_1 vr30_8_1 vr31_8_1 ]; +attach variables vrS_8_2 [vr0_8_2 vr1_8_2 vr2_8_2 vr3_8_2 vr4_8_2 vr5_8_2 vr6_8_2 vr7_8_2 vr8_8_2 vr9_8_2 vr10_8_2 vr11_8_2 vr12_8_2 vr13_8_2 vr14_8_2 vr15_8_2 vr16_8_2 vr17_8_2 vr18_8_2 vr19_8_2 vr20_8_2 vr21_8_2 vr22_8_2 vr23_8_2 vr24_8_2 vr25_8_2 vr26_8_2 vr27_8_2 vr28_8_2 vr29_8_2 vr30_8_2 vr31_8_2 ]; +attach variables vrS_8_3 [vr0_8_3 vr1_8_3 vr2_8_3 vr3_8_3 vr4_8_3 vr5_8_3 vr6_8_3 vr7_8_3 vr8_8_3 vr9_8_3 vr10_8_3 vr11_8_3 vr12_8_3 vr13_8_3 vr14_8_3 vr15_8_3 vr16_8_3 vr17_8_3 vr18_8_3 vr19_8_3 vr20_8_3 vr21_8_3 vr22_8_3 vr23_8_3 vr24_8_3 vr25_8_3 vr26_8_3 vr27_8_3 vr28_8_3 vr29_8_3 vr30_8_3 vr31_8_3 ]; +attach variables vrS_8_4 [vr0_8_4 vr1_8_4 vr2_8_4 vr3_8_4 vr4_8_4 vr5_8_4 vr6_8_4 vr7_8_4 vr8_8_4 vr9_8_4 vr10_8_4 vr11_8_4 vr12_8_4 vr13_8_4 vr14_8_4 vr15_8_4 vr16_8_4 vr17_8_4 vr18_8_4 vr19_8_4 vr20_8_4 vr21_8_4 vr22_8_4 vr23_8_4 vr24_8_4 vr25_8_4 vr26_8_4 vr27_8_4 vr28_8_4 vr29_8_4 vr30_8_4 vr31_8_4 ]; +attach variables vrS_8_5 [vr0_8_5 vr1_8_5 vr2_8_5 vr3_8_5 vr4_8_5 vr5_8_5 vr6_8_5 vr7_8_5 vr8_8_5 vr9_8_5 vr10_8_5 vr11_8_5 vr12_8_5 vr13_8_5 vr14_8_5 vr15_8_5 vr16_8_5 vr17_8_5 vr18_8_5 vr19_8_5 vr20_8_5 vr21_8_5 vr22_8_5 vr23_8_5 vr24_8_5 vr25_8_5 vr26_8_5 vr27_8_5 vr28_8_5 vr29_8_5 vr30_8_5 vr31_8_5 ]; +attach variables vrS_8_6 [vr0_8_6 vr1_8_6 vr2_8_6 vr3_8_6 vr4_8_6 vr5_8_6 vr6_8_6 vr7_8_6 vr8_8_6 vr9_8_6 vr10_8_6 vr11_8_6 vr12_8_6 vr13_8_6 vr14_8_6 vr15_8_6 vr16_8_6 vr17_8_6 vr18_8_6 vr19_8_6 vr20_8_6 vr21_8_6 vr22_8_6 vr23_8_6 vr24_8_6 vr25_8_6 vr26_8_6 vr27_8_6 vr28_8_6 vr29_8_6 vr30_8_6 vr31_8_6 ]; +attach variables vrS_8_7 [vr0_8_7 vr1_8_7 vr2_8_7 vr3_8_7 
vr4_8_7 vr5_8_7 vr6_8_7 vr7_8_7 vr8_8_7 vr9_8_7 vr10_8_7 vr11_8_7 vr12_8_7 vr13_8_7 vr14_8_7 vr15_8_7 vr16_8_7 vr17_8_7 vr18_8_7 vr19_8_7 vr20_8_7 vr21_8_7 vr22_8_7 vr23_8_7 vr24_8_7 vr25_8_7 vr26_8_7 vr27_8_7 vr28_8_7 vr29_8_7 vr30_8_7 vr31_8_7 ]; +attach variables vrS_8_8 [vr0_8_8 vr1_8_8 vr2_8_8 vr3_8_8 vr4_8_8 vr5_8_8 vr6_8_8 vr7_8_8 vr8_8_8 vr9_8_8 vr10_8_8 vr11_8_8 vr12_8_8 vr13_8_8 vr14_8_8 vr15_8_8 vr16_8_8 vr17_8_8 vr18_8_8 vr19_8_8 vr20_8_8 vr21_8_8 vr22_8_8 vr23_8_8 vr24_8_8 vr25_8_8 vr26_8_8 vr27_8_8 vr28_8_8 vr29_8_8 vr30_8_8 vr31_8_8 ]; +attach variables vrS_8_9 [vr0_8_9 vr1_8_9 vr2_8_9 vr3_8_9 vr4_8_9 vr5_8_9 vr6_8_9 vr7_8_9 vr8_8_9 vr9_8_9 vr10_8_9 vr11_8_9 vr12_8_9 vr13_8_9 vr14_8_9 vr15_8_9 vr16_8_9 vr17_8_9 vr18_8_9 vr19_8_9 vr20_8_9 vr21_8_9 vr22_8_9 vr23_8_9 vr24_8_9 vr25_8_9 vr26_8_9 vr27_8_9 vr28_8_9 vr29_8_9 vr30_8_9 vr31_8_9 ]; +attach variables vrS_8_10 [vr0_8_10 vr1_8_10 vr2_8_10 vr3_8_10 vr4_8_10 vr5_8_10 vr6_8_10 vr7_8_10 vr8_8_10 vr9_8_10 vr10_8_10 vr11_8_10 vr12_8_10 vr13_8_10 vr14_8_10 vr15_8_10 vr16_8_10 vr17_8_10 vr18_8_10 vr19_8_10 vr20_8_10 vr21_8_10 vr22_8_10 vr23_8_10 vr24_8_10 vr25_8_10 vr26_8_10 vr27_8_10 vr28_8_10 vr29_8_10 vr30_8_10 vr31_8_10 ]; +attach variables vrS_8_11 [vr0_8_11 vr1_8_11 vr2_8_11 vr3_8_11 vr4_8_11 vr5_8_11 vr6_8_11 vr7_8_11 vr8_8_11 vr9_8_11 vr10_8_11 vr11_8_11 vr12_8_11 vr13_8_11 vr14_8_11 vr15_8_11 vr16_8_11 vr17_8_11 vr18_8_11 vr19_8_11 vr20_8_11 vr21_8_11 vr22_8_11 vr23_8_11 vr24_8_11 vr25_8_11 vr26_8_11 vr27_8_11 vr28_8_11 vr29_8_11 vr30_8_11 vr31_8_11 ]; +attach variables vrS_8_12 [vr0_8_12 vr1_8_12 vr2_8_12 vr3_8_12 vr4_8_12 vr5_8_12 vr6_8_12 vr7_8_12 vr8_8_12 vr9_8_12 vr10_8_12 vr11_8_12 vr12_8_12 vr13_8_12 vr14_8_12 vr15_8_12 vr16_8_12 vr17_8_12 vr18_8_12 vr19_8_12 vr20_8_12 vr21_8_12 vr22_8_12 vr23_8_12 vr24_8_12 vr25_8_12 vr26_8_12 vr27_8_12 vr28_8_12 vr29_8_12 vr30_8_12 vr31_8_12 ]; +attach variables vrS_8_13 [vr0_8_13 vr1_8_13 vr2_8_13 vr3_8_13 vr4_8_13 vr5_8_13 vr6_8_13 vr7_8_13 vr8_8_13 vr9_8_13 vr10_8_13 vr11_8_13 vr12_8_13 vr13_8_13 vr14_8_13 vr15_8_13 vr16_8_13 vr17_8_13 vr18_8_13 vr19_8_13 vr20_8_13 vr21_8_13 vr22_8_13 vr23_8_13 vr24_8_13 vr25_8_13 vr26_8_13 vr27_8_13 vr28_8_13 vr29_8_13 vr30_8_13 vr31_8_13 ]; +attach variables vrS_8_14 [vr0_8_14 vr1_8_14 vr2_8_14 vr3_8_14 vr4_8_14 vr5_8_14 vr6_8_14 vr7_8_14 vr8_8_14 vr9_8_14 vr10_8_14 vr11_8_14 vr12_8_14 vr13_8_14 vr14_8_14 vr15_8_14 vr16_8_14 vr17_8_14 vr18_8_14 vr19_8_14 vr20_8_14 vr21_8_14 vr22_8_14 vr23_8_14 vr24_8_14 vr25_8_14 vr26_8_14 vr27_8_14 vr28_8_14 vr29_8_14 vr30_8_14 vr31_8_14 ]; +attach variables vrS_8_15 [vr0_8_15 vr1_8_15 vr2_8_15 vr3_8_15 vr4_8_15 vr5_8_15 vr6_8_15 vr7_8_15 vr8_8_15 vr9_8_15 vr10_8_15 vr11_8_15 vr12_8_15 vr13_8_15 vr14_8_15 vr15_8_15 vr16_8_15 vr17_8_15 vr18_8_15 vr19_8_15 vr20_8_15 vr21_8_15 vr22_8_15 vr23_8_15 vr24_8_15 vr25_8_15 vr26_8_15 vr27_8_15 vr28_8_15 vr29_8_15 vr30_8_15 vr31_8_15 ]; + + +# AltVect Vector vrC sub-piece selectors + +# AltVect Vector vrC sub-piece selectors for size 64 +attach variables vrC_64_0 [vr0_64_0 vr1_64_0 vr2_64_0 vr3_64_0 vr4_64_0 vr5_64_0 vr6_64_0 vr7_64_0 vr8_64_0 vr9_64_0 vr10_64_0 vr11_64_0 vr12_64_0 vr13_64_0 vr14_64_0 vr15_64_0 vr16_64_0 vr17_64_0 vr18_64_0 vr19_64_0 vr20_64_0 vr21_64_0 vr22_64_0 vr23_64_0 vr24_64_0 vr25_64_0 vr26_64_0 vr27_64_0 vr28_64_0 vr29_64_0 vr30_64_0 vr31_64_0 ]; +attach variables vrC_64_1 [vr0_64_1 vr1_64_1 vr2_64_1 vr3_64_1 vr4_64_1 vr5_64_1 vr6_64_1 vr7_64_1 vr8_64_1 vr9_64_1 vr10_64_1 vr11_64_1 vr12_64_1 vr13_64_1 vr14_64_1 vr15_64_1 vr16_64_1 vr17_64_1 
vr18_64_1 vr19_64_1 vr20_64_1 vr21_64_1 vr22_64_1 vr23_64_1 vr24_64_1 vr25_64_1 vr26_64_1 vr27_64_1 vr28_64_1 vr29_64_1 vr30_64_1 vr31_64_1 ]; + +# AltVect Vector vrC sub-piece selectors for size 32 +attach variables vrC_32_0 [vr0_32_0 vr1_32_0 vr2_32_0 vr3_32_0 vr4_32_0 vr5_32_0 vr6_32_0 vr7_32_0 vr8_32_0 vr9_32_0 vr10_32_0 vr11_32_0 vr12_32_0 vr13_32_0 vr14_32_0 vr15_32_0 vr16_32_0 vr17_32_0 vr18_32_0 vr19_32_0 vr20_32_0 vr21_32_0 vr22_32_0 vr23_32_0 vr24_32_0 vr25_32_0 vr26_32_0 vr27_32_0 vr28_32_0 vr29_32_0 vr30_32_0 vr31_32_0 ]; +attach variables vrC_32_1 [vr0_32_1 vr1_32_1 vr2_32_1 vr3_32_1 vr4_32_1 vr5_32_1 vr6_32_1 vr7_32_1 vr8_32_1 vr9_32_1 vr10_32_1 vr11_32_1 vr12_32_1 vr13_32_1 vr14_32_1 vr15_32_1 vr16_32_1 vr17_32_1 vr18_32_1 vr19_32_1 vr20_32_1 vr21_32_1 vr22_32_1 vr23_32_1 vr24_32_1 vr25_32_1 vr26_32_1 vr27_32_1 vr28_32_1 vr29_32_1 vr30_32_1 vr31_32_1 ]; +attach variables vrC_32_2 [vr0_32_2 vr1_32_2 vr2_32_2 vr3_32_2 vr4_32_2 vr5_32_2 vr6_32_2 vr7_32_2 vr8_32_2 vr9_32_2 vr10_32_2 vr11_32_2 vr12_32_2 vr13_32_2 vr14_32_2 vr15_32_2 vr16_32_2 vr17_32_2 vr18_32_2 vr19_32_2 vr20_32_2 vr21_32_2 vr22_32_2 vr23_32_2 vr24_32_2 vr25_32_2 vr26_32_2 vr27_32_2 vr28_32_2 vr29_32_2 vr30_32_2 vr31_32_2 ]; +attach variables vrC_32_3 [vr0_32_3 vr1_32_3 vr2_32_3 vr3_32_3 vr4_32_3 vr5_32_3 vr6_32_3 vr7_32_3 vr8_32_3 vr9_32_3 vr10_32_3 vr11_32_3 vr12_32_3 vr13_32_3 vr14_32_3 vr15_32_3 vr16_32_3 vr17_32_3 vr18_32_3 vr19_32_3 vr20_32_3 vr21_32_3 vr22_32_3 vr23_32_3 vr24_32_3 vr25_32_3 vr26_32_3 vr27_32_3 vr28_32_3 vr29_32_3 vr30_32_3 vr31_32_3 ]; + +# AltVect Vector vrC sub-piece selectors for size 16 +attach variables vrC_16_0 [vr0_16_0 vr1_16_0 vr2_16_0 vr3_16_0 vr4_16_0 vr5_16_0 vr6_16_0 vr7_16_0 vr8_16_0 vr9_16_0 vr10_16_0 vr11_16_0 vr12_16_0 vr13_16_0 vr14_16_0 vr15_16_0 vr16_16_0 vr17_16_0 vr18_16_0 vr19_16_0 vr20_16_0 vr21_16_0 vr22_16_0 vr23_16_0 vr24_16_0 vr25_16_0 vr26_16_0 vr27_16_0 vr28_16_0 vr29_16_0 vr30_16_0 vr31_16_0 ]; +attach variables vrC_16_1 [vr0_16_1 vr1_16_1 vr2_16_1 vr3_16_1 vr4_16_1 vr5_16_1 vr6_16_1 vr7_16_1 vr8_16_1 vr9_16_1 vr10_16_1 vr11_16_1 vr12_16_1 vr13_16_1 vr14_16_1 vr15_16_1 vr16_16_1 vr17_16_1 vr18_16_1 vr19_16_1 vr20_16_1 vr21_16_1 vr22_16_1 vr23_16_1 vr24_16_1 vr25_16_1 vr26_16_1 vr27_16_1 vr28_16_1 vr29_16_1 vr30_16_1 vr31_16_1 ]; +attach variables vrC_16_2 [vr0_16_2 vr1_16_2 vr2_16_2 vr3_16_2 vr4_16_2 vr5_16_2 vr6_16_2 vr7_16_2 vr8_16_2 vr9_16_2 vr10_16_2 vr11_16_2 vr12_16_2 vr13_16_2 vr14_16_2 vr15_16_2 vr16_16_2 vr17_16_2 vr18_16_2 vr19_16_2 vr20_16_2 vr21_16_2 vr22_16_2 vr23_16_2 vr24_16_2 vr25_16_2 vr26_16_2 vr27_16_2 vr28_16_2 vr29_16_2 vr30_16_2 vr31_16_2 ]; +attach variables vrC_16_3 [vr0_16_3 vr1_16_3 vr2_16_3 vr3_16_3 vr4_16_3 vr5_16_3 vr6_16_3 vr7_16_3 vr8_16_3 vr9_16_3 vr10_16_3 vr11_16_3 vr12_16_3 vr13_16_3 vr14_16_3 vr15_16_3 vr16_16_3 vr17_16_3 vr18_16_3 vr19_16_3 vr20_16_3 vr21_16_3 vr22_16_3 vr23_16_3 vr24_16_3 vr25_16_3 vr26_16_3 vr27_16_3 vr28_16_3 vr29_16_3 vr30_16_3 vr31_16_3 ]; +attach variables vrC_16_4 [vr0_16_4 vr1_16_4 vr2_16_4 vr3_16_4 vr4_16_4 vr5_16_4 vr6_16_4 vr7_16_4 vr8_16_4 vr9_16_4 vr10_16_4 vr11_16_4 vr12_16_4 vr13_16_4 vr14_16_4 vr15_16_4 vr16_16_4 vr17_16_4 vr18_16_4 vr19_16_4 vr20_16_4 vr21_16_4 vr22_16_4 vr23_16_4 vr24_16_4 vr25_16_4 vr26_16_4 vr27_16_4 vr28_16_4 vr29_16_4 vr30_16_4 vr31_16_4 ]; +attach variables vrC_16_5 [vr0_16_5 vr1_16_5 vr2_16_5 vr3_16_5 vr4_16_5 vr5_16_5 vr6_16_5 vr7_16_5 vr8_16_5 vr9_16_5 vr10_16_5 vr11_16_5 vr12_16_5 vr13_16_5 vr14_16_5 vr15_16_5 vr16_16_5 vr17_16_5 vr18_16_5 vr19_16_5 vr20_16_5 
vr21_16_5 vr22_16_5 vr23_16_5 vr24_16_5 vr25_16_5 vr26_16_5 vr27_16_5 vr28_16_5 vr29_16_5 vr30_16_5 vr31_16_5 ]; +attach variables vrC_16_6 [vr0_16_6 vr1_16_6 vr2_16_6 vr3_16_6 vr4_16_6 vr5_16_6 vr6_16_6 vr7_16_6 vr8_16_6 vr9_16_6 vr10_16_6 vr11_16_6 vr12_16_6 vr13_16_6 vr14_16_6 vr15_16_6 vr16_16_6 vr17_16_6 vr18_16_6 vr19_16_6 vr20_16_6 vr21_16_6 vr22_16_6 vr23_16_6 vr24_16_6 vr25_16_6 vr26_16_6 vr27_16_6 vr28_16_6 vr29_16_6 vr30_16_6 vr31_16_6 ]; +attach variables vrC_16_7 [vr0_16_7 vr1_16_7 vr2_16_7 vr3_16_7 vr4_16_7 vr5_16_7 vr6_16_7 vr7_16_7 vr8_16_7 vr9_16_7 vr10_16_7 vr11_16_7 vr12_16_7 vr13_16_7 vr14_16_7 vr15_16_7 vr16_16_7 vr17_16_7 vr18_16_7 vr19_16_7 vr20_16_7 vr21_16_7 vr22_16_7 vr23_16_7 vr24_16_7 vr25_16_7 vr26_16_7 vr27_16_7 vr28_16_7 vr29_16_7 vr30_16_7 vr31_16_7 ]; + +# AltVect Vector vrC sub-piece selectors for size 8 +attach variables vrC_8_0 [vr0_8_0 vr1_8_0 vr2_8_0 vr3_8_0 vr4_8_0 vr5_8_0 vr6_8_0 vr7_8_0 vr8_8_0 vr9_8_0 vr10_8_0 vr11_8_0 vr12_8_0 vr13_8_0 vr14_8_0 vr15_8_0 vr16_8_0 vr17_8_0 vr18_8_0 vr19_8_0 vr20_8_0 vr21_8_0 vr22_8_0 vr23_8_0 vr24_8_0 vr25_8_0 vr26_8_0 vr27_8_0 vr28_8_0 vr29_8_0 vr30_8_0 vr31_8_0 ]; +attach variables vrC_8_1 [vr0_8_1 vr1_8_1 vr2_8_1 vr3_8_1 vr4_8_1 vr5_8_1 vr6_8_1 vr7_8_1 vr8_8_1 vr9_8_1 vr10_8_1 vr11_8_1 vr12_8_1 vr13_8_1 vr14_8_1 vr15_8_1 vr16_8_1 vr17_8_1 vr18_8_1 vr19_8_1 vr20_8_1 vr21_8_1 vr22_8_1 vr23_8_1 vr24_8_1 vr25_8_1 vr26_8_1 vr27_8_1 vr28_8_1 vr29_8_1 vr30_8_1 vr31_8_1 ]; +attach variables vrC_8_2 [vr0_8_2 vr1_8_2 vr2_8_2 vr3_8_2 vr4_8_2 vr5_8_2 vr6_8_2 vr7_8_2 vr8_8_2 vr9_8_2 vr10_8_2 vr11_8_2 vr12_8_2 vr13_8_2 vr14_8_2 vr15_8_2 vr16_8_2 vr17_8_2 vr18_8_2 vr19_8_2 vr20_8_2 vr21_8_2 vr22_8_2 vr23_8_2 vr24_8_2 vr25_8_2 vr26_8_2 vr27_8_2 vr28_8_2 vr29_8_2 vr30_8_2 vr31_8_2 ]; +attach variables vrC_8_3 [vr0_8_3 vr1_8_3 vr2_8_3 vr3_8_3 vr4_8_3 vr5_8_3 vr6_8_3 vr7_8_3 vr8_8_3 vr9_8_3 vr10_8_3 vr11_8_3 vr12_8_3 vr13_8_3 vr14_8_3 vr15_8_3 vr16_8_3 vr17_8_3 vr18_8_3 vr19_8_3 vr20_8_3 vr21_8_3 vr22_8_3 vr23_8_3 vr24_8_3 vr25_8_3 vr26_8_3 vr27_8_3 vr28_8_3 vr29_8_3 vr30_8_3 vr31_8_3 ]; +attach variables vrC_8_4 [vr0_8_4 vr1_8_4 vr2_8_4 vr3_8_4 vr4_8_4 vr5_8_4 vr6_8_4 vr7_8_4 vr8_8_4 vr9_8_4 vr10_8_4 vr11_8_4 vr12_8_4 vr13_8_4 vr14_8_4 vr15_8_4 vr16_8_4 vr17_8_4 vr18_8_4 vr19_8_4 vr20_8_4 vr21_8_4 vr22_8_4 vr23_8_4 vr24_8_4 vr25_8_4 vr26_8_4 vr27_8_4 vr28_8_4 vr29_8_4 vr30_8_4 vr31_8_4 ]; +attach variables vrC_8_5 [vr0_8_5 vr1_8_5 vr2_8_5 vr3_8_5 vr4_8_5 vr5_8_5 vr6_8_5 vr7_8_5 vr8_8_5 vr9_8_5 vr10_8_5 vr11_8_5 vr12_8_5 vr13_8_5 vr14_8_5 vr15_8_5 vr16_8_5 vr17_8_5 vr18_8_5 vr19_8_5 vr20_8_5 vr21_8_5 vr22_8_5 vr23_8_5 vr24_8_5 vr25_8_5 vr26_8_5 vr27_8_5 vr28_8_5 vr29_8_5 vr30_8_5 vr31_8_5 ]; +attach variables vrC_8_6 [vr0_8_6 vr1_8_6 vr2_8_6 vr3_8_6 vr4_8_6 vr5_8_6 vr6_8_6 vr7_8_6 vr8_8_6 vr9_8_6 vr10_8_6 vr11_8_6 vr12_8_6 vr13_8_6 vr14_8_6 vr15_8_6 vr16_8_6 vr17_8_6 vr18_8_6 vr19_8_6 vr20_8_6 vr21_8_6 vr22_8_6 vr23_8_6 vr24_8_6 vr25_8_6 vr26_8_6 vr27_8_6 vr28_8_6 vr29_8_6 vr30_8_6 vr31_8_6 ]; +attach variables vrC_8_7 [vr0_8_7 vr1_8_7 vr2_8_7 vr3_8_7 vr4_8_7 vr5_8_7 vr6_8_7 vr7_8_7 vr8_8_7 vr9_8_7 vr10_8_7 vr11_8_7 vr12_8_7 vr13_8_7 vr14_8_7 vr15_8_7 vr16_8_7 vr17_8_7 vr18_8_7 vr19_8_7 vr20_8_7 vr21_8_7 vr22_8_7 vr23_8_7 vr24_8_7 vr25_8_7 vr26_8_7 vr27_8_7 vr28_8_7 vr29_8_7 vr30_8_7 vr31_8_7 ]; +attach variables vrC_8_8 [vr0_8_8 vr1_8_8 vr2_8_8 vr3_8_8 vr4_8_8 vr5_8_8 vr6_8_8 vr7_8_8 vr8_8_8 vr9_8_8 vr10_8_8 vr11_8_8 vr12_8_8 vr13_8_8 vr14_8_8 vr15_8_8 vr16_8_8 vr17_8_8 vr18_8_8 vr19_8_8 vr20_8_8 vr21_8_8 vr22_8_8 vr23_8_8 
vr24_8_8 vr25_8_8 vr26_8_8 vr27_8_8 vr28_8_8 vr29_8_8 vr30_8_8 vr31_8_8 ]; +attach variables vrC_8_9 [vr0_8_9 vr1_8_9 vr2_8_9 vr3_8_9 vr4_8_9 vr5_8_9 vr6_8_9 vr7_8_9 vr8_8_9 vr9_8_9 vr10_8_9 vr11_8_9 vr12_8_9 vr13_8_9 vr14_8_9 vr15_8_9 vr16_8_9 vr17_8_9 vr18_8_9 vr19_8_9 vr20_8_9 vr21_8_9 vr22_8_9 vr23_8_9 vr24_8_9 vr25_8_9 vr26_8_9 vr27_8_9 vr28_8_9 vr29_8_9 vr30_8_9 vr31_8_9 ]; +attach variables vrC_8_10 [vr0_8_10 vr1_8_10 vr2_8_10 vr3_8_10 vr4_8_10 vr5_8_10 vr6_8_10 vr7_8_10 vr8_8_10 vr9_8_10 vr10_8_10 vr11_8_10 vr12_8_10 vr13_8_10 vr14_8_10 vr15_8_10 vr16_8_10 vr17_8_10 vr18_8_10 vr19_8_10 vr20_8_10 vr21_8_10 vr22_8_10 vr23_8_10 vr24_8_10 vr25_8_10 vr26_8_10 vr27_8_10 vr28_8_10 vr29_8_10 vr30_8_10 vr31_8_10 ]; +attach variables vrC_8_11 [vr0_8_11 vr1_8_11 vr2_8_11 vr3_8_11 vr4_8_11 vr5_8_11 vr6_8_11 vr7_8_11 vr8_8_11 vr9_8_11 vr10_8_11 vr11_8_11 vr12_8_11 vr13_8_11 vr14_8_11 vr15_8_11 vr16_8_11 vr17_8_11 vr18_8_11 vr19_8_11 vr20_8_11 vr21_8_11 vr22_8_11 vr23_8_11 vr24_8_11 vr25_8_11 vr26_8_11 vr27_8_11 vr28_8_11 vr29_8_11 vr30_8_11 vr31_8_11 ]; +attach variables vrC_8_12 [vr0_8_12 vr1_8_12 vr2_8_12 vr3_8_12 vr4_8_12 vr5_8_12 vr6_8_12 vr7_8_12 vr8_8_12 vr9_8_12 vr10_8_12 vr11_8_12 vr12_8_12 vr13_8_12 vr14_8_12 vr15_8_12 vr16_8_12 vr17_8_12 vr18_8_12 vr19_8_12 vr20_8_12 vr21_8_12 vr22_8_12 vr23_8_12 vr24_8_12 vr25_8_12 vr26_8_12 vr27_8_12 vr28_8_12 vr29_8_12 vr30_8_12 vr31_8_12 ]; +attach variables vrC_8_13 [vr0_8_13 vr1_8_13 vr2_8_13 vr3_8_13 vr4_8_13 vr5_8_13 vr6_8_13 vr7_8_13 vr8_8_13 vr9_8_13 vr10_8_13 vr11_8_13 vr12_8_13 vr13_8_13 vr14_8_13 vr15_8_13 vr16_8_13 vr17_8_13 vr18_8_13 vr19_8_13 vr20_8_13 vr21_8_13 vr22_8_13 vr23_8_13 vr24_8_13 vr25_8_13 vr26_8_13 vr27_8_13 vr28_8_13 vr29_8_13 vr30_8_13 vr31_8_13 ]; +attach variables vrC_8_14 [vr0_8_14 vr1_8_14 vr2_8_14 vr3_8_14 vr4_8_14 vr5_8_14 vr6_8_14 vr7_8_14 vr8_8_14 vr9_8_14 vr10_8_14 vr11_8_14 vr12_8_14 vr13_8_14 vr14_8_14 vr15_8_14 vr16_8_14 vr17_8_14 vr18_8_14 vr19_8_14 vr20_8_14 vr21_8_14 vr22_8_14 vr23_8_14 vr24_8_14 vr25_8_14 vr26_8_14 vr27_8_14 vr28_8_14 vr29_8_14 vr30_8_14 vr31_8_14 ]; +attach variables vrC_8_15 [vr0_8_15 vr1_8_15 vr2_8_15 vr3_8_15 vr4_8_15 vr5_8_15 vr6_8_15 vr7_8_15 vr8_8_15 vr9_8_15 vr10_8_15 vr11_8_15 vr12_8_15 vr13_8_15 vr14_8_15 vr15_8_15 vr16_8_15 vr17_8_15 vr18_8_15 vr19_8_15 vr20_8_15 vr21_8_15 vr22_8_15 vr23_8_15 vr24_8_15 vr25_8_15 vr26_8_15 vr27_8_15 vr28_8_15 vr29_8_15 vr30_8_15 vr31_8_15 ]; + +################################################################ +# Pseudo Instructions +################################################################ + +define pcodeop clearHistory; +define pcodeop countLeadingZeros; +define pcodeop countTrailingZeros; +define pcodeop dataCacheBlockAllocate; +define pcodeop dataCacheBlockFlush; +define pcodeop dataCacheBlockInvalidate; +define pcodeop dataCacheBlockStore; +define pcodeop dataCacheBlockTouch; +define pcodeop dataCacheBlockTouchForStore; +define pcodeop dataCacheBlockClearToZero; +define pcodeop dataCacheCongruenceClassInvalidate; +define pcodeop dataCacheRead; +define pcodeop externalControlIn; +define pcodeop externalControlOut; +define pcodeop enforceInOrderExecutionIO; +define pcodeop instructionCacheBlockInvalidate; +define pcodeop instructionCacheBlockTouch; +define pcodeop instructionCacheCongruenceClassInvalidate; +define pcodeop instructionCacheRead; +define pcodeop instructionSynchronize; + + +define pcodeop floatAddOverflow; +define pcodeop floatDivOverflow; +define pcodeop floatAddRoundedUp; +define pcodeop floatDivRoundedUp; +define pcodeop 
floatAddInexact; +define pcodeop floatDivInexact; +define pcodeop floatAddUnderflow; +define pcodeop floatDivUnderflow; +define pcodeop floatInfinityAdd; +define pcodeop intToFloatRoundedUp; +define pcodeop intToFloatInexact; +define pcodeop invalidFloatToInt; +define pcodeop floatToIntRoundedUp; +define pcodeop floatToIntInexact; +define pcodeop floatInfinityDivide; +define pcodeop floatMaddInexact; +define pcodeop floatMaddRoundedUp; +define pcodeop floatMaddOverflow; +define pcodeop floatMaddUnderflow; +define pcodeop floatInfinityMulZero; + +define pcodeop floatMsubInexact; +define pcodeop floatMsubRoundedUp; +define pcodeop floatMsubOverflow; +define pcodeop floatMsubUnderflow; +define pcodeop floatInfinitySub; + +define pcodeop floatSubRoundedUp; +define pcodeop floatSubInexact; +define pcodeop floatSubOverflow; +define pcodeop floatSubUnderflow; + +define pcodeop floatMulRoundedUp; +define pcodeop floatMulOverflow; +define pcodeop floatMulUnderflow; +define pcodeop floatMulInexact; +define pcodeop sqrtInvalid; +define pcodeop floatSqrtRoundedUp; +define pcodeop floatSqrtInexact; + +define pcodeop eventInterrupt; +define pcodeop illegal; +define pcodeop message; +define pcodeop movebuffer; +define pcodeop stopT; +define pcodeop waitT; + +define pcodeop mematom; + +define pcodeop random; +define pcodeop returnFromInterrupt; +define pcodeop returnFromCriticalInterrupt; +define pcodeop returnFromDebugInterrupt; +define pcodeop returnFromGuestInterrupt; +define pcodeop returnFromMachineCheckInterrupt; +define pcodeop syscall; +define pcodeop slbInvalidateAll; +define pcodeop slbInvalidateEntry; +define pcodeop slbMoveFromEntryESID; +define pcodeop slbMoveFromEntryVSID; +define pcodeop slbMoveToEntry; +define pcodeop storeDoubleWordConditionalIndexed; +define pcodeop storeWordConditionalIndexed; +define pcodeop trapWord; +define pcodeop trapDoubleWordImmediate; +define pcodeop trapDoubleWord; +define pcodeop sync; +define pcodeop loadString; +define pcodeop storeString; + +define pcodeop xer_mac_update; + +define pcodeop macchw; +define pcodeop macchws; +define pcodeop macchwsu; +define pcodeop macchwu; + +define pcodeop machhw; +define pcodeop machhws; +define pcodeop machhwsu; +define pcodeop machhwu; + +define pcodeop maclhw; +define pcodeop maclhws; +define pcodeop maclhwsu; +define pcodeop maclhwu; + +define pcodeop mulchw; +define pcodeop mulchwu; + +define pcodeop mulhhw; +define pcodeop mulhhwu; + +define pcodeop mullhw; +define pcodeop mullhwu; + +define pcodeop nmacchw; +define pcodeop nmacchws; + +define pcodeop nmachhw; +define pcodeop nmachhws; + +define pcodeop nmaclhw; +define pcodeop nmaclhws; + +define pcodeop copytrans; +define pcodeop pastetrans; +define pcodeop transaction; +define pcodeop TLBRead; +define pcodeop TLBSearchIndexed; +define pcodeop TLBWrite; +define pcodeop WriteExternalEnable; +define pcodeop WriteExternalEnableImmediate; + +# This is really used in the altivec version, but since it's a registered pcode op +# and due to the way things get @included, this needs to be here +define pcodeop vectorPermute; + + +################################################################ +# Macros +################################################################ + +macro shiftCarry(value, sa) +{ + local mask = value; # force mask to have same size as value (may vary) + mask = (1 << sa) - 1; + xer_ca = (value s< 0) && ((value & mask)!=0); +} +macro getCrBit(crReg, bitIndex, result) +{ + tmp:1 = crReg >> (3-bitIndex); + result = tmp & 1; +} +macro setCrBit(crReg, 
bitIndex, bit) +{ + shift:1 = 3-bitIndex; + mask:1 = ~(1<<shift); + crReg = (crReg & mask) | (bit << shift); +} + +# a CR field orders its four bits LT, GT, EQ, SO from most- to least-significant, +# hence the 3-bitIndex shift in getCrBit/setCrBit +macro cr0flags(result) { + setCrBit(cr0, 0, (result s< 0)); # 0b100 + setCrBit(cr0, 1, (result s> 0)); # 0b010 + setCrBit(cr0, 2, (result == 0)); # 0b001 + setCrBit(cr0, 3, (xer_so & 1)); +} + +macro addOverflow(a,b) { + xer_ov = scarry(a,b); + xer_so = xer_so || xer_ov; +} + +macro subOverflow(a,b) { + xer_ov = sborrow(a,b); + xer_so = xer_so || xer_ov; +} + +macro addExtendedCarry(op1,op2){ + local carryIn:$(REGISTER_SIZE) = zext(xer_ca); + tmp:$(REGISTER_SIZE) = op2 + carryIn; + xer_ca = carry(op2, carryIn) || carry(op1, tmp); +} + +macro addExtendedOverflow(op1, op2) { + local carryIn:$(REGISTER_SIZE) = zext(xer_ca); + tmp:$(REGISTER_SIZE) = op1 + op2; + xer_ov = scarry(op1,op2) ^^ scarry(tmp, carryIn); + xer_so = xer_so || xer_ov; +} + +# PowerPC sets CA on subtraction when no borrow occurs, hence the negations +macro subExtendedCarry(op1,op2){ + local carryIn = zext(!xer_ca); + local CYa = op1 < op2; + local result = op1 - op2; + xer_ca = !(CYa || (result < carryIn) ); +} + +macro subExtendedOverflow(op1, op2) { + local carryIn = zext(!xer_ca); + local result = op1 - op2; + xer_ov = sborrow( op1, op2 ) ^^ sborrow( result, carryIn ); + xer_so = xer_so || xer_ov; +} + +# check b=0 or (a=0x80000000 and b=-1) +macro divOverflow(a,b) { + xer_ov = (b==0) || ((b==-1) && (a==0x80000000)); + xer_so = xer_so || xer_ov; +} +macro divZero(b) { + xer_ov = (b==0); + xer_so = xer_so || xer_ov; +} + +macro mulOverflow64(result) { + local tmp:4 = result(0); + local sext_tmp:8 = sext(tmp); + xer_ov = (sext_tmp != result); + xer_so = xer_so || xer_ov; +} + +macro mulOverflow128(result) { + local tmp:8 = result(0); + local sext_tmp:16 = sext(tmp); + xer_ov = (sext_tmp != result); + xer_so = xer_so || xer_ov; +} + +macro cr1flags() { + setCrBit(cr1, 0, fp_fx); + setCrBit(cr1, 1, fp_fex); + setCrBit(cr1, 2, fp_vx); + setCrBit(cr1, 3, fp_ox); +} +macro setFPRF(result) { + fp_cc0 = result f< 0; + fp_cc1 = result f> 0; + fp_cc2 = result f== 0; + fp_cc3 = nan(result); +} + +macro setSummaryFPSCR() { + fp_vx = fp_vxsnan | fp_vxisi | fp_vxidi | fp_vxzdz | fp_vximz | fp_vxvc | fp_vxsoft | fp_vxsqrt | fp_vxcvi; + fp_fx = fp_fx | fp_ox | fp_ux | fp_zx | fp_xx; + fp_fex = (fp_vx & fp_ve) ^ (fp_ox & fp_oe) ^ (fp_ux & fp_ue) ^ (fp_zx & fp_ze) ^ (fp_xx & fp_xe); +} + +macro setFPAddFlags(op1, op2, result) { + setFPRF(result); +# fp_fr = floatAddRoundedUp(op1, op2); +# fp_fi = floatAddInexact(op1, op2); +# fp_ox = fp_ox | floatAddOverflow(op1, op2); +# fp_ux = fp_ux | floatAddUnderflow(op1, op2); + fp_xx = fp_xx | fp_fi; + fp_vxsnan = fp_vxsnan | nan(op1) | nan(op2); +# fp_vxisi = fp_vxisi | floatInfinityAdd(op1, op2); + setSummaryFPSCR(); +} +macro setFPDivFlags(op1, op2, result) { + setFPRF(result); +# fp_fr = floatDivRoundedUp(op1, op2); +# fp_fi = floatDivInexact(op1, op2); +# fp_ox = fp_ox | floatDivOverflow(op1, op2); +# fp_ux = fp_ux | floatDivUnderflow(op1, op2); + fp_zx = fp_zx | (op2 f== 0); + fp_xx = fp_xx | fp_fi; + fp_vxsnan = fp_vxsnan | nan(op1) | nan(op2); +# fp_vxidi = fp_vxidi | floatInfinityDivide(op1, op2); + fp_vxzdz = fp_vxzdz | ((op1 f== 0) && (op2 f== 0)); + setSummaryFPSCR(); +} +macro setFPMulFlags(op1, op2, result) { + setFPRF(result); +# fp_fr = floatMulRoundedUp(op1, op2); +# fp_fi = floatMulInexact(op1, op2); +# fp_ox = fp_ox | floatMulOverflow(op1, op2); +# fp_ux = fp_ux | floatMulUnderflow(op1, op2); + fp_xx = fp_xx | fp_fi; + fp_vxsnan = fp_vxsnan | nan(op1) | nan(op2); +# fp_vximz = fp_vximz | floatInfinityMulZero(op1, op2); + setSummaryFPSCR(); +}
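+ +# Note: the commented-out lines in the setFP*Flags macros reference the float* +# pcodeops declared earlier (floatAddRoundedUp, floatMulOverflow, ...); with them +# disabled, fp_fr/fp_fi and the ox/ux bits are never set here, so these macros +# only update the FPRF bits, the directly computable exception bits (fp_zx, +# fp_vxsnan, fp_vxzdz) and the summary bits via setSummaryFPSCR().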
+macro setFPSubFlags(op1, op2, result) { + setFPRF(result); +# fp_fr = floatSubRoundedUp(op1, op2); +# fp_fi = floatSubInexact(op1, op2); +# fp_ox = fp_ox | floatSubOverflow(op1, op2); +# fp_ux = fp_ux | floatSubUnderflow(op1, op2); +# fp_xx = fp_xx | fp_fi; + fp_vxsnan = fp_vxsnan | nan(op1) | nan(op2); +# fp_vxisi = fp_vxisi | floatInfinitySub(op1, op2); + setSummaryFPSCR(); +} + +macro loadRegister(reg, ea) { +@ifdef BIT_64 + reg = zext(*:4(ea)); +@else + reg = *:4(ea); +@endif + ea = ea+4; +} + +macro loadReg(reg) { +@ifdef BIT_64 + reg = zext(*:4(tea)); +@else + reg = *:4(tea); +@endif + tea = tea+4; +} + +macro loadRegisterPartial(reg, ea, sa) { + mask:$(REGISTER_SIZE) = 0xffffffff; + sa = ((4-sa) & 3) * 8; + mask = mask << sa; +@ifdef BIT_64 + reg = zext(*:4(ea)); +@else + reg = *:4(ea); +@endif + reg = reg & mask; + ea = ea + 4; +} + +macro storeRegister(reg, ea) { +@ifdef BIT_64 + *:4(ea) = reg:4; +@else + *:4(ea) = reg; +@endif + ea = ea+4; +} + +macro storeReg(reg) { +@ifdef BIT_64 + *:4(tea) = reg:4; +@else + *:4(tea) = reg; +@endif + tea = tea+4; +} + +macro storeRegisterPartial(reg, ea, sa) { +@ifdef BIT_64 + *:4(ea) = reg:4; +@else + *:4(ea) = reg; +@endif + ea = ea + 4; +} + + +macro packbits( D,a0,a1,a2,a3,a4,a5,a6,a7,a8,a9,a10,a11,a12,a13,a14,a15, + a16,a17,a18,a19,a20,a21,a22,a23,a24,a25,a26,a27,a28,a29,a30,a31) { + D = zext(a31) & 1; + D=D|(zext(a0)&1)<<31; D=D|(zext(a1)&1)<<30; D=D|(zext(a2)&1)<<29; D=D|(zext(a3)&1)<<28; + D=D|(zext(a4)&1)<<27; D=D|(zext(a5)&1)<<26; D=D|(zext(a6)&1)<<25; D=D|(zext(a7)&1)<<24; + D=D|(zext(a8)&1)<<23; D=D|(zext(a9)&1)<<22; D=D|(zext(a10)&1)<<21; D=D|(zext(a11)&1)<<20; + D=D|(zext(a12)&1)<<19; D=D|(zext(a13)&1)<<18; D=D|(zext(a14)&1)<<17; D=D|(zext(a15)&1)<<16; + D=D|(zext(a16)&1)<<15; D=D|(zext(a17)&1)<<14; D=D|(zext(a18)&1)<<13; D=D|(zext(a19)&1)<<12; + D=D|(zext(a20)&1)<<11; D=D|(zext(a21)&1)<<10; D=D|(zext(a22)&1)<<9; D=D|(zext(a23)&1)<<8; + D=D|(zext(a24)&1)<<7; D=D|(zext(a25)&1)<<6; D=D|(zext(a26)&1)<<5; D=D|(zext(a27)&1)<<4; + D=D|(zext(a28)&1)<<3; D=D|(zext(a29)&1)<<2; D=D|(zext(a30)&1)<<1; + } + +macro unpackbits(D,a0,a1,a2,a3,a4,a5,a6,a7,a8,a9,a10,a11,a12,a13,a14,a15, + a16,a17,a18,a19,a20,a21,a22,a23,a24,a25,a26,a27,a28,a29,a30,a31) { + a0=(D&0x80000000)!=0; a1=(D&0x40000000)!=0; a2=(D&0x20000000)!=0; a3=(D&0x10000000)!=0; + a4=(D&0x8000000)!=0; a5=(D&0x4000000)!=0; a6=(D&0x2000000)!=0; a7=(D&0x1000000)!=0; + a8=(D&0x800000)!=0; a9=(D&0x400000)!=0; a10=(D&0x200000)!=0; a11=(D&0x100000)!=0; + a12=(D&0x80000)!=0; a13=(D&0x40000)!=0; a14=(D&0x20000)!=0; a15=(D&0x10000)!=0; + a16=(D&0x8000)!=0; a17=(D&0x4000)!=0; a18=(D&0x2000)!=0; a19=(D&0x1000)!=0; + a20=(D&0x800)!=0; a21=(D&0x400)!=0; a22=(D&0x200)!=0; a23=(D&0x100)!=0; + a24=(D&0x80)!=0; a25=(D&0x40)!=0; a26=(D&0x20)!=0; a27=(D&0x10)!=0; + a28=(D&0x8)!=0; a29=(D&0x4)!=0; a30=(D&0x2)!=0; a31=(D&0x1)!=0; } + +macro packFPSCR(tmp) { + packbits(tmp, fp_fx, fp_fex, fp_vx, fp_ox, fp_ux, fp_zx, fp_xx, fp_vxsnan, + fp_vxisi, fp_vxidi, fp_vxzdz, fp_vximz, fp_vxvc, fp_fr, fp_fi, fp_c, + fp_cc0, fp_cc1, fp_cc2, fp_cc3, fp_reserve1, fp_vxsoft, fp_vxsqrt, + fp_vxcvi, fp_ve, fp_oe, fp_ue, fp_ze, fp_xe, fp_ni, fp_rn0, fp_rn1); +} +macro unpackFPSCR(tmp) { + unpackbits(tmp, fp_fx, fp_fex, fp_vx, fp_ox, + fp_ux, fp_zx, fp_xx, fp_vxsnan, + fp_vxisi, fp_vxidi, fp_vxzdz, fp_vximz, + fp_vxvc, fp_fr, fp_fi, fp_c, + fp_cc0, fp_cc1, fp_cc2, fp_cc3, + fp_reserve1, fp_vxsoft, fp_vxsqrt, fp_vxcvi, + fp_ve, fp_oe, fp_ue, fp_ze, + fp_xe, fp_ni, fp_rn0, fp_rn1); +} + + +################################################################ +# Sub-Constructors +################################################################ +REL_ABS: "a" is AA = 1 {} +REL_ABS: is 
AA = 0 {} + +addressLI: reloc is LI & AA=0 [ reloc = inst_start + LI*4;] { export *[ram]:4 reloc; } +addressLI: reloc is LI & AA=1 [ reloc = LI*4; ] { export *[ram]:4 reloc; } +addressBD: reloc is BD & AA=0 [ reloc = inst_start + BD*4; ] { export *[ram]:4 reloc; } +addressBD: reloc is BD & AA=1 [ reloc = BD*4; ] { export *[ram]:4 reloc; } + +OFF16SH: val is D0 & D1 & D2 [ val = ((D0 << 6) | (D1 << 1) | D2) << 16; ] { export *[const]:4 val;} + +# X 00-------------------------------06 07-07 08-----------10 11-----------13 14------15 16----------------------------------------------------------------------------31 +# X -----------------?-----------------|BO_1=1|-------?-------|-----BI_CR-----|--BI_CC---|---------------------------------------?----------------------------------------| +CC: "lt" is BI_CC=0 & BO_1=1 & BI_CR & BI_CC { tmp:1 = 0; getCrBit(BI_CR, BI_CC, tmp); export tmp; } +CC: "le" is BI_CC=1 & BO_1=0 & BI_CR & BI_CC { tmp:1 = 0; getCrBit(BI_CR, BI_CC, tmp); tmp = !tmp; export tmp; } +CC: "eq" is BI_CC=2 & BO_1=1 & BI_CR & BI_CC { tmp:1 = 0; getCrBit(BI_CR, BI_CC, tmp); export tmp; } +CC: "ge" is BI_CC=0 & BO_1=0 & BI_CR & BI_CC { tmp:1 = 0; getCrBit(BI_CR, BI_CC, tmp); tmp = !tmp; export tmp; } +CC: "gt" is BI_CC=1 & BO_1=1 & BI_CR & BI_CC { tmp:1 = 0; getCrBit(BI_CR, BI_CC, tmp); export tmp; } +CC: "ne" is BI_CC=2 & BO_1=0 & BI_CR & BI_CC { tmp:1 = 0; getCrBit(BI_CR, BI_CC, tmp); tmp = !tmp; export tmp; } +CC: "so" is BI_CC=3 & BO_1=1 & BI_CR & BI_CC { tmp:1 = 0; getCrBit(BI_CR, BI_CC, tmp); export tmp; } +CC: "ns" is BI_CC=3 & BO_1=0 & BI_CR & BI_CC { tmp:1 = 0; getCrBit(BI_CR, BI_CC, tmp); tmp = !tmp; export tmp; } + +TOm: "lt" is TO=16 { } +TOm: "le" is TO=20 { } +TOm: "eq" is TO=4 { } +TOm: "ge" is TO=12 { } +TOm: "gt" is TO=8 { } +TOm: "ne" is TO=24 { } +TOm: "llt" is TO=2 { } +TOm: "lle" is TO=6 { } +TOm: "lge" is TO=5 { } +TOm: "lgt" is TO=1 { } +TOm: "" is TO { } + +CTR_DEC: "z" is BO_3=1 {CTR = CTR-1; tmp:1 = (CTR == 0); export tmp; } +CTR_DEC: "nz" is BO_3=0 {CTR = CTR-1; tmp:1 = (CTR != 0); export tmp; } + +CC_TF: "t" is BO_1=1 {} +CC_TF: "f" is BO_1=0 {} + +# OP=19 & CC_D_OP & CC_OP & CC_B_OP & CR_D & CR_D_CC & XOP_1_10=129 & BIT_0=0 + + +# X 00---------------------------------------------------10 11-----------13 14------15 16----------------------------------------------------------------------------31 +# X ---------------------------?---------------------------|----BI_CR=0----|--BI_CC---|---------------------------------------?----------------------------------------| +CC_OP: "lt" is BI_CC=0 & BI_CR=0 & BI_CC { tmp:1 = 0; getCrBit(cr0, BI_CC, tmp); export tmp; } +CC_OP: "eq" is BI_CC=2 & BI_CR=0 & BI_CC { tmp:1 = 0; getCrBit(cr0, BI_CC, tmp); export tmp; } +CC_OP: "gt" is BI_CC=1 & BI_CR=0 & BI_CC { tmp:1 = 0; getCrBit(cr0, BI_CC, tmp); export tmp; } +CC_OP: "so" is BI_CC=3 & BI_CR=0 & BI_CC { tmp:1 = 0; getCrBit(cr0, BI_CC, tmp); export tmp; } +CC_OP: "4*"^BI_CR^"+lt" is BI_CC=0 & BI_CR & BI_CC { tmp:1 = 0; getCrBit(BI_CR, BI_CC, tmp); export tmp; } +CC_OP: "4*"^BI_CR^"+eq" is BI_CC=2 & BI_CR & BI_CC { tmp:1 = 0; getCrBit(BI_CR, BI_CC, tmp); export tmp; } +CC_OP: "4*"^BI_CR^"+gt" is BI_CC=1 & BI_CR & BI_CC { tmp:1 = 0; getCrBit(BI_CR, BI_CC, tmp); export tmp; } +CC_OP: "4*"^BI_CR^"+so" is BI_CC=3 & BI_CR & BI_CC { tmp:1 = 0; getCrBit(BI_CR, BI_CC, tmp); export tmp; } + +# X 00----------------------------------------------------------------------------15 16-----------18 19------20 21---------------------------------------------------31 +# X 
---------------------------------------?----------------------------------------|----CR_B=0-----|-CR_B_CC--|---------------------------?---------------------------| +CC_B_OP: "lt" is CR_B_CC=0 & CR_B=0 & CR_B_CC { tmp:1 = 0; getCrBit(cr0, CR_B_CC, tmp); export tmp; } +CC_B_OP: "eq" is CR_B_CC=2 & CR_B=0 & CR_B_CC { tmp:1 = 0; getCrBit(cr0, CR_B_CC, tmp); export tmp; } +CC_B_OP: "gt" is CR_B_CC=1 & CR_B=0 & CR_B_CC { tmp:1 = 0; getCrBit(cr0, CR_B_CC, tmp); export tmp; } +CC_B_OP: "so" is CR_B_CC=3 & CR_B=0 & CR_B_CC { tmp:1 = 0; getCrBit(cr0, CR_B_CC, tmp); export tmp; } +CC_B_OP: "4*"^CR_B^"+lt" is CR_B_CC=0 & CR_B & CR_B_CC { tmp:1 = 0; getCrBit(CR_B, CR_B_CC, tmp); export tmp; } +CC_B_OP: "4*"^CR_B^"+eq" is CR_B_CC=2 & CR_B & CR_B_CC { tmp:1 = 0; getCrBit(CR_B, CR_B_CC, tmp); export tmp; } +CC_B_OP: "4*"^CR_B^"+gt" is CR_B_CC=1 & CR_B & CR_B_CC { tmp:1 = 0; getCrBit(CR_B, CR_B_CC, tmp); export tmp; } +CC_B_OP: "4*"^CR_B^"+so" is CR_B_CC=3 & CR_B & CR_B_CC { tmp:1 = 0; getCrBit(CR_B, CR_B_CC, tmp); export tmp; } + +# X 00-----------------------------------------------------------------------------------------------------20 21-----------23 24------25 26--------------------------31 +# X ----------------------------------------------------?----------------------------------------------------|----CR_X=0-----|-CR_X_CC--|--------------?---------------| +CC_X_OP: cr0 is CR_X_CC=0 & CR_X=0 & CR_X_CC & cr0 { tmp:1 = 0; getCrBit(cr0, CR_X_CC, tmp); export tmp; } +CC_X_OP: cr0 is CR_X_CC=1 & CR_X=0 & CR_X_CC & cr0 { tmp:1 = 0; getCrBit(cr0, CR_X_CC, tmp); export tmp; } +CC_X_OP: cr0 is CR_X_CC=2 & CR_X=0 & CR_X_CC & cr0 { tmp:1 = 0; getCrBit(cr0, CR_X_CC, tmp); export tmp; } +CC_X_OP: cr0 is CR_X_CC=3 & CR_X=0 & CR_X_CC & cr0 { tmp:1 = 0; getCrBit(cr0, CR_X_CC, tmp); export tmp; } +CC_X_OP: CR_X is CR_X_CC=0 & CR_X & CR_X_CC { tmp:1 = 0; getCrBit(CR_X, CR_X_CC, tmp); export tmp; } +CC_X_OP: CR_X is CR_X_CC=1 & CR_X & CR_X_CC { tmp:1 = 0; getCrBit(CR_X, CR_X_CC, tmp); export tmp; } +CC_X_OP: CR_X is CR_X_CC=2 & CR_X & CR_X_CC { tmp:1 = 0; getCrBit(CR_X, CR_X_CC, tmp); export tmp; } +CC_X_OP: CR_X is CR_X_CC=3 & CR_X & CR_X_CC { tmp:1 = 0; getCrBit(CR_X, CR_X_CC, tmp); export tmp; } + +CC_X_OPm: "lt" is CR_X_CC=0 & CR_X=0 & CR_X_CC { } +CC_X_OPm: "gt" is CR_X_CC=1 & CR_X=0 & CR_X_CC { } +CC_X_OPm: "eq" is CR_X_CC=2 & CR_X=0 & CR_X_CC { } +CC_X_OPm: "so" is CR_X_CC=3 & CR_X=0 & CR_X_CC { } +CC_X_OPm: "lt" is CR_X_CC=0 & CR_X & CR_X_CC { } +CC_X_OPm: "gt" is CR_X_CC=1 & CR_X & CR_X_CC { } +CC_X_OPm: "eq" is CR_X_CC=2 & CR_X & CR_X_CC { } +CC_X_OPm: "so" is CR_X_CC=3 & CR_X & CR_X_CC { } + +# X 00--------------------------05 06-----------08 09------10 11-----------------------------------------------------------------------------------------------------31 +# X --------------?---------------|----CR_D=0-----|-CR_D_CC--|----------------------------------------------------?----------------------------------------------------| +CC_D_OP: "lt" is CR_D_CC=0 & CR_D=0 & CR_D_CC { tmp:1 = 0; getCrBit(cr0, CR_D_CC, tmp); export tmp; } +CC_D_OP: "eq" is CR_D_CC=2 & CR_D=0 & CR_D_CC { tmp:1 = 0; getCrBit(cr0, CR_D_CC, tmp); export tmp; } +CC_D_OP: "gt" is CR_D_CC=1 & CR_D=0 & CR_D_CC { tmp:1 = 0; getCrBit(cr0, CR_D_CC, tmp); export tmp; } +CC_D_OP: "so" is CR_D_CC=3 & CR_D=0 & CR_D_CC { tmp:1 = 0; getCrBit(cr0, CR_D_CC, tmp); export tmp; } +CC_D_OP: "4*"^CR_D^"+lt" is CR_D_CC=0 & CR_D & CR_D_CC { tmp:1 = 0; getCrBit(CR_D, CR_D_CC, tmp); export tmp; } +CC_D_OP: "4*"^CR_D^"+eq" is CR_D_CC=2 & CR_D & CR_D_CC { tmp:1 = 0; 
getCrBit(CR_D, CR_D_CC, tmp); export tmp; } +CC_D_OP: "4*"^CR_D^"+gt" is CR_D_CC=1 & CR_D & CR_D_CC { tmp:1 = 0; getCrBit(CR_D, CR_D_CC, tmp); export tmp; } +CC_D_OP: "4*"^CR_D^"+so" is CR_D_CC=3 & CR_D & CR_D_CC { tmp:1 = 0; getCrBit(CR_D, CR_D_CC, tmp); export tmp; } + +RA_OR_ZERO: A is A { export A; } +RA_OR_ZERO: 0 is A=0 { export 0:$(REGISTER_SIZE); } + +RB_OR_ZERO: B is B { export B; } +RB_OR_ZERO: 0 is B=0 { export 0:$(REGISTER_SIZE); } + +RS_OR_ZERO: S is S { export S; } +RS_OR_ZERO: 0 is S=0 { export 0:$(REGISTER_SIZE); } + +@ifdef BIT_64 +MB: mbValue is MBH & MBL [ mbValue=(MBH<<5)|MBL; ] { export *[const]:4 mbValue; } +SH: shValue is SHH & SHL [ shValue=(SHH<<5)|SHL; ] { export *[const]:4 shValue; } + +rotmask: mask is MBL & ME [mask = ((((ME-MBL)>>8) $and 1)*0xffffffffffffffff) $xor (0x7fffffff>>ME) $xor (0xffffffff>>MBL); ] { export *[const]:8 mask; } + +rotmask_SH: masksh, mbValue, shValue is MBL & MBH & SHL & SHH + [ mbValue= (MBH<<5)|MBL; + shValue= (SHH<<5)|SHL; + masksh = ((((shValue-mbValue)>>8) $and 1)*0xffffffffffffffff) $xor ((0x7fffffffffffffff >> shValue) $xor (0xffffffffffffffff >> mbValue)); + ] +{ + local start:4 = mbValue; + local stop:4 = 63-shValue; + mask_tmp:8 = (zext(start > stop) * 0xffffffffffffffff) ^ (0x7fffffffffffffff>>stop) ^ (0xffffffffffffffff>>start); + export *[const]:8 mask_tmp; +} + +rotmask_Z: mask, mbValue is MBL & MBH [mbValue= (MBH<<5)|MBL; mask = ~(0xffffffffffffffff >> (mbValue+1)); ] +{ mask_tmp:8 = ~(0xffffffffffffffff >> (mbValue+1)); export *[const]:8 mask_tmp; } + +@else +rotmask: mask is MBL & ME [ mask = ((((ME-MBL)>>8) $and 1)*0xffffffff) $xor (0x7fffffff>>ME) $xor (0xffffffff>>MBL); ] { export *[const]:4 mask; } +@endif + +DSIZE: "w" is L {} # L is a don't care bit in 32-bit languages although it should always be 0 +@ifdef BIT_64 # L can only be 1 when in 64 bit language +DSIZE: "d" is L=1 {} +@endif + +@ifdef BIT_64 +REG_A: is L=0 & A {tmp:8 = sext(A:4); export tmp; } +REG_A: is L=1 & A {export A; } +REG_B: is L=0 & B {tmp:8 = sext(B:4); export tmp; } +REG_B: is L=1 & B {export B; } +@else # L is a don't care bit in 32-bit languages although it should always be 0 +REG_A: is A { export A; } +REG_B: is B { export B; } +@endif + +@ifdef BIT_64 +UREG_A: is L=0 & A {tmp:8 = zext(A:4); export tmp; } +UREG_A: is L=1 & A {export A; } +UREG_B: is L=0 & B {tmp:8 = zext(B:4); export tmp; } +UREG_B: is L=1 & B {export B; } +@else # L is a don't care bit in 32-bit languages although it should always be 0 +UREG_A: is A { export A; } +UREG_B: is B { export B; } +@endif + +dPlusRaOrZeroAddress: SIMM(RA_OR_ZERO) is SIMM & RA_OR_ZERO { tmp:$(REGISTER_SIZE) = RA_OR_ZERO+SIMM; export tmp; } +dPlusRaAddress: SIMM(A) is SIMM & A {tmp:$(REGISTER_SIZE) = A+SIMM; export tmp; } + +dUI16PlusRAOrZeroAddress: val^"("^RA_OR_ZERO^")" is RA_OR_ZERO & UI_16_s8 [ val = UI_16_s8 << 3; ] { ea:$(REGISTER_SIZE) = RA_OR_ZERO + val; export ea; } + +@ifdef BIT_64 +dsPlusRaAddress: simm_ds(A) is SIMM_DS & A [simm_ds = SIMM_DS << 2;] {tmp:8 = simm_ds + A;export tmp;} +dsPlusRaOrZeroAddress: simm_ds(RA_OR_ZERO) is SIMM_DS & RA_OR_ZERO [simm_ds = SIMM_DS << 2;] {tmp:8 = simm_ds + RA_OR_ZERO;export tmp;} +@endif + + +FPSCR_CRFS: is CRFS=0 {tmp:1 = fp_fx<<3 | fp_fex<<2 | fp_vx<<1 | fp_ox; fp_fx=0; fp_ox=0; export tmp;} +FPSCR_CRFS: is CRFS=1 {tmp:1 = fp_ux<<3 | fp_zx<<2 | fp_xx<<1 | fp_vxsnan; fp_ux=0; fp_zx=0; fp_xx=0; fp_ux=0;export tmp;} +FPSCR_CRFS: is CRFS=2 {tmp:1 = fp_vxisi<<3 | fp_vxidi<<2 | fp_vxzdz<<1 | fp_vximz; fp_vxisi=0; fp_vxidi=0; fp_vxzdz=0; fp_vximz=0; 
+ +DSIZE: "w" is L {} # L is a don't care bit in 32-bit languages although it should always be 0 +@ifdef BIT_64 # L can only be 1 when in 64 bit language +DSIZE: "d" is L=1 {} +@endif + +@ifdef BIT_64 +REG_A: is L=0 & A {tmp:8 = sext(A:4); export tmp; } +REG_A: is L=1 & A {export A; } +REG_B: is L=0 & B {tmp:8 = sext(B:4); export tmp; } +REG_B: is L=1 & B {export B; } +@else # L is a don't care bit in 32-bit languages although it should always be 0 +REG_A: is A { export A; } +REG_B: is B { export B; } +@endif + +@ifdef BIT_64 +UREG_A: is L=0 & A {tmp:8 = zext(A:4); export tmp; } +UREG_A: is L=1 & A {export A; } +UREG_B: is L=0 & B {tmp:8 = zext(B:4); export tmp; } +UREG_B: is L=1 & B {export B; } +@else # L is a don't care bit in 32-bit languages although it should always be 0 +UREG_A: is A { export A; } +UREG_B: is B { export B; } +@endif + +dPlusRaOrZeroAddress: SIMM(RA_OR_ZERO) is SIMM & RA_OR_ZERO { tmp:$(REGISTER_SIZE) = RA_OR_ZERO+SIMM; export tmp; } +dPlusRaAddress: SIMM(A) is SIMM & A {tmp:$(REGISTER_SIZE) = A+SIMM; export tmp; } + +dUI16PlusRAOrZeroAddress: val^"("^RA_OR_ZERO^")" is RA_OR_ZERO & UI_16_s8 [ val = UI_16_s8 << 3; ] { ea:$(REGISTER_SIZE) = RA_OR_ZERO + val; export ea; } + +@ifdef BIT_64 +dsPlusRaAddress: simm_ds(A) is SIMM_DS & A [simm_ds = SIMM_DS << 2;] {tmp:8 = simm_ds + A;export tmp;} +dsPlusRaOrZeroAddress: simm_ds(RA_OR_ZERO) is SIMM_DS & RA_OR_ZERO [simm_ds = SIMM_DS << 2;] {tmp:8 = simm_ds + RA_OR_ZERO;export tmp;} +@endif + + +FPSCR_CRFS: is CRFS=0 {tmp:1 = fp_fx<<3 | fp_fex<<2 | fp_vx<<1 | fp_ox; fp_fx=0; fp_ox=0; export tmp;} +FPSCR_CRFS: is CRFS=1 {tmp:1 = fp_ux<<3 | fp_zx<<2 | fp_xx<<1 | fp_vxsnan; fp_ux=0; fp_zx=0; fp_xx=0; fp_vxsnan=0; export tmp;} +FPSCR_CRFS: is CRFS=2 {tmp:1 = fp_vxisi<<3 | fp_vxidi<<2 | fp_vxzdz<<1 | fp_vximz; fp_vxisi=0; fp_vxidi=0; fp_vxzdz=0; fp_vximz=0; export tmp;} +FPSCR_CRFS: is CRFS=3 {tmp:1 = fp_vxvc<<3 | fp_fr<<2 | fp_fi<<1 | fp_c; fp_vxvc=0; export tmp;} +FPSCR_CRFS: is CRFS=4 {tmp:1 = fp_cc0<<3 | fp_cc1<<2 | fp_cc2<<1 | fp_cc3; export tmp;} +FPSCR_CRFS: is CRFS=5 {tmp:1 = fp_vxsoft<<2 | fp_vxsqrt<<1 | fp_vxcvi; fp_vxsoft=0; fp_vxsqrt=0; fp_vxcvi=0; export tmp;} +FPSCR_CRFS: is CRFS=6 {tmp:1 = fp_ve<<3 | fp_oe<<2 | fp_ue<<1 | fp_ze; export tmp;} +FPSCR_CRFS: is CRFS=7 {tmp:1 = fp_xe<<3 | fp_ni<<2 | fp_rn0<<1 | fp_rn1; export tmp;} + +CRM_CR: cr7 is CRM=1 & cr7 {tmp:4 = zext(cr7);export tmp;} +CRM_CR: cr6 is CRM=2 & cr6 {tmp:4 = zext(cr6) << 4;export tmp;} +CRM_CR: cr5 is CRM=4 & cr5 {tmp:4 = zext(cr5) << 8;export tmp;} +CRM_CR: cr4 is CRM=8 & cr4 {tmp:4 = zext(cr4) << 12;export tmp;} +CRM_CR: cr3 is CRM=16 & cr3 {tmp:4 = zext(cr3) << 16;export tmp;} +CRM_CR: cr2 is CRM=32 & cr2 {tmp:4 = zext(cr2) << 20;export tmp;} +CRM_CR: cr1 is CRM=64 & cr1 {tmp:4 = zext(cr1) << 24;export tmp;} +CRM_CR: cr0 is CRM=128 & cr0 {tmp:4 = zext(cr0) << 28;export tmp;} + + +################################################################ +# Instructions +################################################################ + + +@include "ppc_instructions.sinc" +@include "ppc_embedded.sinc" + diff --git a/src/third-party/sleigh/processors/PowerPC/data/languages/ppc_embedded.sinc b/src/third-party/sleigh/processors/PowerPC/data/languages/ppc_embedded.sinc new file mode 100644 index 00000000..be433159 --- /dev/null +++ b/src/third-party/sleigh/processors/PowerPC/data/languages/ppc_embedded.sinc @@ -0,0 +1,219 @@ +# these are identified as part of the PowerPC Embedded Architecture + +#dcba 0,r0 0x7c 00 05 ec +:dcba RA_OR_ZERO,B is OP=31 & BITS_21_25=0 & B & XOP_1_10=758 & BIT_0=0 & RA_OR_ZERO +{ + ea:$(REGISTER_SIZE) = RA_OR_ZERO + B; + dataCacheBlockAllocate(ea); +} + +#dcbf 0,r0 0x7c 00 00 ac +:dcbf RA_OR_ZERO,B is OP=31 & BITS_21_25=0 & B & XOP_1_10=86 & BIT_0=0 & RA_OR_ZERO +{ + ea:$(REGISTER_SIZE) = RA_OR_ZERO + B; + dataCacheBlockFlush(ea); +} + +#dcbi 0,r0 0x7c 00 03 ac +:dcbi RA_OR_ZERO,B is OP=31 & BITS_21_25=0 & B & XOP_1_10=470 & BIT_0=0 & RA_OR_ZERO +{ + ea:$(REGISTER_SIZE) = RA_OR_ZERO + B; + dataCacheBlockInvalidate(ea); +} + +#dcbst 0,r0 0x7c 00 00 6c +:dcbst RA_OR_ZERO,B is OP=31 & BITS_21_25=0 & B & XOP_1_10=54 & BIT_0=0 & RA_OR_ZERO +{ + ea:$(REGISTER_SIZE) = RA_OR_ZERO + B; + dataCacheBlockStore(ea); +} + +#dcbt 0,r0 0x7c 00 02 2c +:dcbt RA_OR_ZERO,B is OP=31 & BITS_21_25=0 & B & XOP_1_10=278 & BIT_0=0 & RA_OR_ZERO +{ + ea:$(REGISTER_SIZE) = RA_OR_ZERO + B; + dataCacheBlockTouch(ea); +} + +#dcbtst 0,r0 0x7c 00 01 ec +:dcbtst RA_OR_ZERO,B is OP=31 & BITS_21_25=0 & B & XOP_1_10=246 & BIT_0=0 & RA_OR_ZERO +{ + ea:$(REGISTER_SIZE) = RA_OR_ZERO + B; + dataCacheBlockTouchForStore(ea); +} + +#dcbz 0,r0 0x7c 00 07 ec +:dcbz RA_OR_ZERO,B is OP=31 & BITS_21_25=0 & B & XOP_1_10=1014 & BIT_0=0 & RA_OR_ZERO +{ + ea:$(REGISTER_SIZE) = RA_OR_ZERO + B; + dataCacheBlockClearToZero(ea); +} + +@ifndef IS_ISA +# this is equivalent to "mbar 0" +#eieio 0x7c 00 06 ac +:eieio is $(NOTVLE) & OP=31 & BITS_21_25=0 & BITS_16_20=0 & BITS_11_15=0 & XOP_1_10=854 & BIT_0=0 +{ + enforceInOrderExecutionIO(); +} +@endif + +#icbi r0,r0 0x7c 00 07 ac +:icbi RA_OR_ZERO,B is OP=31 & BITS_21_25=0 & B & XOP_1_10=982 & BIT_0=0 & RA_OR_ZERO +{ + ea:$(REGISTER_SIZE) = RA_OR_ZERO + B; + instructionCacheBlockInvalidate(ea); +} + +#icbt 0,r0 0x7c 00 02 0c +:icbt BITS_21_24,RA_OR_ZERO,B is OP=31 & BIT_25=0 & BITS_21_24 & RA_OR_ZERO & B & XOP_1_10=22 & BIT_0=0 +{ + ea:$(REGISTER_SIZE) = 
RA_OR_ZERO + B; + instructionCacheBlockTouch(ea); +} + +#isync 0x4c 00 01 2c +:isync is $(NOTVLE) & OP=19 & BITS_21_25=0 & BITS_16_20=0 & BITS_11_15=0 & XOP_1_10=150 & BIT_0=0 +{ + instructionSynchronize(); +} + +#mfdcr r0,DCRN 0x7c 00 02 86 +:mfdcr D, DCRN is OP=31 & D & DCRN & XOP_1_10=323 & BIT_0=0 +{ + D = DCRN; +} + +#mfmsr r0 0x7c 00 00 a6 +:mfmsr D is OP=31 & D & BITS_11_20=0 & XOP_1_10=83 & BIT_0=0 +{ + D = MSR; +} + +#mfspr r0 0x7c 00 02 a6 +:mfspr D,SPRVAL is OP=31 & D & SPRVAL & XOP_1_10=339 & BIT_0=0 +{ + D = SPRVAL; +} + +#mftb r0,TBLr 0x7c 0c 42 e6 +:mftb D,TBLr is $(NOTVLE) & OP=31 & D & TBR=392 & TBLr & XOP_1_10=371 & BIT_0=0 +{ + D = TBLr; +} +#mftb r0,TBUr 0x7c 0d 42 e6 +:mftb D,TBUr is $(NOTVLE) & OP=31 & D & TBR=424 & TBUr & XOP_1_10=371 & BIT_0=0 +{ + D = TBUr; +} + +#mtdcr DCRN,r0 0x7c 00 03 86 +:mtdcr DCRN, D is OP=31 & D & DCRN & XOP_1_10=451 & BIT_0=0 +{ + DCRN = D; +} + +#mtmsr r0,0 0x7c 00 01 24 +:mtmsr S,0 is OP=31 & S & BITS_17_20=0 & MSR_L=0 & BITS_11_15=0 & XOP_1_10=146 & BIT_0=0 +{ + bit58:$(REGISTER_SIZE) = (S >> 5) & 1; #bit 58 + bit49:$(REGISTER_SIZE) = (S >> 14)& 1; #bit 49 + bit59:$(REGISTER_SIZE) = (S >> 4) & 1; #bit 59 +@ifdef BIT_64 + tmp:8 = S & 0x00000000ffff6fcf; #0b00000000000000000000000000000000 1111 1111 1111 1111 0110 1111 1100 1111 + tmp = tmp & ((bit58 | bit49) << 5); + tmp = tmp & ((bit59 | bit49) << 4); + MSR = MSR & 0xffffffff00009030 | tmp; +@else + tmp:4 = S & 0xffff6fcf; + tmp = tmp & ((bit58 | bit49) << 5); + tmp = tmp & ((bit59 | bit49) << 4); + MSR = MSR & 0x00009000 | tmp; +@endif +} + +#mtmsr r0,1 0x7c 01 01 24 +:mtmsr S,1 is OP=31 & S & BITS_17_20=0 & MSR_L=1 & BITS_11_15=0 & XOP_1_10=146 & BIT_0=0 +{ +@ifdef BIT_64 + mask:8 = 0x000000000000fffe; +@else + mask:4 = 0x0000fffe; +@endif + MSR = (MSR & ~mask) | (S & mask); +} + +#mtspr spr000,r0 0x7c 00 02 a6 +:mtspr SPRVAL,S is OP=31 & SPRVAL & S & XOP_1_10=467 & BIT_0=0 +{ + SPRVAL = S; +} + +:mtspr SPRVAL,S is OP=31 & BITS_11_20=0x100 & BITS_21_25=0 & SPRVAL & S & XOP_1_10=467 & BIT_0=0 + [ linkreg=1; globalset(inst_next,linkreg); ] +{ + SPRVAL = S; +} + +:mtspr SPRVAL,S is linkreg=1 & OP=31 & BITS_11_20=0x100 & BITS_21_25=0 & SPRVAL & S & XOP_1_10=467 & BIT_0=0 + [ linkreg=0; globalset(inst_start,linkreg); ] +{ + SPRVAL = S; +} + +:rfci is $(NOTVLE) & OP=19 & BITS_21_25=0 & BITS_16_20=0 & BITS_11_15=0 & XOP_1_10=51 & BIT_0=0 +{ + MSR = returnFromCriticalInterrupt(MSR, CSRR1); + local ra = CSRR0; + return[ra]; + +} + +#rfi 0x4c 00 00 64 +:rfi is $(NOTVLE) & OP=19 & BITS_11_25=0 & XOP_1_10=50 & BIT_0=0 +{ + MSR = returnFromInterrupt(MSR, SRR1); + local ra = SRR0; + return[ra]; +} + + +#tlbre 0x7c 00 07 64 +:tlbre is OP=31 & XOP_1_10=946 +{ + TLBRead(); +} + +#tlbsx r0,r0,r0 0x7c 00 07 24 +:tlbsx D,RA_OR_ZERO,B is OP=31 & D & B & XOP_1_10=914 & RA_OR_ZERO & Rc=0 +{ + ea:$(REGISTER_SIZE) = RA_OR_ZERO + B; + D = TLBSearchIndexed(D,ea); +} + +#tlbsx. r0,r0,r0 0x7c 00 07 25 +:tlbsx. 
D,RA_OR_ZERO,B is $(NOTVLE) & OP=31 & D & B & XOP_1_10=914 & RA_OR_ZERO & Rc=1 +{ + ea:$(REGISTER_SIZE) = RA_OR_ZERO + B; + D = TLBSearchIndexed(D,ea); + cr0flags(D); +} + +#tlbwe 0x7c 00 07 a4 +:tlbwe D,A,B_BITS is OP=31 & D & A & B_BITS & XOP_1_10=978 +{ + D = TLBWrite(D,A,B_BITS:1); +} + + +#wrtee r0 0x7c 00 01 06 +:wrtee S is OP=31 & S & XOP_1_10=131 +{ + WriteExternalEnable(S); +} + +#wrteei 0 0x7c 00 01 46 +:wrteei BIT_15 is OP=31 & BIT_15 & XOP_1_10=163 +{ + WriteExternalEnableImmediate(BIT_15:1); +} diff --git a/src/third-party/sleigh/processors/PowerPC/data/languages/ppc_instructions.sinc b/src/third-party/sleigh/processors/PowerPC/data/languages/ppc_instructions.sinc new file mode 100644 index 00000000..a3fa7a8f --- /dev/null +++ b/src/third-party/sleigh/processors/PowerPC/data/languages/ppc_instructions.sinc @@ -0,0 +1,4521 @@ +#=========================================================== +# ADD +#=========================================================== + +#add r1,r2,r3 0x7c 22 1a 14 +:add D,A,B is OP=31 & D & A & B & OE=0 & XOP_1_9=266 & Rc=0 +{ + D = A + B; +} + +#add. r1,r2,r3 0x7c 22 1a 15 +:add. D,A,B is OP=31 & D & A & B & OE=0 & XOP_1_9=266 & Rc=1 +{ + D = A + B; + cr0flags(D); +} + +#addo r1,r2,r3 0x7c 22 1e 14 +:addo D,A,B is OP=31 & D & A & B & OE=1 & XOP_1_9=266 & Rc=0 +{ + addOverflow(A,B); + D = A + B; +} + +#addo. r1,r2,r3 0x7c 22 1e 15 +:addo. D,A,B is OP=31 & D & A & B & OE=1 & XOP_1_9=266 & Rc=1 +{ + addOverflow(A,B); + D = A + B; + cr0flags(D); +} + +#addc r1,r2,r3 0x7c 22 18 14 +:addc D,A,B is OP=31 & D & A & B & OE=0 & XOP_1_9=10 & Rc=0 +{ + xer_ca = carry(A,B); + D = A + B; +} + +#addc. r1,r2,r3 0x7c 22 18 15 +:addc. D,A,B is OP=31 & D & A & B & OE=0 & XOP_1_9=10 & Rc=1 +{ + xer_ca = carry(A,B); + D = A + B; + cr0flags(D); +} + +#addco r1,r2,r3 0x7c 22 1c 14 +:addco D,A,B is OP=31 & D & A & B & OE=1 & XOP_1_9=10 & Rc=0 +{ + xer_ca = carry(A,B); + addOverflow( A, B ); + D = A + B; +} + +#addco. r1,r2,r3 0x7c 22 1c 15 +:addco. D,A,B is OP=31 & D & A & B & OE=1 & XOP_1_9=10 & Rc=1 +{ + xer_ca = carry(A,B); + addOverflow( A, B ); + D = A + B; + cr0flags(D); +} + +#adde r1,r2,r3 0x7c 22 19 14 +:adde D,A,B is OP=31 & D & A & B & OE=0 & XOP_1_9=138 & Rc=0 +{ + zextCarry:$(REGISTER_SIZE) = zext(xer_ca); + addExtendedCarry(A,B); + D=A + B + zextCarry; +} + +#adde. r1,r2,r3 0x7c 22 19 15 +:adde. D,A,B is OP=31 & D & A & B & OE=0 & XOP_1_9=138 & Rc=1 +{ + zextCarry:$(REGISTER_SIZE) = zext(xer_ca); + addExtendedCarry(A,B); + D=A + B + zextCarry; + cr0flags(D); +} + +#addeo r1,r2,r3 0x7c 22 1d 14 +:addeo D,A,B is OP=31 & D & A & B & OE=1 & XOP_1_9=138 & Rc=0 +{ + zextCarry:$(REGISTER_SIZE) = zext(xer_ca); + addExtendedOverflow(A,B); + addExtendedCarry(A,B); + D=A + B + zextCarry; +} + +#addeo. r1,r2,r3 0x7c 22 1d 15 +:addeo. 
D,A,B is OP=31 & D & A & B & OE=1 & XOP_1_9=138 & Rc=1 +{ + zextCarry:$(REGISTER_SIZE) = zext(xer_ca); + addExtendedOverflow(A,B); + addExtendedCarry(A,B); + D=A + B + zextCarry; + cr0flags(D); +} + +#addi r0,0x7fff 0x38 00 7f ff +#addi r0,1 0x38 01 00 01 +:addi D,A,SIMM is $(NOTVLE) & OP=14 & D & A & SIMM_SIGN=0 & SIMM +{ + D = A + SIMM; +} + +#li r0,-0x1 0x38 00 FF FF # addi simplified mnemonic +:li D,SIMM is $(NOTVLE) & OP=14 & D & A=0 & SIMM_SIGN=1 & SIMM +{ + D = SIMM; +} + +#li r0,1 0x38 00 00 01 # addi simplified mnemonic +:li D,SIMM is $(NOTVLE) & OP=14 & D & A=0 & SIMM_SIGN=0 & SIMM +{ + D = SIMM; +} + +#subi r0,r1,1 0x38 01 FF FF # addi simplified mnemonic +:subi D,A,tmp is $(NOTVLE) & OP=14 & D & A & SIMM_SIGN=1 & SIMM [ tmp = -SIMM; ] +{ + D = A + SIMM; +} + +#addic r0,r0,2 0x30 00 00 02 +:addic D,A,SIMM is $(NOTVLE) & OP=12 & D & A & SIMM_SIGN=0 & SIMM +{ + xer_ca=carry(A,SIMM); + D = A + SIMM; +} + +#subic r0,r0,2 0x30 00 FF FE # addic simplified mnemonic +:subic D,A,tmp is $(NOTVLE) & OP=12 & D & A & SIMM_SIGN=1 & SIMM [ tmp = -SIMM; ] +{ + xer_ca=carry(A,SIMM); + D = A + SIMM; +} + +#addic. r0,r0,5 0x34 00 00 05 +:addic. D,A,SIMM is $(NOTVLE) & OP=13 & D & A & SIMM_SIGN=0 & SIMM +{ + xer_ca = carry(A,SIMM); + D = A + SIMM; + cr0flags( D ); +} + +#subic. r0,r0,1 0x34 00 FF FF # addic. simplified mnemonic +:subic. D,A,tmp is $(NOTVLE) & OP=13 & D & A & SIMM_SIGN=1 & SIMM [ tmp = -SIMM; ] +{ + xer_ca=carry(A,SIMM); + D = A + SIMM; + cr0flags( D ); +} + +#addis r0,r1,1 0x3c 01 00 01 +:addis D,A,SIMM is $(NOTVLE) & OP=15 & D & A & SIMM_SIGN=0 & SIMM +{ + D = A + (SIMM:$(REGISTER_SIZE) << 16); +} + +#lis r0,-1 0x3c 00 FF FF # addis simplified mnemonic +:lis D,SIMM is $(NOTVLE) & OP=15 & D & A=0 & SIMM_SIGN=1 & SIMM +{ + D = SIMM:$(REGISTER_SIZE) << 16; +} + +#lis r0,1 0x3c 00 00 01 # addis simplified mnemonic +:lis D,SIMM is $(NOTVLE) & OP=15 & D & A=0 & SIMM_SIGN=0 & SIMM +{ + D = SIMM:$(REGISTER_SIZE) << 16; +} + +#subis r0,r1,1 0x3c 01 FF FF # addis simplified mnemonic +:subis D,A,tmp is $(NOTVLE) & OP=15 & D & A & SIMM_SIGN=1 & SIMM [ tmp = -SIMM; ] +{ + D = A + (SIMM:$(REGISTER_SIZE) << 16); +} + +#addme r0,r0 0x7c 00 01 D4 +:addme D,A is OP=31 & D & A & BITS_11_15=0 & OE=0 & XOP_1_9=234 & Rc=0 +{ + local zextCarry:$(REGISTER_SIZE) = zext(xer_ca); + local BVal:$(REGISTER_SIZE) = ~(0); + addExtendedCarry(A,BVal); + D=A + BVal + zextCarry; +} + +#addme. r0,r0 0x7c 00 01 D5 +:addme. D,A is OP=31 & D & A & BITS_11_15=0 & OE=0 & XOP_1_9=234 & Rc=1 +{ + local zextCarry:$(REGISTER_SIZE) = zext(xer_ca); + local BVal:$(REGISTER_SIZE) = ~(0); + addExtendedCarry(A,BVal); + D=A + BVal + zextCarry; + cr0flags(D); +} + +#addmeo r0,r0 0x7C 00 05 D4 +:addmeo D,A is OP=31 & D & A & BITS_11_15=0 & OE=1 & XOP_1_9=234 & Rc=0 +{ + local zextCarry:$(REGISTER_SIZE) = zext(xer_ca); + local BVal:$(REGISTER_SIZE) = ~(0); + addExtendedOverflow(A,BVal); + addExtendedCarry(A,BVal); + D=A + BVal + zextCarry; +} + +#addmeo. r0,r0 0x7C 00 05 D5 +:addmeo. D,A is OP=31 & D & A & BITS_11_15=0 & OE=1 & XOP_1_9=234 & Rc=1 +{ + local zextCarry:$(REGISTER_SIZE) = zext(xer_ca); + local BVal:$(REGISTER_SIZE) = ~(0); + addExtendedOverflow(A,BVal); + addExtendedCarry(A,BVal); + D=A + BVal + zextCarry; + cr0flags(D); +} + +#addze r0,r0 0x7C 00 01 94 +:addze D,A is OP=31 & D & A & BITS_11_15=0 & OE=0 & XOP_1_9=202 & Rc=0 +{ + zextedCarry:$(REGISTER_SIZE) = zext( xer_ca ); + xer_ca = carry(A,zextedCarry); + D = A + zextedCarry; +} + +#addze. r0,r0 0x7C 00 01 95 +:addze. 
D,A is OP=31 & D & A & BITS_11_15=0 & OE=0 & XOP_1_9=202 & Rc=1 +{ + zextedCarry:$(REGISTER_SIZE) = zext( xer_ca ); + xer_ca=carry(A,zextedCarry); + D = A + zextedCarry; + cr0flags( D ); +} + +#addzeo r0,r0 0x7C 00 05 94 +:addzeo D,A is OP=31 & D & A & BITS_11_15=0 & OE=1 & XOP_1_9=202 & Rc=0 +{ + zextedCarry:$(REGISTER_SIZE) = zext( xer_ca ); + xer_ca=carry(A,zextedCarry); + addOverflow(A,zextedCarry); + D = A + zextedCarry; +} + +#addzeo. r0,r0 0x7C 00 05 95 +:addzeo. D,A is OP=31 & D & A & BITS_11_15=0 & OE=1 & XOP_1_9=202 & Rc=1 +{ + zextedCarry:$(REGISTER_SIZE) = zext( xer_ca ); + xer_ca=carry(A,zextedCarry); + addOverflow(A,zextedCarry); + D = A + zextedCarry; + cr0flags( D ); +} + +#=========================================================== +# AND +#=========================================================== + +#and r0,r0,r0 0x7C 00 00 38 +:and A,S,B is OP=31 & S & A & B & XOP_1_10=28 & Rc=0 +{ + A = S & B; +} + +#and. r0,r0,r0 0x7C 00 00 39 +:and. A,S,B is OP=31 & S & A & B & XOP_1_10=28 & Rc=1 +{ + A = S & B; + cr0flags( A ); +} + +#andc r0,r0,r0 0x7C 00 00 78 +:andc A,S,B is OP=31 & S & A & B & XOP_1_10=60 & Rc=0 +{ + A = S & ~B; +} + +#andc. r0,r0,r0 0x7C 00 00 79 +:andc. A,S,B is OP=31 & S & A & B & XOP_1_10=60 & Rc=1 +{ + A = S & ~B; + cr0flags( A ); +} + +#andi. r0,r0,0xffff 0x70 00 ff ff +:andi. A,S,UIMM is $(NOTVLE) & OP=28 & S & A & UIMM +{ + A = S & UIMM:$(REGISTER_SIZE); + cr0flags( A ); +} + +#andis. r0,r0,1 0x74 00 00 01 +:andis. A,S,UIMM is $(NOTVLE) & OP=29 & A & S & UIMM +{ + A = S & (UIMM:$(REGISTER_SIZE) << 16); + cr0flags( A ); +} + +#=========================================================== +# Branch (op=18) +#=========================================================== + +#b 1008 0x48 00 00 08 (assuming a starting address of 1000) +#ba LAB_00000158 0x48 00 01 5a +:b^REL_ABS addressLI is $(NOTVLE) & OP=18 & REL_ABS & addressLI & LK=0 +{ + goto addressLI; +} + +:b^REL_ABS addressLI is linkreg=1 & OP=18 & REL_ABS & addressLI & LK=0 + [ linkreg=0; globalset(inst_start,linkreg); ] +{ + # don't do this anymore, detect another way + # call addressLI; + # return [LR]; + goto addressLI; +} + +#bl 0x48 00 00 09 +#bla 0x48 00 10 0f +:bl^REL_ABS addressLI is $(NOTVLE) & OP=18 & REL_ABS & addressLI & LK=1 + [ linkreg=0; globalset(inst_start,linkreg); ] +{ + r2Save = r2; # Save r2 (needed for branch to ppc64 call stub) + LR = inst_next; + call addressLI; +} + +# special case when branch is to fall-through instruction, just loading the link register +#bl 0x48 00 00 05 +:bl addressLI is $(NOTVLE) & OP=18 & REL_ABS & AA=0 & addressLI & LK=1 & LI=1 + [ linkreg=0; globalset(inst_start,linkreg); ] +{ + LR = inst_next; + goto addressLI; +} + +#=========================================================== +# Branch Conditional (op=16) +#=========================================================== + +#b sameAddr 0x42 80 00 00 +#ba LAB_0000 0x42 80 00 02 +:b^REL_ABS addressBD is $(NOTVLE) & OP=16 & addressBD & REL_ABS & BO_0=1 & BO_2=1 & LK=0 +{ + goto addressBD; +} + +:b^REL_ABS addressBD is linkreg=1 & OP=16 & addressBD & REL_ABS & BO_0=1 & BO_2=1 & LK=0 + [ linkreg=0; globalset(inst_start,linkreg); ] +{ + # don't do this anymore, detect another way + # call addressBD; + # return [LR]; + goto addressBD; +} + +#bl LAB_0000 0x42 80 00 01 +#bla LAB_0000 0x42 80 00 03 +:bl^REL_ABS addressBD is $(NOTVLE) & OP=16 & addressBD & REL_ABS & BO_0=1 & BO_2=1 & LK=1 + [ linkreg=0; globalset(inst_start,linkreg); ] +{ + LR = inst_next; + call addressBD; +} + +# special case when branch is to 
fall-through instruction, just loading the link register +#bl (Load LR) +:bl addressBD is $(NOTVLE) & OP=16 & addressBD & REL_ABS & BO_0=1 & BO_2=1 & BD=1 & LK=1 +{ + LR = inst_next; + goto addressBD; +} + + + +#blt LAB_0000 0x41 80 00 00 +:b^CC^REL_ABS addressBD is $(NOTVLE) & OP=16 & CC & addressBD & BO_0=0 & BO_2=1 & BI_CR= 0 & + REL_ABS & LK=0 + [ linkreg=0; globalset(inst_start,linkreg); ] # affects both flows, but not at this instruction +{ + if (CC) goto addressBD; +} +## do a special linkreg setting only if linkreg is set, since this happens all over the code +:b^CC^REL_ABS addressBD is linkreg=1 & OP=16 & CC & addressBD & BO_0=0 & BO_2=1 & BI_CR= 0 & + REL_ABS & LK=0 + [ linkreg=0; globalset(inst_start,linkreg); ] +{ + if (CC) goto addressBD; +} + +#bltl LAB_0000 0x41 80 00 01 +:b^CC^"l"^REL_ABS addressBD is $(NOTVLE) & OP=16 & CC & addressBD & BO_0=0 & BO_2=1 & BI_CR= 0 & + REL_ABS & LK=1 + [ linkreg=0; globalset(inst_start,linkreg); ] +{ + if (!CC) goto inst_next; + LR = inst_next; + call addressBD; +} + +#bne cr2,LAB_xxxx 0x40 8a 00 00 +:b^CC^REL_ABS BI_CR,addressBD is $(NOTVLE) & OP=16 & CC & BI_CR & addressBD & BO_0=0 & BO_2=1 & + REL_ABS & LK=0 + [ linkreg=0; globalset(inst_start,linkreg); ] +{ + if (CC) goto addressBD; +} + +#bnel cr2,LAB_xxxx 0x40 8a 00 01 +:b^CC^"l"^REL_ABS BI_CR,addressBD is $(NOTVLE) & OP=16 & CC & BI_CR & addressBD & BO_0=0 & BO_2=1 & + REL_ABS & LK=1 + [ linkreg=0; globalset(inst_start,linkreg); ] +{ + if (!CC) goto inst_next; + LR = inst_next; + call addressBD; +} + +#bdnz LAB_0000 0x42 00 00 00 +:bd^CTR_DEC^REL_ABS addressBD is $(NOTVLE) & OP=16 & CTR_DEC & REL_ABS & addressBD & BO_0=1 & BO_2=0 & LK=0 +{ + if (CTR_DEC) goto addressBD; +} + +#bdnzl FUN_0xxx 0x42 00 00 01 +#bdzla FUN_0000 0x42 40 00 03 +:bd^CTR_DEC^"l"^REL_ABS addressBD is $(NOTVLE) & OP=16 & CTR_DEC & REL_ABS & addressBD & BO_0=1 & BO_2=0 & LK=1 + [ linkreg=0; globalset(inst_start,linkreg); ] +{ + if (!CTR_DEC) goto inst_next; + LR = inst_next; + call addressBD; +} + +#bdnzf lt,LAB_0000 0x40 00 00 00 +#bdnzf 4*cr2+eq,LAB_0000 0x40 0a 00 00 +:bd^CTR_DEC^CC_TF^REL_ABS CC_OP,addressBD is $(NOTVLE) & OP=16 & CC_TF & REL_ABS & CTR_DEC & CC_OP & addressBD & BO_0=0 & BO_2=0 & LK=0 +{ + if (CTR_DEC && CC_OP) goto addressBD; +} + +#bdzfl lt,FUN_0000 0x40 00 00 01 +#bdnzfl 4*cr2+eq,FUN_0000 0x40 0a 00 01 +:bd^CTR_DEC^CC_TF^"l"^REL_ABS CC_OP,addressBD is $(NOTVLE) & OP=16 & CC_TF & CTR_DEC & REL_ABS & CC_OP & addressBD & BO_0=0 & BO_2=0 & LK=1 + [ linkreg=0; globalset(inst_start,linkreg); ] +{ + if (!(CTR_DEC && CC_OP)) goto inst_next; + LR = inst_next; + call addressBD; +} + + +#=========================================================== +# Branch Conditional CTR(op=19, xop=528) +#=========================================================== + + +#bctr 0x4E 80 04 20 +:bctr is $(NOTVLE) & OP=19 & BO_0=1 & BO_2=1 & LK=0 & BITS_13_15=0 & BH=0 & XOP_1_10=528 +{ + goto [CTR]; +} + +:bctr is $(NOTVLE) & linkreg=1 & OP=19 & BO_0=1 & BO_2=1 & LK=0 & BITS_13_15=0 & BH=0 & XOP_1_10=528 + [ linkreg=0; globalset(inst_start,linkreg); ] +{ + # don't do this anymore, detect another way + # call [CTR]; + # return [LR]; + goto [CTR]; +} + +:bctr BH is $(NOTVLE) & OP=19 & BO_0=1 & BO_2=1 & LK=0 & BITS_13_15=0 & BH & XOP_1_10=528 +{ + goto [CTR]; +} + +#bctrl 0x4e 80 04 21 +:bctrl is $(NOTVLE) & OP=19 & BO_0=1 & BO_2=1 & LK=1 & BITS_13_15=0 & BH=0 & XOP_1_10=528 + [ linkreg=0; globalset(inst_start,linkreg); ] +{ + LR = inst_next; + call [CTR]; +} +:bctrl BH is $(NOTVLE) & OP=19 & BO_0=1 & BO_2=1 & LK=1 & 
BITS_13_15=0 & BH & XOP_1_10=528 + [ linkreg=0; globalset(inst_start,linkreg); ] +{ + LR = inst_next; + call [CTR]; +} + +#bgectr 0x4c 80 04 20 +:b^CC^"ctr" is $(NOTVLE) & OP=19 & CC & BO_0=0 & BO_2=1 & BI_CR= 0 & BH=0 & LK=0 & BITS_13_15=0 & XOP_1_10=528 +{ + if (!CC) goto inst_next; + goto [CTR]; +} +:b^CC^"ctr" BH is $(NOTVLE) & OP=19 & CC & BO_0=0 & BO_2=1 & BI_CR= 0 & BH & BH_BITS!=0 & LK=0 & BITS_13_15=0 & XOP_1_10=528 +{ + if (!CC) goto inst_next; + goto [CTR]; +} + +#bgectrl 0x4c 80 04 21 +:b^CC^"ctrl" is $(NOTVLE) & OP=19 & CC & BO_0=0 & BO_2=1 & BI_CR= 0 & BH=0 & LK=1 & BITS_13_15=0 & XOP_1_10=528 + [ linkreg=0; globalset(inst_start,linkreg); ] +{ + if (!CC) goto inst_next; + LR = inst_next; + call [CTR]; +} +:b^CC^"ctrl" BH is $(NOTVLE) & OP=19 & CC & BO_0=0 & BO_2=1 & BI_CR= 0 & BH & BH_BITS!=0 & LK=1 & BITS_13_15=0 & XOP_1_10=528 + [ linkreg=0; globalset(inst_start,linkreg); ] +{ + if (!CC) goto inst_next; + LR = inst_next; + call [CTR]; +} + +#bgectr cr3 0x4c 8c 04 20 +:b^CC^"ctr" BI_CR is $(NOTVLE) & OP=19 & CC & BI_CR & BO_0=0 & BO_2=1 & BH=0 & LK=0 & BITS_13_15=0 & XOP_1_10=528 +{ + if (!CC) goto inst_next; + goto [CTR]; +} + +#bnectr cr2,#0x3 0x4c 8c 1c 20 +:b^CC^"ctr" BI_CR,BH is $(NOTVLE) & OP=19 & CC & BI_CR & BO_0=0 & BO_2=1 & BH & LK=0 & BITS_13_15=0 & XOP_1_10=528 +{ + if (!CC) goto inst_next; + goto [CTR]; +} + +#bgectrl cr2,LAB_xxxx 0x4c 8c 04 21 +:b^CC^"ctrl" BI_CR is $(NOTVLE) & OP=19 & CC & BI_CR & BO_0=0 & BO_2=1 & BH=0 & LK=1 & BITS_13_15=0 & XOP_1_10=528 + [ linkreg=0; globalset(inst_start,linkreg); ] +{ + if (!CC) goto inst_next; + LR = inst_next; + call [CTR]; +} + +#bnectr cr2,#0x3 0x4c 8c 1c 21 +:b^CC^"ctrl" BI_CR,BH is $(NOTVLE) & OP=19 & CC & BI_CR & BO_0=0 & BO_2=1 & BH & LK=1 & BITS_13_15=0 & XOP_1_10=528 + [ linkreg=0; globalset(inst_start,linkreg); ] +{ + if (!CC) goto inst_next; + LR = inst_next; + call [CTR]; +} + +#=========================================================== +# Branch Conditional to Link Register (op=19, XOP=16) +#=========================================================== + +#bclr 0x4E 80 00 20 +:blr is $(NOTVLE) & OP=19 & BO_0=1 & BO_2=1 & LK=0 & BITS_13_15=0 & BH=0 & XOP_1_10=16 +{ + return [LR]; +} +:blr BH is $(NOTVLE) & OP=19 & BO_0=1 & BO_2=1 & LK=0 & BITS_13_15=0 & BH & XOP_1_10=16 +{ + goto [LR]; +} + +#blrl 0x4e 80 00 21 +:blrl is $(NOTVLE) & OP=19 & BO_0=1 & BO_2=1 & LK=1 & BITS_13_15=0 & BH=0 & XOP_1_10=16 + [ linkreg=0; globalset(inst_start,linkreg); ] +{ + tmp:$(REGISTER_SIZE) = LR; + LR = inst_next; + call [tmp]; +} +:blrl BH is $(NOTVLE) & OP=19 & BO_0=1 & BO_2=1 & LK=1 & BITS_13_15=0 & BH & XOP_1_10=16 + [ linkreg=0; globalset(inst_start,linkreg); ] +{ + tmp:$(REGISTER_SIZE) = LR; + LR = inst_next; + call [tmp]; +} + +#bgelr 0x4c 80 00 20 +:b^CC^"lr" is $(NOTVLE) & OP=19 & CC & BO_0=0 & BO_2=1 & BI_CR=0 & BH=0 & LK=0 & BITS_13_15=0 & XOP_1_10=16 + [ linkreg=0; globalset(inst_start,linkreg); ] +{ + if (!CC) goto inst_next; + return [LR]; +} +:b^CC^"lr" BH is $(NOTVLE) & OP=19 & CC & BO_0=0 & BO_2=1 & BI_CR=0 & BH & BH_BITS!=0 & LK=0 & BITS_13_15=0 & XOP_1_10=16 + [ linkreg=0; globalset(inst_start,linkreg); ] +{ + if (!CC) goto inst_next; + goto [LR]; +} + +#bgelrl 0x4c 80 00 21 +:b^CC^"lrl" is $(NOTVLE) & OP=19 & CC & BO_0=0 & BO_2=1 & BI_CR=0 & BH=0 & LK=1 & BITS_13_15=0 & XOP_1_10=16 + [ linkreg=0; globalset(inst_start,linkreg); ] +{ + if (!CC) goto inst_next; + tmp:$(REGISTER_SIZE) = LR; + LR = inst_next; + call [tmp]; +} +:b^CC^"lrl" BH is $(NOTVLE) & OP=19 & CC & BO_0=0 & BO_2=1 & BI_CR=0 & BH & BH_BITS!=0 & 
LK=1 & BITS_13_15=0 & XOP_1_10=16 + [ linkreg=0; globalset(inst_start,linkreg); ] +{ + if (!CC) goto inst_next; + tmp:$(REGISTER_SIZE) = LR; + LR = inst_next; + call [tmp]; +} + +#bgelr cr2 0x4c 88 00 20 +:b^CC^"lr" BI_CR is $(NOTVLE) & OP=19 & CC & BI_CR & BO_0=0 & BO_2=1 & BH=0 & LK=0 & BITS_13_15=0 & XOP_1_10=16 + [ linkreg=0; globalset(inst_start,linkreg); ] +{ + if (!CC) goto inst_next; + return [LR]; +} + +#bnelr cr2,#0x3 0x4c 8c 18 20 +:b^CC^"lr" BI_CR,BH is $(NOTVLE) & OP=19 & CC & BI_CR & BO_0=0 & BO_2=1 & BH & BH_BITS!=0 & LK=0 & BITS_13_15=0 & XOP_1_10=16 + [ linkreg=0; globalset(inst_start,linkreg); ] +{ + if (!CC) goto inst_next; + goto [LR]; +} + +#bgelrl cr3 0x4c 8c 00 21 +:b^CC^"lrl" BI_CR is $(NOTVLE) & OP=19 & CC & BI_CR & BO_0=0 & BO_2=1 & BH=0 & LK=1 & BITS_13_15=0 & XOP_1_10=16 + [ linkreg=0; globalset(inst_start,linkreg); ] +{ + if (!CC) goto inst_next; + tmp:$(REGISTER_SIZE) = LR; + LR = inst_next; + call [tmp]; +} + +#bnelr cr2,#0x3 0x4c 8c 18 21 +:b^CC^"lrl" BI_CR,BH is $(NOTVLE) & OP=19 & CC & BI_CR & BO_0=0 & BO_2=1 & BH & LK=1 & BITS_13_15=0 & XOP_1_10=16 + [ linkreg=0; globalset(inst_start,linkreg); ] +{ + if (!CC) goto inst_next; + tmp:$(REGISTER_SIZE) = LR; + LR = inst_next; + call [tmp]; +} + +###### + +#bdnzlr 0x4e 00 00 20 +:bd^CTR_DEC^"lr" is $(NOTVLE) & OP=19 & BH=0 & CTR_DEC & BO_0=1 & BO_2=0 & LK=0 & BITS_13_15=0 & XOP_1_10=16 + [ linkreg=0; globalset(inst_start,linkreg); ] +{ + if (!CTR_DEC) goto inst_next; + goto [LR]; +} +:bd^CTR_DEC^"lr" BH is $(NOTVLE) & OP=19 & BH & CTR_DEC & BO_0=1 & BO_2=0 & LK=0 & BITS_13_15=0 & XOP_1_10=16 + [ linkreg=0; globalset(inst_start,linkreg); ] +{ + if (!CTR_DEC) goto inst_next; + goto [LR]; +} + +#bdnzlrl 0x4e 00 00 21 +:bd^CTR_DEC^"lrl" is $(NOTVLE) & OP=19 & CTR_DEC & BH=0 & BO_0=1 & BO_2=0 & LK=1 & BITS_13_15=0 & XOP_1_10=16 + [ linkreg=0; globalset(inst_start,linkreg); ] +{ + if (!CTR_DEC) goto inst_next; + tmp:$(REGISTER_SIZE) = LR; + LR = inst_next; + call [tmp]; +} +:bd^CTR_DEC^"lrl" BH is $(NOTVLE) & OP=19 & CTR_DEC & BH & BO_0=1 & BO_2=0 & LK=1 & BITS_13_15=0 & XOP_1_10=16 + [ linkreg=0; globalset(inst_start,linkreg); ] +{ + if (!CTR_DEC) goto inst_next; + tmp:$(REGISTER_SIZE) = LR; + LR = inst_next; + call [tmp]; +} + +#bdnzflr lt 0x4c 00 00 20 +#bdnzflr 4*cr2+eq 0x4c 0a 00 20 +:bd^CTR_DEC^CC_TF^"lr" CC_OP is $(NOTVLE) & OP=19 & CC_TF & CTR_DEC & CC_OP & BO_0=0 & BO_2=0 & BH=0 & LK=0 & BITS_13_15=0 & XOP_1_10=16 + [ linkreg=0; globalset(inst_start,linkreg); ] +{ + if (!(CTR_DEC && CC_OP)) goto inst_next; + goto [LR]; +} + +#bdnzflr ge 0x4c 00 18 20 +#bdnzflr 4*cr2+eq 0x4c 0a 18 20 +:bd^CTR_DEC^CC_TF^"lr" CC_OP,BH is $(NOTVLE) & OP=19 & CC_TF & CTR_DEC & CC_OP & BO_0=0 & BO_2=0 & BH & LK=0 & BITS_13_15=0 & XOP_1_10=16 + [ linkreg=0; globalset(inst_start,linkreg); ] +{ + if (!(CTR_DEC && CC_OP)) goto inst_next; + goto [LR]; +} + +#bdzflrl lt 0x4c 00 00 21 +#bdnzflrl 4*cr2+eq 0x4c 0a 00 21 +:bd^CTR_DEC^CC_TF^"lrl" CC_OP is $(NOTVLE) & OP=19 & CC_TF & CTR_DEC & CC_OP & BH=0 & BO_0=0 & BO_2=0 & LK=1 & BITS_13_15=0 & XOP_1_10=16 + [ linkreg=0; globalset(inst_start,linkreg); ] +{ + if (!(CTR_DEC && CC_OP)) goto inst_next; + tmp:$(REGISTER_SIZE) = LR; + LR = inst_next; + call [tmp]; +} + +#bdzflrl lt 0x4c 00 18 21 +#bdnzflrl 4*cr2+eq 0x4c 0a 18 21 +:bd^CTR_DEC^CC_TF^"lrl" CC_OP,BH is $(NOTVLE) & OP=19 & CC_TF & CTR_DEC & CC_OP & BH & BO_0=0 & BO_2=0 & LK=1 & BITS_13_15=0 & XOP_1_10=16 + [ linkreg=0; globalset(inst_start,linkreg); ] +{ + if (!(CTR_DEC && CC_OP)) goto inst_next; + tmp:$(REGISTER_SIZE) = LR; + LR = 
inst_next; + call [tmp]; +} + + +#=========================================================== +# CMP +#=========================================================== + +#cmpw r0,r1 0x7c 00 08 00 +#cmpd r0,r1 0x7c 20 08 00 (64 bit mode) +:cmp^DSIZE A,B is OP=31 & CRFD=0 & BIT_22=0 & DSIZE & A & B & REG_A & REG_B & XOP_1_10=0 & BIT_0=0 +{ + tmpA:$(REGISTER_SIZE) = REG_A; + tmpB:$(REGISTER_SIZE) = REG_B; + cr0 = ((tmpA s< tmpB) << 3) | ((tmpA s> tmpB) << 2) | ((tmpA == tmpB) << 1) | (xer_so & 1); + +} + +#cmpw cr2,r0,r1 0x7d 00 08 00 +#cmpd cr2,r0,r1 0x7d 20 08 00 (64 bit mode) +:cmp^DSIZE CRFD,A,B is OP=31 & CRFD & BIT_22=0 & DSIZE & A & B & REG_A & REG_B & XOP_1_10=0 & BIT_0=0 +{ + tmpA:$(REGISTER_SIZE) = REG_A; + tmpB:$(REGISTER_SIZE) = REG_B; + CRFD = ((tmpA s< tmpB) << 3) | ((tmpA s> tmpB) << 2) | ((tmpA == tmpB) << 1) | (xer_so & 1); +} + +############################### +#cmpwi r0,0x00 0x2c 00 00 00 +#cmpdi r0,0x00 0x2c 20 00 00 (64 bit mode) +:cmp^DSIZE^"i" A,SIMM is $(NOTVLE) & OP=11 & CRFD=0 & BIT_22=0 & DSIZE & A & REG_A & SIMM +{ + tmpA:$(REGISTER_SIZE) = REG_A; + tmpB:$(REGISTER_SIZE) = SIMM; + cr0 = ((tmpA s< tmpB) << 3) | ((tmpA s> tmpB) << 2) | ((tmpA == tmpB) << 1) | (xer_so & 1); + +} + +#cmpwi cr2,r0,0x00 0x2d 00 00 00 +#cmpdi cr2,r0,0x00 0x2d 20 00 00 (64 bit mode) +:cmp^DSIZE^"i" CRFD,A,SIMM is $(NOTVLE) & OP=11 & CRFD & BIT_22=0 & DSIZE & A & REG_A & SIMM +{ + tmpA:$(REGISTER_SIZE) = REG_A; + tmpB:$(REGISTER_SIZE) = SIMM; + CRFD = ((tmpA s< tmpB) << 3) | ((tmpA s> tmpB) << 2) | ((tmpA == tmpB) << 1) | (xer_so & 1); +} + +############################ +#cmplw r0,r1 0x7c 00 08 40 +#cmpld r0,r1 0x7c 20 08 40 (64 bit mode) +:cmpl^DSIZE A,B is OP=31 & CRFD=0 & BIT_22=0 & DSIZE & A & B & UREG_A & UREG_B & XOP_1_10=32 & BIT_0=0 +{ + tmpA:$(REGISTER_SIZE) = UREG_A; + tmpB:$(REGISTER_SIZE) = UREG_B; + cr0 = ((tmpA < tmpB) << 3) | ((tmpA > tmpB) << 2) | ((tmpA == tmpB) << 1) | (xer_so & 1); + +} + +#cmplw cr2,r0,r1 0x7d 00 08 40 +#cmpld cr2,r0,r1 0x7d 20 08 40 (64 bit mode) +:cmpl^DSIZE CRFD,A,B is OP=31 & CRFD & BIT_22=0 & DSIZE & A & B & UREG_A & UREG_B & XOP_1_10=32 & BIT_0=0 +{ + tmpA:$(REGISTER_SIZE) = UREG_A; + tmpB:$(REGISTER_SIZE) = UREG_B; + CRFD = ((tmpA < tmpB) << 3) | ((tmpA > tmpB) << 2) | ((tmpA == tmpB) << 1) | (xer_so & 1); +} + +############################### +#cmplwi r0,0x00 0x28 00 00 00 +#cmpldi r0,0x00 0x28 20 00 00 (64 bit mode) +:cmpl^DSIZE^"i" A,UIMM is $(NOTVLE) & OP=10 & CRFD=0 & BIT_22=0 & DSIZE & A & UREG_A & UIMM +{ + tmpA:$(REGISTER_SIZE) = UREG_A; + tmpB:$(REGISTER_SIZE) = UIMM; + cr0 = ((tmpA < tmpB) << 3) | ((tmpA > tmpB) << 2) | ((tmpA == tmpB) << 1) | (xer_so & 1); + +} + +#cmplwi cr2,r0,0x00 0x29 00 00 00 +#cmpldi cr2,r0,0x00 0x29 20 00 00 (64 bit mode) +:cmpl^DSIZE^"i" CRFD,A,UIMM is $(NOTVLE) & OP=10 & CRFD & BIT_22=0 & DSIZE & A & UREG_A & UIMM +{ + tmpA:$(REGISTER_SIZE) = UREG_A; + tmpB:$(REGISTER_SIZE) = UIMM; + CRFD = ((tmpA < tmpB) << 3) | ((tmpA > tmpB) << 2) | ((tmpA == tmpB) << 1) | (xer_so & 1); +} +#=========================================================== +# CNTLZx +#=========================================================== + +@ifdef BIT_64 +#cntlzd r0,r0 0x7c 00 00 74 +:cntlzd A,S is OP=31 & S & A & BITS_11_15=0 & XOP_1_10=58 & Rc=0 +{ + A = countLeadingZeros(S); +} + +#cntlzd. r0,r0 0x7c 00 00 75 +:cntlzd. 
A,S is OP=31 & S & A & BITS_11_15=0 & XOP_1_10=58 & Rc=1 +{ + A = countLeadingZeros(S); + cr0flags(A); +} +@endif + +#cntlzw r0,r0 0x7c 00 00 34 +:cntlzw A,S is OP=31 & S & A & BITS_11_15=0 & XOP_1_10=26 & Rc=0 +{ + A = countLeadingZeros(S:4); +} + +#cntlzw. r0,r0 0x7c 00 00 35 +:cntlzw. A,S is OP=31 & S & A & BITS_11_15=0 & XOP_1_10=26 & Rc=1 +{ + A = countLeadingZeros(S:4); + cr0flags(A); +} +#=========================================================== +# CRxxx +#=========================================================== +#crand lt,lt,lt 0x4c 00 02 02 +#crand 4*cr1+lt,4*cr2+gt,4*cr3+eq 0x4c 89 72 02 +:crand CC_D_OP,CC_OP,CC_B_OP is $(NOTVLE) & OP=19 & CC_D_OP & CC_OP & CC_B_OP & CR_D & CR_D_CC & XOP_1_10=257 & BIT_0=0 +{ + setCrBit(CR_D,CR_D_CC,CC_OP & CC_B_OP); +} + +#crandc lt,lt,lt 0x4c 00 01 02 +#crandc 4*cr1+lt,4*cr2+gt,4*cr3+eq 0x4c 89 71 02 +:crandc CC_D_OP,CC_OP,CC_B_OP is $(NOTVLE) & OP=19 & CC_D_OP & CC_OP & CC_B_OP & CR_D & CR_D_CC & XOP_1_10=129 & BIT_0=0 +{ + tmp1:1 = !CC_B_OP; + setCrBit(CR_D,CR_D_CC,CC_OP & tmp1); +} + +#creqv lt,lt,lt 0x4c 00 02 42 +#creqv 4*cr1+lt,4*cr2+gt,4*cr3+eq 0x4c 89 72 42 +:creqv CC_D_OP,CC_OP,CC_B_OP is $(NOTVLE) & OP=19 & CC_D_OP & CC_OP & CC_B_OP & CR_D & CR_D_CC & XOP_1_10=289 & BIT_0=0 +{ + setCrBit(CR_D,CR_D_CC,CC_B_OP == CC_OP); +} + +#crnand lt,lt,lt 0x4c 00 01 c2 +#crnand 4*cr1+lt,4*cr2+gt,4*cr3+eq 0x4c 89 71 c2 +:crnand CC_D_OP,CC_OP,CC_B_OP is $(NOTVLE) & OP=19 & CC_D_OP & CC_OP & CC_B_OP & CR_D & CR_D_CC & XOP_1_10=225 & BIT_0=0 +{ + setCrBit(CR_D,CR_D_CC,!(CC_B_OP & CC_OP)); +} + +#crnor lt,lt,lt 0x4c 00 00 42 +#crnor 4*cr1+lt,4*cr2+gt,4*cr3+eq 0x4c 89 70 42 +:crnor CC_D_OP,CC_OP,CC_B_OP is $(NOTVLE) & OP=19 & CC_D_OP & CC_OP & CC_B_OP & CR_D & CR_D_CC & XOP_1_10=33 & BIT_0=0 +{ + setCrBit(CR_D,CR_D_CC,!(CC_B_OP | CC_OP)); +} + +#cror lt,lt,lt 0x4c 00 03 82 +#cror 4*cr1+lt,4*cr2+gt,4*cr3+eq 0x4c 89 73 82 +:cror CC_D_OP,CC_OP,CC_B_OP is $(NOTVLE) & OP=19 & CC_D_OP & CC_OP & CC_B_OP & CR_D & CR_D_CC & XOP_1_10=449 & BIT_0=0 +{ + setCrBit(CR_D,CR_D_CC,(CC_B_OP | CC_OP)); +} + +#crorc lt,lt,lt 0x4c 00 03 42 +#crorc 4*cr1+lt,4*cr2+gt,4*cr3+eq 0x4c 89 73 42 +:crorc CC_D_OP,CC_OP,CC_B_OP is $(NOTVLE) & OP=19 & CC_D_OP & CC_OP & CC_B_OP & CR_D & CR_D_CC & XOP_1_10=417 & BIT_0=0 +{ + setCrBit(CR_D,CR_D_CC,((!CC_B_OP) | CC_OP)); +} + +#crxor lt,lt,lt 0x4c 00 01 82 +#crxor 4*cr1+lt,4*cr2+gt,4*cr3+eq 0x4c 89 71 82 +:crxor CC_D_OP,CC_OP,CC_B_OP is $(NOTVLE) & OP=19 & CC_D_OP & CC_OP & CC_B_OP & CR_D & CR_D_CC & XOP_1_10=193 & BIT_0=0 +{ + setCrBit(CR_D,CR_D_CC,(CC_B_OP ^ CC_OP)); +} + +@ifndef IS_ISA +# replace with dci command in ISA +#dccci 0,r0 0x7c 00 03 8c +:dccci RA_OR_ZERO,B is OP=31 & BITS_21_25=0 & B & XOP_1_10=454 & BIT_0=0 & RA_OR_ZERO +{ + ea:$(REGISTER_SIZE) = RA_OR_ZERO + B; + dataCacheCongruenceClassInvalidate(ea); +} +@endif + +#=========================================================== +# DIVxx +#=========================================================== + +@ifdef BIT_64 +#divd r0,r0,r0 0x7c 00 03 d2 +:divd D,A,B is OP=31 & D & A & B & OE=0 & XOP_1_9=489 & Rc=0 +{ + D = A s/ B; +} + +#divd. r0,r0,r0 0x7c 00 03 d3 +:divd. D,A,B is OP=31 & D & A & B & OE=0 & XOP_1_9=489 & Rc=1 +{ + D = A s/ B; + cr0flags(D); +} + +#divdo r0,r0,r0 0x7c 00 07 d2 +:divdo D,A,B is OP=31 & D & A & B & OE=1 & XOP_1_9=489 & Rc=0 +{ + divOverflow(A,B); + D = A s/ B; +} + +#divdo. r0,r0,r0 0x7c 00 07 d3 +:divdo. 
D,A,B is OP=31 & D & A & B & OE=1 & XOP_1_9=489 & Rc=1 +{ + divOverflow(A,B); + D = A s/ B; + cr0flags(D); +} + +###################### +#divdu r0,r0,r0 0x7c 00 03 92 +:divdu D,A,B is OP=31 & D & A & B & OE=0 & XOP_1_9=457 & Rc=0 +{ + D = A / B; +} + +#divdu. r0,r0,r0 0x7c 00 03 93 +:divdu. D,A,B is OP=31 & D & A & B & OE=0 & XOP_1_9=457 & Rc=1 +{ + D = A / B; + cr0flags(D); +} + +#divduo r0,r0,r0 0x7c 00 07 92 +:divduo D,A,B is OP=31 & D & A & B & OE=1 & XOP_1_9=457 & Rc=0 +{ + divZero(B); + D = A / B; +} + +#divduo. r0,r0,r0 0x7c 00 07 93 +:divduo. D,A,B is OP=31 & D & A & B & OE=1 & XOP_1_9=457 & Rc=1 +{ + divZero(B); + D = A / B; + cr0flags(D); +} +@endif + +############################# +#divw r0,r0,r0 0x7c 00 03 d6 +:divw D,A,B is OP=31 & D & A & B & OE=0 & XOP_1_9=491 & Rc=0 +{ +@ifdef BIT_64 + D = sext(A:4 s/ B:4); +@else + D = A s/ B; +@endif +} + +#divw. r0,r0,r0 0x7c 00 03 d7 +:divw. D,A,B is OP=31 & D & A & B & OE=0 & XOP_1_9=491 & Rc=1 +{ +@ifdef BIT_64 + divOverflow(A:4,B:4); + D = sext(A:4 s/ B:4); + cr0flags(D:4); +@else + divOverflow(A,B); + D = A s/ B; + cr0flags(D); +@endif +} + +#divwo r0,r0,r0 0x7c 00 07 d6 +:divwo D,A,B is OP=31 & D & A & B & OE=1 & XOP_1_9=491 & Rc=0 +{ +@ifdef BIT_64 + divOverflow(A:4,B:4); + D = sext(A:4 s/ B:4); +@else + divOverflow(A,B); + D = A s/ B; +@endif +} + +#divwo. r0,r0,r0 0x7c 00 07 d7 +:divwo. D,A,B is OP=31 & D & A & B & OE=1 & XOP_1_9=491 & Rc=1 +{ +@ifdef BIT_64 + divOverflow(A:4,B:4); + D = sext(A:4 s/ B:4); + cr0flags(D:4); +@else + divOverflow(A,B); + D = A s/ B; + cr0flags(D); +@endif +} + +######################### +#divwu r0,r0,r0 0x7c 00 03 96 +:divwu D,A,B is OP=31 & D & A & B & OE=0 & XOP_1_9=459 & Rc=0 +{ +@ifdef BIT_64 + D = zext(A:4) / zext(B:4); +@else + D = A / B; +@endif +} + +#divwu. r0,r0,r0 0x7c 00 03 97 +:divwu. D,A,B is OP=31 & D & A & B & OE=0 & XOP_1_9=459 & Rc=1 +{ +@ifdef BIT_64 + D = zext(A:4) / zext(B:4); + cr0flags(D:4); +@else + D = A / B; + cr0flags(D); +@endif +} + +#divwuo r0,r0,r0 0x7c 00 07 96 +:divwuo D,A,B is OP=31 & D & A & B & OE=1 & XOP_1_9=459 & Rc=0 +{ +@ifdef BIT_64 + divZero(B:4); + D = zext(A:4) / zext(B:4); +@else + divZero(B); + D = A / B; +@endif +} + +#divwuo. r0,r0,r0 0x7c 00 07 97 +:divwuo. D,A,B is OP=31 & D & A & B & OE=1 & XOP_1_9=459 & Rc=1 +{ +@ifdef BIT_64 + divZero(B:4); + D = zext(A:4) / zext(B:4); + cr0flags(D:4); +@else + divZero(B); + D = A / B; + cr0flags(D); +@endif +}
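+ +# Note: in the 64-bit language the word (w) forms divide only the low 32 bits and widen the +# 32-bit quotient back to 64 bits, e.g. divw computes sext(A:4 s/ B:4) and divwu computes +# zext(A:4) / zext(B:4), so the upper half of D is the sign/zero extension of the result.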
+ +#=========================================================== +# ECxxx,EIxxx +#=========================================================== +#eciwx r0,r0,r0 0x7c 00 02 6c +:eciwx D,RA_OR_ZERO,B is $(NOTVLE) & OP=31 & D & B & RA_OR_ZERO & XOP_1_10=310 & BIT_0=0 +{ + ea:$(REGISTER_SIZE) = RA_OR_ZERO + B; + D = externalControlIn(ea); +} + +#ecowx r0,r0,r0 0x7c 00 03 6c +:ecowx S,RA_OR_ZERO,B is $(NOTVLE) & OP=31 & S & B & RA_OR_ZERO & XOP_1_10=438 & BIT_0=0 +{ + ea:$(REGISTER_SIZE) = RA_OR_ZERO + B; + externalControlOut(ea, S); +} + +#=========================================================== +# EQVx +#=========================================================== +#eqv r0,r0,r0 0x7c 00 02 38 +:eqv A,S,B is OP=31 & S & A & B & XOP_1_10=284 & Rc=0 +{ + A = ~(S ^ B); +} + +#eqv. r0,r0,r0 0x7c 00 02 39 +:eqv. A,S,B is OP=31 & S & A & B & XOP_1_10=284 & Rc=1 +{ + A = ~(S ^ B); + cr0flags(A); +} + +#=========================================================== +# EXTSBx +#=========================================================== +#extsb r0,r0 0x7c 00 07 74 +:extsb A,S is OP=31 & S & A & BITS_11_15=0 & XOP_1_10=954 & Rc=0 +{ + A = sext(S:1); +} + +#extsb. r0,r0 0x7c 00 07 75 +:extsb. A,S is OP=31 & S & A & BITS_11_15=0 & XOP_1_10=954 & Rc=1 +{ + A = sext(S:1); + cr0flags(A); +} + +#=========================================================== +# EXTSHx +#=========================================================== +#extsh r0,r0 0x7c 00 07 34 +:extsh A,S is OP=31 & S & A & BITS_11_15=0 & XOP_1_10=922 & Rc=0 +{ + A = sext(S:2); +} + +#extsh. r0,r0 0x7c 00 07 35 +:extsh. A,S is OP=31 & S & A & BITS_11_15=0 & XOP_1_10=922 & Rc=1 +{ + A = sext(S:2); + cr0flags(A); +} + +@ifdef BIT_64 +#extsw r0,r0 0x7c 00 07 b4 +:extsw A,S is OP=31 & S & A & BITS_11_15=0 & XOP_1_10=986 & Rc=0 +{ + A = sext(S:4); +} + +#extsw. r0,r0 0x7c 00 07 b5 +:extsw. A,S is OP=31 & S & A & BITS_11_15=0 & XOP_1_10=986 & Rc=1 +{ + A = sext(S:4); + cr0flags(A); +} +@endif + +#=========================================================== +# FABSx +#=========================================================== +#fabs fr0,fr1 0xfc 00 02 10 +:fabs fD,fB is $(NOTVLE) & OP=63 & fD & BITS_16_20=0 & fB & XOP_1_10=264 & Rc=0 +{ + fD = abs(fB); +} + +#fabs. fr0,fr1 0xfc 00 02 11 +:fabs. fD,fB is $(NOTVLE) & OP=63 & fD & BITS_16_20=0 & fB & XOP_1_10=264 & Rc=1 +{ + fD = abs(fB); + cr1flags(); +} +#fadd fr0,fr0,fr0 0xfc 00 00 2a +:fadd fD,fA,fB is $(NOTVLE) & OP=63 & fD & fA & fB & BITS_6_10=0 & XOP_1_5=21 & Rc=0 +{ + local tmpfA = fA; + local tmpfB = fB; + fD = fA f+ fB; + setFPAddFlags(tmpfA,tmpfB,fD); +} + +#fadd. fr0,fr0,fr0 0xfc 00 00 2b +:fadd. fD,fA,fB is $(NOTVLE) & OP=63 & fD & fA & fB & BITS_6_10=0 & XOP_1_5=21 & Rc=1 +{ + local tmpfA = fA; + local tmpfB = fB; + fD = fA f+ fB; + setFPAddFlags(tmpfA,tmpfB,fD); + cr1flags(); +} + +#fadds fr0,fr0,fr0 0xec 00 00 2a +:fadds fD,fA,fB is $(NOTVLE) & OP=59 & fD & fA & fB & BITS_6_10=0 & XOP_1_5=21 & Rc=0 +{ + local tmpfA = fA; + local tmpfB = fB; + tmp:4 = float2float(fA f+ fB); + fD = float2float(tmp); + setFPAddFlags(tmpfA,tmpfB,fD); + +} + +#fadds. fr0,fr0,fr0 0xec 00 00 2b +:fadds. fD,fA,fB is $(NOTVLE) & OP=59 & fD & fA & fB & BITS_6_10=0 & XOP_1_5=21 & Rc=1 +{ + local tmpfA = fA; + local tmpfB = fB; + tmp:4 = float2float(fA f+ fB); + fD = float2float(tmp); + setFPAddFlags(tmpfA,tmpfB,fD); + cr1flags(); +} + +#=========================================================== +# FCFIDx +#=========================================================== +#fcfid fr0,fr0 0xfc 00 06 9c +:fcfid fD,fB is $(NOTVLE) & OP=63 & fD & BITS_16_20=0 & fB & XOP_1_10=846 & Rc=0 +{ + fD = int2float(fB); +} + +#fcfid. fr0,fr0 0xfc 00 06 9d +:fcfid. 
fD,fB is $(NOTVLE) & OP=63 & fD & BITS_16_20=0 & fB & XOP_1_10=846 & Rc=1 +{ + fD = int2float(fB); + setFPRF(fD); +# fp_fr = intToFloatRoundedUp(fB); +# fp_fi = intToFloatInexact(fB); + fp_xx = fp_xx | fp_fi; + setSummaryFPSCR(); + cr1flags(); +} + +#=========================================================== +# FCMPO +#=========================================================== +#fcmpo fr0,fr0,fr0 0xfc 00 00 40 +:fcmpo CRFD,fA,fB is $(NOTVLE) & OP=63 & CRFD & BITS_21_22=0 & fA & fB & XOP_1_10=32 & BIT_0=0 +{ + tmp:1 = nan(fA) | nan(fB); + fp_cc0 = (fA f< fB); + fp_cc1 = (fA f> fB); + fp_cc2 = (fA f== fB); + CRFD = (fp_cc0 << 3) | (fp_cc1 << 2) | (fp_cc2 << 1) | tmp; +} +#fcmpu fr0,fr0,fr0 0xfc 00 00 00 +:fcmpu CRFD,fA,fB is $(NOTVLE) & OP=63 & CRFD & BITS_21_22=0 & fA & fB & XOP_1_10=0 & BIT_0=0 +{ + tmp:1 = nan(fA) | nan(fB); + fp_cc0 = (fA f< fB); + fp_cc1 = (fA f> fB); + fp_cc2 = (fA f== fB); + CRFD = (fp_cc0 << 3) | (fp_cc1 << 2) | (fp_cc2 << 1) | tmp; +} + +#fctid fr0,fr0 0xfc 00 06 5c +:fctid fD,fB is $(NOTVLE) & OP=63 & fD & BITS_16_20=0 & fB & XOP_1_10=814 & Rc=0 +{ +# fp_fr = floatToIntRoundedUp(fB); +# fp_fi = floatToIntInexact(fB); + fp_vxsnan = fp_vxsnan | nan(fB); +# fp_vxcvi = fp_vxcvi | invalidFloatToInt(fB); +# fp_xx = fp_xx | fp_fi; + fD = trunc(fB); +} +#fctid. fr0,fr0 0xfc 00 06 5d +:fctid. fD,fB is $(NOTVLE) & OP=63 & fD & BITS_16_20=0 & fB & XOP_1_10=814 & Rc=1 +{ +# fp_fr = floatToIntRoundedUp(fB); +# fp_fi = floatToIntInexact(fB); + fp_xx = fp_xx | fp_fi; +# fp_vxsnan = fp_vxsnan | nan(fB); +# fp_vxcvi = fp_vxcvi | invalidFloatToInt(fB); + setSummaryFPSCR(); + cr1flags(); + fD = trunc(fB); +} +#fctidz fr0,fr0 0xfc 00 06 5e +:fctidz fD,fB is $(NOTVLE) & OP=63 & fD & BITS_16_20=0 & fB & XOP_1_10=815 & Rc=0 +{ + fp_fr = 0; +# fp_fi = floatToIntInexact(fB); + fp_vxsnan = fp_vxsnan | nan(fB); +# fp_vxcvi = fp_vxcvi | invalidFloatToInt(fB); + fp_xx = fp_xx | fp_fi; + fD = trunc(fB); +} +#fctidz. fr0,fr0 0xfc 00 06 5f +:fctidz. fD,fB is $(NOTVLE) & OP=63 & fD & BITS_16_20=0 & fB & XOP_1_10=815 & Rc=1 +{ + fp_fr = 0; +# fp_fi = floatToIntInexact(fB); + fp_xx = fp_xx | fp_fi; + fp_vxsnan = fp_vxsnan | nan(fB); +# fp_vxcvi = fp_vxcvi | invalidFloatToInt(fB); + setSummaryFPSCR(); + cr1flags(); + fD = trunc(fB); +} + +#fctiw fr0,fr0 0xfc 00 00 1c +:fctiw fD,fB is $(NOTVLE) & OP=63 & fD & BITS_16_20=0 & fB & XOP_1_10=14 & Rc=0 +{ +# fp_fr = floatToIntRoundedUp(fB); +# fp_fi = floatToIntInexact(fB); + fp_vxsnan = fp_vxsnan | nan(fB); +# fp_vxcvi = fp_vxcvi | invalidFloatToInt(fB); + fp_xx = fp_xx | fp_fi; + local intres:4; + intres = trunc(fB); + fD = sext(intres); +} +#fctiw. fr0,fr0 0xfc 00 00 1d +:fctiw. fD,fB is $(NOTVLE) & OP=63 & fD & BITS_16_20=0 & fB & XOP_1_10=14 & Rc=1 +{ +# fp_fr = floatToIntRoundedUp(fB); +# fp_fi = floatToIntInexact(fB); + fp_xx = fp_xx | fp_fi; + fp_vxsnan = fp_vxsnan | nan(fB); +# fp_vxcvi = fp_vxcvi | invalidFloatToInt(fB); + setSummaryFPSCR(); + cr1flags(); + local intres:4; + intres = trunc(fB); + fD = sext(intres); +} +#fctiwz fr0,fr0 0xfc 00 00 1e +:fctiwz fD,fB is $(NOTVLE) & OP=63 & fD & BITS_16_20=0 & fB & XOP_1_10=15 & Rc=0 +{ + fp_fr = 0; +# fp_fi = floatToIntInexact(fB); + fp_vxsnan = fp_vxsnan | nan(fB); +# fp_vxcvi = fp_vxcvi | invalidFloatToInt(fB); + fp_xx = fp_xx | fp_fi; + local intres:4; + intres = trunc(fB); + fD = sext(intres); +} +#fctiwz. fr0,fr0 0xfc 00 00 1f +:fctiwz. 
fD,fB is $(NOTVLE) & OP=63 & fD & BITS_16_20=0 & fB & XOP_1_10=15 & Rc=1 +{ + fp_fr = 0; +# fp_fi = floatToIntInexact(fB); + fp_xx = fp_xx | fp_fi; + fp_vxsnan = fp_vxsnan | nan(fB); +# fp_vxcvi = fp_vxcvi | invalidFloatToInt(fB); + setSummaryFPSCR(); + cr1flags(); + local intres:4; + intres = trunc(fB); + fD = sext(intres); +} + +#fdiv fr0,fr0,fr0 0xfc 00 00 24 +:fdiv fD,fA,fB is $(NOTVLE) & OP=63 & fD & fA & fB & BITS_6_10=0 & XOP_1_5=18 & Rc=0 +{ + local tmpfA = fA; + local tmpfB = fB; + fD = fA f/ fB; + setFPDivFlags(tmpfA,tmpfB,fD); +} +#fdiv. fr0,fr0,fr0 0xfc 00 00 25 +:fdiv. fD,fA,fB is $(NOTVLE) & OP=63 & fD & fA & fB & BITS_6_10=0 & XOP_1_5=18 & Rc=1 +{ + local tmpfA = fA; + local tmpfB = fB; + fD = fA f/ fB; + setFPDivFlags(tmpfA,tmpfB,fD); + cr1flags(); +} + +#fdivs fr0,fr0,fr0 0xec 00 00 24 +:fdivs fD,fA,fB is $(NOTVLE) & OP=59 & fD & fA & fB & BITS_6_10=0 & XOP_1_5=18 & Rc=0 +{ + local tmpfA = fA; + local tmpfB = fB; + tmp:4 = float2float(fA f/ fB); + fD = float2float(tmp); + setFPDivFlags(tmpfA,tmpfB,fD); +} +#fdivs. fr0,fr0,fr0 0xec 00 00 25 +:fdivs. fD,fA,fB is $(NOTVLE) & OP=59 & fD & fA & fB & BITS_6_10=0 & XOP_1_5=18 & Rc=1 +{ + local tmpfA = fA; + local tmpfB = fB; + tmp:4 = float2float(fA f/ fB); + fD = float2float(tmp); + setFPDivFlags(tmpfA,tmpfB,fD); + cr1flags(); +} + +#fmadd fr0,fr0,fr0,fr0 0xfc 00 00 3a +:fmadd fD,fA,fC,fB is $(NOTVLE) & OP=63 & fD & fA & fC & fB & XOP_1_5=29 & Rc=0 +{ + local tmpfA = fA; + local tmpfB = fB; + local tmpfC = fC; + tmp:8 = fA f* fC; + fD = tmp f+ fB; + setFPRF(fD); +# fp_fr = floatMaddRoundedUp(tmpfA, tmpfC, tmpfB); +# fp_fi = floatMaddInexact(tmpfA,tmpfC,tmpfB); +# fp_ox = fp_ox | floatMaddOverflow(tmpfA,tmpfC,tmpfB); +# fp_ux = fp_ux | floatMaddUnderflow(tmpfA,tmpfC,tmpfB); + fp_xx = fp_xx | fp_fi; + fp_vxsnan = fp_vxsnan | nan(tmpfA) | nan(tmpfC) | nan(tmpfB); +# fp_vxisi = fp_vxisi | floatInfinityAdd(tmp, tmpfB); +# fp_vximz = fp_vximz | floatInfinityMulZero(tmpfA,tmpfC); + setSummaryFPSCR(); +} + +#fmadd. fr0,fr0,fr0,fr0 0xfc 00 00 3b +:fmadd. fD,fA,fC,fB is $(NOTVLE) & OP=63 & fD & fA & fC & fB & XOP_1_5=29 & Rc=1 +{ + local tmpfA = fA; + local tmpfB = fB; + local tmpfC = fC; + tmp:8 = fA f* fC; + fD = tmp f+ fB; + setFPRF(fD); +# fp_fr = floatMaddRoundedUp(tmpfA, tmpfC, tmpfB); +# fp_fi = floatMaddInexact(tmpfA,tmpfC,tmpfB); +# fp_ox = fp_ox | floatMaddOverflow(tmpfA,tmpfC,tmpfB); +# fp_ux = fp_ux | floatMaddUnderflow(tmpfA,tmpfC,tmpfB); + fp_xx = fp_xx | fp_fi; + fp_vxsnan = fp_vxsnan | nan(tmpfA) | nan(tmpfC) | nan(tmpfB); +# fp_vxisi = fp_vxisi | floatInfinityAdd(tmp, tmpfB); +# fp_vximz = fp_vximz | floatInfinityMulZero(tmpfA,tmpfC); + setSummaryFPSCR(); + cr1flags(); +} + +#fmadds fr0,fr0,fr0,fr0 0xec 00 00 3a +:fmadds fD,fA,fC,fB is $(NOTVLE) & OP=59 & fD & fA & fC & fB & XOP_1_5=29 & Rc=0 +{ + local tmpfA = fA; + local tmpfB = fB; + local tmpfC = fC; + tmp:8 = fA f* fC; + tmp2:4 = float2float(tmp f+ fB); + fD = float2float(tmp2); + setFPRF(fD); +# fp_fr = floatMaddRoundedUp(tmpfA, tmpfC, tmpfB); +# fp_fi = floatMaddInexact(tmpfA,tmpfC,tmpfB); +# fp_ox = fp_ox | floatMaddOverflow(tmpfA,tmpfC,tmpfB); +# fp_ux = fp_ux | floatMaddUnderflow(tmpfA,tmpfC,tmpfB); + fp_xx = fp_xx | fp_fi; + fp_vxsnan = fp_vxsnan | nan(tmpfA) | nan(tmpfC) | nan(tmpfB); +# fp_vxisi = fp_vxisi | floatInfinityAdd(tmp, tmpfB); +# fp_vximz = fp_vximz | floatInfinityMulZero(tmpfA,tmpfC); + setSummaryFPSCR(); +} + +#fmadds. fr0,fr0,fr0,fr0 0xec 00 00 3b +:fmadds. 
fD,fA,fC,fB is $(NOTVLE) & OP=59 & fD & fA & fC & fB & XOP_1_5=29 & Rc=1 +{ + local tmpfA = fA; + local tmpfB = fB; + local tmpfC = fC; + tmp:8 = fA f* fC; + tmp2:4 = float2float(tmp f+ fB); + fD = float2float(tmp2); + setFPRF(fD); +# fp_fr = floatMaddRoundedUp(tmpfA, tmpfC, tmpfB); +# fp_fi = floatMaddInexact(tmpfA,tmpfC,tmpfB); +# fp_ox = fp_ox | floatMaddOverflow(tmpfA,tmpfC,tmpfB); +# fp_ux = fp_ux | floatMaddUnderflow(tmpfA,tmpfC,tmpfB); + fp_xx = fp_xx | fp_fi; + fp_vxsnan = fp_vxsnan | nan(tmpfA) | nan(tmpfC) | nan(tmpfB); +# fp_vxisi = fp_vxisi | floatInfinityAdd(tmp, tmpfB); +# fp_vximz = fp_vximz | floatInfinityMulZero(tmpfA,tmpfC); + setSummaryFPSCR(); + cr1flags(); +} + +#fmr fr0,fr0 0xfc 00 00 90 +:fmr fD,fB is $(NOTVLE) & OP=63 & fD & BITS_16_20=0 & fB & XOP_1_10=72 & Rc=0 +{ + fD = fB; +} +#fmr. fr0,fr0 0xfc 00 00 91 +:fmr. fD,fB is $(NOTVLE) & OP=63 & fD & BITS_16_20=0 & fB & XOP_1_10=72 & Rc=1 +{ + fD = fB; + cr1flags(); +} +#fmsub fr0,fr0,fr0,fr0 0xfc 00 00 38 +:fmsub fD,fA,fC,fB is $(NOTVLE) & OP=63 & fD & fA & fC & fB & XOP_1_5=28 & Rc=0 +{ + local tmpfA = fA; + local tmpfB = fB; + local tmpfC = fC; + tmp:8 = fA f* fC; + fD = tmp f- fB; + setFPRF(fD); +# fp_fr = floatMsubRoundedUp(tmpfA, tmpfC, tmpfB); +# fp_fi = floatMsubInexact(tmpfA,tmpfC,tmpfB); +# fp_ox = fp_ox | floatMsubOverflow(tmpfA,tmpfC,tmpfB); +# fp_ux = fp_ux | floatMsubUnderflow(tmpfA,tmpfC,tmpfB); + fp_xx = fp_xx | fp_fi; + fp_vxsnan = fp_vxsnan | nan(tmpfA) | nan(tmpfC) | nan(tmpfB); +# fp_vxisi = fp_vxisi | floatInfinitySub(tmp, tmpfB); +# fp_vximz = fp_vximz | floatInfinityMulZero(tmpfA,tmpfC); + setSummaryFPSCR(); +} + +#fmsub. fr0,fr0,fr0,fr0 0xfc 00 00 39 +:fmsub. fD,fA,fC,fB is $(NOTVLE) & OP=63 & fD & fA & fC & fB & XOP_1_5=28 & Rc=1 +{ + local tmpfA = fA; + local tmpfB = fB; + local tmpfC = fC; + tmp:8 = fA f* fC; + fD = tmp f- fB; + setFPRF(fD); +# fp_fr = floatMsubRoundedUp(tmpfA, tmpfC, tmpfB); +# fp_fi = floatMsubInexact(tmpfA,tmpfC,tmpfB); +# fp_ox = fp_ox | floatMsubOverflow(tmpfA,tmpfC,tmpfB); +# fp_ux = fp_ux | floatMsubUnderflow(tmpfA,tmpfC,tmpfB); + fp_xx = fp_xx | fp_fi; + fp_vxsnan = fp_vxsnan | nan(tmpfA) | nan(tmpfC) | nan(tmpfB); +# fp_vxisi = fp_vxisi | floatInfinitySub(tmp, tmpfB); +# fp_vximz = fp_vximz | floatInfinityMulZero(tmpfA,tmpfC); + setSummaryFPSCR(); + cr1flags(); +} + +#fmsubs fr0,fr0,fr0,fr0 0xec 00 00 38 +:fmsubs fD,fA,fC,fB is $(NOTVLE) & OP=59 & fD & fA & fC & fB & XOP_1_5=28 & Rc=0 +{ + local tmpfA = fA; + local tmpfB = fB; + local tmpfC = fC; + tmp:8 = fA f* fC; + tmp2:4 = float2float(tmp f- fB); + fD = float2float(tmp2); + setFPRF(fD); +# fp_fr = floatMsubRoundedUp(tmpfA, tmpfC, tmpfB); +# fp_fi = floatMsubInexact(tmpfA,tmpfC,tmpfB); +# fp_ox = fp_ox | floatMsubOverflow(tmpfA,tmpfC,tmpfB); +# fp_ux = fp_ux | floatMsubUnderflow(tmpfA,tmpfC,tmpfB); + fp_xx = fp_xx | fp_fi; + fp_vxsnan = fp_vxsnan | nan(tmpfA) | nan(tmpfC) | nan(tmpfB); +# fp_vxisi = fp_vxisi | floatInfinitySub(tmp, tmpfB); +# fp_vximz = fp_vximz | floatInfinityMulZero(tmpfA,tmpfC); + setSummaryFPSCR(); +} + +#fmsubs. fr0,fr0,fr0,fr0 0xec 00 00 39 +:fmsubs. 
fD,fA,fC,fB is $(NOTVLE) & OP=59 & fD & fA & fC & fB & XOP_1_5=28 & Rc=1 +{ + local tmpfA = fA; + local tmpfB = fB; + local tmpfC = fC; + tmp:8 = fA f* fC; + tmp2:4 = float2float(tmp f- fB); + fD = float2float(tmp2); + setFPRF(fD); +# fp_fr = floatMsubRoundedUp(tmpfA, tmpfC, tmpfB); +# fp_fi = floatMsubInexact(tmpfA,tmpfC,tmpfB); +# fp_ox = fp_ox | floatMsubOverflow(tmpfA,tmpfC,tmpfB); +# fp_ux = fp_ux | floatMsubUnderflow(tmpfA,tmpfC,tmpfB); + fp_xx = fp_xx | fp_fi; + fp_vxsnan = fp_vxsnan | nan(tmpfA) | nan(tmpfC) | nan(tmpfB); +# fp_vxisi = fp_vxisi | floatInfinitySub(tmp, tmpfB); +# fp_vximz = fp_vximz | floatInfinityMulZero(tmpfA,tmpfC); + setSummaryFPSCR(); + cr1flags(); +} + +#fmul fr0,fr0,fr0 0xfc 00 00 32 +:fmul fD,fA,fC is $(NOTVLE) & OP=63 & fD & fA & fC & BITS_11_15=0 & XOP_1_5=25 & Rc=0 +{ + local tmpfA = fA; + local tmpfC = fC; + fD = fA f* fC; + setFPMulFlags(tmpfA,tmpfC,fD); +} +#fmul. fr0,fr0,fr0 0xfc 00 00 33 +:fmul. fD,fA,fC is $(NOTVLE) & OP=63 & fD & fA & fC & BITS_11_15=0 & XOP_1_5=25 & Rc=1 +{ + local tmpfA = fA; + local tmpfC = fC; + fD = fA f* fC; + setFPMulFlags(tmpfA,tmpfC,fD); + cr1flags(); +} + +#fmuls fr0,fr0,fr0 0xec 00 00 32 +:fmuls fD,fA,fC is $(NOTVLE) & OP=59 & fD & fA & fC & BITS_11_15=0 & XOP_1_5=25 & Rc=0 +{ + local tmpfA = fA; + local tmpfC = fC; + tmp:4 = float2float(fA f* fC); + fD = float2float(tmp); + setFPMulFlags(tmpfA,tmpfC,fD); +} + +#fmuls. fr0,fr0,fr0 0xec 00 00 33 +:fmuls. fD,fA,fC is $(NOTVLE) & OP=59 & fD & fA & fC & BITS_11_15=0 & XOP_1_5=25 & Rc=1 +{ + local tmpfA = fA; + local tmpfC = fC; + tmp:4 = float2float(fA f* fC); + fD = float2float(tmp); + setFPMulFlags(tmpfA,tmpfC,fD); + cr1flags(); +} + +#fnabs fr0,fr0 0xfc 00 01 10 +:fnabs fD,fB is $(NOTVLE) & OP=63 & fD & fB & BITS_16_20=0 & XOP_1_10=136 & Rc=0 +{ + fD = fB | 0x8000000000000000; +} + +#fnabs. fr0,fr0 0xfc 00 01 11 +:fnabs. fD,fB is $(NOTVLE) & OP=63 & fD & fB & BITS_16_20=0 & XOP_1_10=136 & Rc=1 +{ + fD = fB | 0x8000000000000000; + cr1flags(); +} + +#fneg fr0,fr0 0xfc 00 00 50 +:fneg fD,fB is $(NOTVLE) & OP=63 & fD & fB & BITS_16_20=0 & XOP_1_10=40 & Rc=0 +{ + fD = f- fB; +} + +#fneg. fr0,fr0 0xfc 00 00 51 +:fneg. fD,fB is $(NOTVLE) & OP=63 & fD & fB & BITS_16_20=0 & XOP_1_10=40 & Rc=1 +{ + fD = f- fB; + cr1flags(); +} + +#fnmadd fr0,fr0,fr0,fr0 0xfc 00 00 3e +:fnmadd fD,fA,fC,fB is $(NOTVLE) & OP=63 & fD & fA & fC & fB & XOP_1_5=31 & Rc=0 +{ + local tmpfA = fA; + local tmpfB = fB; + local tmpfC = fC; + tmp:8 = fA f* fC; + fD = f- (tmp f+ fB); + setFPRF(fD); +# fp_fr = floatMaddRoundedUp(tmpfA, tmpfC, tmpfB); +# fp_fi = floatMaddInexact(tmpfA,tmpfC,tmpfB); +# fp_ox = fp_ox | floatMaddOverflow(tmpfA,tmpfC,tmpfB); +# fp_ux = fp_ux | floatMaddUnderflow(tmpfA,tmpfC,tmpfB); + fp_xx = fp_xx | fp_fi; + fp_vxsnan = fp_vxsnan | nan(tmpfA) | nan(tmpfC) | nan(tmpfB); +# fp_vxisi = fp_vxisi | floatInfinityAdd(tmp, tmpfB); +# fp_vximz = fp_vximz | floatInfinityMulZero(tmpfA,tmpfC); + setSummaryFPSCR(); +} + +#fnmadd. fr0,fr0,fr0,fr0 0xfc 00 00 3f +:fnmadd. 
fD,fA,fC,fB is $(NOTVLE) & OP=63 & fD & fA & fC & fB & XOP_1_5=31 & Rc=1 +{ + local tmpfA = fA; + local tmpfB = fB; + local tmpfC = fC; + tmp:8 = fA f* fC; + fD = f- (tmp f+ fB); + setFPRF(fD); +# fp_fr = floatMaddRoundedUp(tmpfA, tmpfC, tmpfB); +# fp_fi = floatMaddInexact(tmpfA,tmpfC,tmpfB); +# fp_ox = fp_ox | floatMaddOverflow(tmpfA,tmpfC,tmpfB); +# fp_ux = fp_ux | floatMaddUnderflow(tmpfA,tmpfC,tmpfB); + fp_xx = fp_xx | fp_fi; + fp_vxsnan = fp_vxsnan | nan(tmpfA) | nan(tmpfC) | nan(tmpfB); +# fp_vxisi = fp_vxisi | floatInfinityAdd(tmp, tmpfB); +# fp_vximz = fp_vximz | floatInfinityMulZero(tmpfA,tmpfC); + setSummaryFPSCR(); + cr1flags(); +} + +#fnmadds fr0,fr0,fr0,fr0 0xec 00 00 3e +:fnmadds fD,fA,fC,fB is $(NOTVLE) & OP=59 & fD & fA & fC & fB & XOP_1_5=31 & Rc=0 +{ + local tmpfA = fA; + local tmpfB = fB; + local tmpfC = fC; + tmp:8 = fA f* fC; + tmp2:4 = float2float(tmp f+ fB); + fD = f- float2float(tmp2); + setFPRF(fD); +# fp_fr = floatMaddRoundedUp(tmpfA, tmpfC, tmpfB); +# fp_fi = floatMaddInexact(tmpfA,tmpfC,tmpfB); +# fp_ox = fp_ox | floatMaddOverflow(tmpfA,tmpfC,tmpfB); +# fp_ux = fp_ux | floatMaddUnderflow(tmpfA,tmpfC,tmpfB); + fp_xx = fp_xx | fp_fi; + fp_vxsnan = fp_vxsnan | nan(tmpfA) | nan(tmpfC) | nan(tmpfB); +# fp_vxisi = fp_vxisi | floatInfinityAdd(tmp, tmpfB); +# fp_vximz = fp_vximz | floatInfinityMulZero(tmpfA,tmpfC); + setSummaryFPSCR(); +} + +#fnmadds. fr0,fr0,fr0,fr0 0xec 00 00 3f +:fnmadds. fD,fA,fC,fB is $(NOTVLE) & OP=59 & fD & fA & fC & fB & XOP_1_5=31 & Rc=1 +{ + local tmpfA = fA; + local tmpfB = fB; + local tmpfC = fC; + tmp:8 = fA f* fC; + tmp2:4 = float2float(tmp f+ fB); + fD = f- float2float(tmp2); + setFPRF(fD); +# fp_fr = floatMaddRoundedUp(tmpfA, tmpfC, tmpfB); +# fp_fi = floatMaddInexact(tmpfA,tmpfC,tmpfB); +# fp_ox = fp_ox | floatMaddOverflow(tmpfA,tmpfC,tmpfB); +# fp_ux = fp_ux | floatMaddUnderflow(tmpfA,tmpfC,tmpfB); + fp_xx = fp_xx | fp_fi; + fp_vxsnan = fp_vxsnan | nan(tmpfA) | nan(tmpfC) | nan(tmpfB); +# fp_vxisi = fp_vxisi | floatInfinityAdd(tmp, tmpfB); +# fp_vximz = fp_vximz | floatInfinityMulZero(tmpfA,tmpfC); + setSummaryFPSCR(); + cr1flags(); +} + +#fnmsub fr0,fr0,fr0,fr0 0xfc 00 00 3c +:fnmsub fD,fA,fC,fB is $(NOTVLE) & OP=63 & fD & fA & fC & fB & XOP_1_5=30 & Rc=0 +{ + local tmpfA = fA; + local tmpfB = fB; + local tmpfC = fC; + tmp:8 = fA f* fC; + fD = f- (tmp f- fB); + setFPRF(fD); +# fp_fr = floatMsubRoundedUp(tmpfA, tmpfC, tmpfB); +# fp_fi = floatMsubInexact(tmpfA,tmpfC,tmpfB); +# fp_ox = fp_ox | floatMsubOverflow(tmpfA,tmpfC,tmpfB); +# fp_ux = fp_ux | floatMsubUnderflow(tmpfA,tmpfC,tmpfB); + fp_xx = fp_xx | fp_fi; + fp_vxsnan = fp_vxsnan | nan(tmpfA) | nan(tmpfC) | nan(tmpfB); +# fp_vxisi = fp_vxisi | floatInfinitySub(tmp, tmpfB); +# fp_vximz = fp_vximz | floatInfinityMulZero(tmpfA,tmpfC); + setSummaryFPSCR(); +} + +#fnmsub. fr0,fr0,fr0,fr0 0xfc 00 00 3d +:fnmsub. 
fD,fA,fC,fB is $(NOTVLE) & OP=63 & fD & fA & fC & fB & XOP_1_5=30 & Rc=1 +{ + local tmpfA = fA; + local tmpfB = fB; + local tmpfC = fC; + tmp:8 = fA f* fC; + fD = f- (tmp f- fB); + setFPRF(fD); +# fp_fr = floatMsubRoundedUp(tmpfA, tmpfC, tmpfB); +# fp_fi = floatMsubInexact(tmpfA,tmpfC,tmpfB); +# fp_ox = fp_ox | floatMsubOverflow(tmpfA,tmpfC,tmpfB); +# fp_ux = fp_ux | floatMsubUnderflow(tmpfA,tmpfC,tmpfB); + fp_xx = fp_xx | fp_fi; + fp_vxsnan = fp_vxsnan | nan(tmpfA) | nan(tmpfC) | nan(tmpfB); +# fp_vxisi = fp_vxisi | floatInfinitySub(tmp, tmpfB); +# fp_vximz = fp_vximz | floatInfinityMulZero(tmpfA,tmpfC); + setSummaryFPSCR(); + cr1flags(); +} + +#fnmsubs fr0,fr0,fr0,fr0 0xec 00 00 3c +:fnmsubs fD,fA,fC,fB is $(NOTVLE) & OP=59 & fD & fA & fC & fB & XOP_1_5=30 & Rc=0 +{ + local tmpfA = fA; + local tmpfB = fB; + local tmpfC = fC; + tmp:8 = fA f* fC; + tmp2:4 = float2float(tmp f- fB); + fD = f- float2float(tmp2); + setFPRF(fD); +# fp_fr = floatMsubRoundedUp(tmpfA, tmpfC, tmpfB); +# fp_fi = floatMsubInexact(tmpfA,tmpfC,tmpfB); +# fp_ox = fp_ox | floatMsubOverflow(tmpfA,tmpfC,tmpfB); +# fp_ux = fp_ux | floatMsubUnderflow(tmpfA,tmpfC,tmpfB); + fp_xx = fp_xx | fp_fi; + fp_vxsnan = fp_vxsnan | nan(tmpfA) | nan(tmpfC) | nan(tmpfB); +# fp_vxisi = fp_vxisi | floatInfinitySub(tmp, tmpfB); +# fp_vximz = fp_vximz | floatInfinityMulZero(tmpfA,tmpfC); + setSummaryFPSCR(); +} + +#fnmsubs. fr0,fr0,fr0,fr0 0xec 00 00 3d +:fnmsubs. fD,fA,fC,fB is $(NOTVLE) & OP=59 & fD & fA & fC & fB & XOP_1_5=30 & Rc=1 +{ + local tmpfA = fA; + local tmpfB = fB; + local tmpfC = fC; + tmp:8 = fA f* fC; + tmp2:4 = float2float(tmp f- fB); + fD = f- float2float(tmp2); + setFPRF(fD); +# fp_fr = floatMsubRoundedUp(tmpfA, tmpfC, tmpfB); +# fp_fi = floatMsubInexact(tmpfA,tmpfC,tmpfB); +# fp_ox = fp_ox | floatMsubOverflow(tmpfA,tmpfC,tmpfB); +# fp_ux = fp_ux | floatMsubUnderflow(tmpfA,tmpfC,tmpfB); + fp_xx = fp_xx | fp_fi; + fp_vxsnan = fp_vxsnan | nan(tmpfA) | nan(tmpfC) | nan(tmpfB); +# fp_vxisi = fp_vxisi | floatInfinitySub(tmp, tmpfB); +# fp_vximz = fp_vximz | floatInfinityMulZero(tmpfA,tmpfC); + setSummaryFPSCR(); + cr1flags(); +} + +#fres fr0,fr0 0xec 00 00 30 +:fres fD,fB is $(NOTVLE) & OP=59 & fD & BITS_16_20 & fB & BITS_6_10=0 & XOP_1_5=24 & Rc=0 +{ + local tmpfB = fB; + one:8 = 1; + floatOne:8 = int2float(one); + tmp:4 = float2float(floatOne f/ fB); + fD = float2float(tmp); + setFPRF(fD); +# fp_fr = floatDivRoundedUp(floatOne, tmpfB); +# fp_fi = floatDivInexact(floatOne, tmpfB); +# fp_ox = fp_ox | floatDivOverflow(floatOne, tmpfB); +# fp_ux = fp_ux | floatDivUnderflow(floatOne, tmpfB); + fp_zx = fp_zx | (fB f== 0); + fp_vxsnan = fp_vxsnan | nan(tmpfB); + setSummaryFPSCR(); +} + +#fres. fr0,fr0 0xec 00 00 31 +:fres. 
+{
+    local tmpfB = fB;
+    one:8 = 1;
+    floatOne:8 = int2float(one);
+    tmp:4 = float2float(floatOne f/ fB);
+    fD = float2float(tmp);
+    setFPRF(fD);
+#    fp_fr = floatDivRoundedUp(floatOne, tmpfB);
+#    fp_fi = floatDivInexact(floatOne, tmpfB);
+#    fp_ox = fp_ox | floatDivOverflow(floatOne, tmpfB);
+#    fp_ux = fp_ux | floatDivUnderflow(floatOne, tmpfB);
+    fp_zx = fp_zx | (fB f== 0);
+    fp_vxsnan = fp_vxsnan | nan(tmpfB);
+    setSummaryFPSCR();
+    cr1flags();
+}
+
+#frsp fr0,fr0 0xfc 00 00 18
+:frsp fD,fB is $(NOTVLE) & OP=63 & fD & BITS_16_20=0 & fB & XOP_1_10=12 & Rc=0
+{
+    local tmpfB = fB;
+    #zero:8 = 0;
+    #floatZero:8 = int2float(zero);
+    tmp:4 = float2float(fB);
+    fD = float2float(tmp);
+    setFPRF(fD);
+#    fp_fr = floatAddRoundedUp(floatZero, tmpfB);
+#    fp_fi = floatAddInexact(floatZero, tmpfB);
+#    fp_ox = fp_ox | floatAddOverflow(floatZero, tmpfB);
+#    fp_ux = fp_ux | floatAddUnderflow(floatZero, tmpfB);
+    fp_xx = fp_xx | fp_fi;
+    fp_vxsnan = fp_vxsnan | nan(tmpfB);
+    setSummaryFPSCR();
+}
+
+#frsp. fr0,fr0 0xfc 00 00 19
+:frsp. fD,fB is $(NOTVLE) & OP=63 & fD & BITS_16_20=0 & fB & XOP_1_10=12 & Rc=1
+{
+    local tmpfB = fB;
+    #zero:8 = 0;
+    #floatZero:8 = int2float(zero);
+    tmp:4 = float2float(fB);
+    fD = float2float(tmp);
+    setFPRF(fD);
+#    fp_fr = floatAddRoundedUp(floatZero, tmpfB);
+#    fp_fi = floatAddInexact(floatZero, tmpfB);
+#    fp_ox = fp_ox | floatAddOverflow(floatZero, tmpfB);
+#    fp_ux = fp_ux | floatAddUnderflow(floatZero, tmpfB);
+    fp_xx = fp_xx | fp_fi;
+    fp_vxsnan = fp_vxsnan | nan(tmpfB);
+    setSummaryFPSCR();
+    cr1flags();
+}
+
+#frsqrte fr0,fr0 0xfc 00 00 34
+:frsqrte fD,fB is $(NOTVLE) & OP=63 & fD & BITS_16_20 & fB & BITS_6_10=0 & XOP_1_5=26 & Rc=0
+{
+    local tmpfB = fB;
+    one:8 = 1;
+    floatOne:8 = int2float(one);
+    tmpSqrt:8 = sqrt(fB);
+    fD = (floatOne f/ tmpSqrt);
+    setFPRF(fD);
+#    fp_fr = floatDivRoundedUp(floatOne, tmpSqrt);
+#    fp_fi = floatDivInexact(floatOne, tmpSqrt);
+#    fp_ox = fp_ox | floatDivOverflow(floatOne, tmpSqrt);
+#    fp_ux = fp_ux | floatDivUnderflow(floatOne, tmpSqrt);
+    fp_xx = fp_xx | fp_fi;
+    fp_vxsnan = fp_vxsnan | nan(tmpfB);
+    fp_vxsqrt = fp_vxsqrt | sqrtInvalid(tmpfB);
+    setSummaryFPSCR();
+}
+
+#frsqrte. fr0,fr0 0xfc 00 00 35
+:frsqrte. fD,fB is $(NOTVLE) & OP=63 & fD & BITS_16_20 & fB & BITS_6_10=0 & XOP_1_5=26 & Rc=1
+{
+    local tmpfB = fB;
+    one:8 = 1;
+    floatOne:8 = int2float(one);
+    tmpSqrt:8 = sqrt(fB);
+    fD = (floatOne f/ tmpSqrt);
+    setFPRF(fD);
+#    fp_fr = floatDivRoundedUp(floatOne, tmpSqrt);
+#    fp_fi = floatDivInexact(floatOne, tmpSqrt);
+#    fp_ox = fp_ox | floatDivOverflow(floatOne, tmpSqrt);
+#    fp_ux = fp_ux | floatDivUnderflow(floatOne, tmpSqrt);
+    fp_xx = fp_xx | fp_fi;
+    fp_vxsnan = fp_vxsnan | nan(tmpfB);
+    fp_vxsqrt = fp_vxsqrt | sqrtInvalid(tmpfB);
+    setSummaryFPSCR();
+    cr1flags();
+}
+
+#fsel fr0,fr0,fr0,fr0 0xfc 00 00 2e
+:fsel fD,fA,fC,fB is $(NOTVLE) & OP=63 & fD & fA & fB & fC & XOP_1_5=23 & Rc=0
+{
+    local tmpfA = fA;
+    local tmpfB = fB;
+    zero:4 = 0;
+    fD = fC;
+    if (tmpfA f>= int2float(zero)) goto inst_next;
+    fD = tmpfB;
+}
+
+#fsel. fr0,fr0,fr0,fr0 0xfc 00 00 2f
+:fsel. fD,fA,fC,fB is $(NOTVLE) & OP=63 & fD & fA & fB & fC & XOP_1_5=23 & Rc=1
+{
+    local tmpfA = fA;
+    local tmpfB = fB;
+    zero:4 = 0;
+    fD = fC;
+    cr1flags();
+    if (tmpfA f>= int2float(zero)) goto inst_next;
+    fD = tmpfB;
+}
+
+#fsqrt fr0,fr0 0xfc 00 00 2c
+:fsqrt fD,fB is $(NOTVLE) & OP=63 & fD & BITS_16_20=0 & fB & BITS_6_10=0 & XOP_1_5=22 & Rc=0
+{
+    local tmpfB = fB;
+    fD = sqrt(fB);
+    setFPRF(fD);
+#    fp_fr = floatSqrtRoundedUp(tmpfB);
+#    fp_fi = floatSqrtInexact(tmpfB);
+    fp_xx = fp_xx | fp_fi;
+    fp_vxsnan = fp_vxsnan | nan(tmpfB);
+#    fp_vxsqrt = fp_vxsqrt | sqrtInvalid(tmpfB);
+    setSummaryFPSCR();
+}
+
+#fsqrt. fr0,fr0 0xfc 00 00 2d
+:fsqrt. fD,fB is $(NOTVLE) & OP=63 & fD & BITS_16_20=0 & fB & BITS_6_10=0 & XOP_1_5=22 & Rc=1
+{
+    local tmpfB = fB;
+    fD = sqrt(fB);
+    setFPRF(fD);
+#    fp_fr = floatSqrtRoundedUp(tmpfB);
+#    fp_fi = floatSqrtInexact(tmpfB);
+    fp_xx = fp_xx | fp_fi;
+    fp_vxsnan = fp_vxsnan | nan(tmpfB);
+#    fp_vxsqrt = fp_vxsqrt | sqrtInvalid(tmpfB);
+    setSummaryFPSCR();
+    cr1flags();
+}
+
+#fsqrts fr0,fr0 0xec 00 00 2c
+:fsqrts fD,fB is $(NOTVLE) & OP=59 & fD & BITS_16_20=0 & fB & BITS_6_10=0 & XOP_1_5=22 & Rc=0
+{
+    local tmpfB = fB;
+    tmp:4 = float2float(sqrt(fB));
+    fD = float2float(tmp);
+    setFPRF(fD);
+#    fp_fr = floatSqrtRoundedUp(tmpfB);
+#    fp_fi = floatSqrtInexact(tmpfB);
+    fp_xx = fp_xx | fp_fi;
+    fp_vxsnan = fp_vxsnan | nan(tmpfB);
+#    fp_vxsqrt = fp_vxsqrt | sqrtInvalid(tmpfB);
+    setSummaryFPSCR();
+}
+
+#fsqrts. fr0,fr0 0xec 00 00 2d
+:fsqrts. fD,fB is $(NOTVLE) & OP=59 & fD & BITS_16_20=0 & fB & BITS_6_10=0 & XOP_1_5=22 & Rc=1
+{
+    local tmpfB = fB;
+    tmp:4 = float2float(sqrt(fB));
+    fD = float2float(tmp);
+    setFPRF(fD);
+#    fp_fr = floatSqrtRoundedUp(tmpfB);
+#    fp_fi = floatSqrtInexact(tmpfB);
+    fp_xx = fp_xx | fp_fi;
+    fp_vxsnan = fp_vxsnan | nan(tmpfB);
+#    fp_vxsqrt = fp_vxsqrt | sqrtInvalid(tmpfB);
+    setSummaryFPSCR();
+    cr1flags();
+}
+
+#fsub fr0,fr0,fr0 0xfc 00 00 28
+:fsub fD,fA,fB is $(NOTVLE) & OP=63 & fD & fA & fB & BITS_6_10=0 & XOP_1_5=20 & Rc=0
+{
+    local tmpfA = fA;
+    local tmpfB = fB;
+    fD = fA f- fB;
+    setFPSubFlags(tmpfA,tmpfB,fD);
+}
+
+#fsub. fr0,fr0,fr0 0xfc 00 00 29
+:fsub. fD,fA,fB is $(NOTVLE) & OP=63 & fD & fA & fB & BITS_6_10=0 & XOP_1_5=20 & Rc=1
+{
+    local tmpfA = fA;
+    local tmpfB = fB;
+    fD = fA f- fB;
+    setFPSubFlags(tmpfA,tmpfB,fD);
+    cr1flags();
+}
+
+#fsubs fr0,fr0,fr0 0xec 00 00 28
+:fsubs fD,fA,fB is $(NOTVLE) & OP=59 & fD & fA & fB & BITS_6_10=0 & XOP_1_5=20 & Rc=0
+{
+    local tmpfA = fA;
+    local tmpfB = fB;
+    tmp:4 = float2float(fA f- fB);
+    fD = float2float(tmp);
+    setFPSubFlags(tmpfA,tmpfB,fD);
+}
+
+#fsubs. fr0,fr0,fr0 0xec 00 00 29
+:fsubs. 
fD,fA,fB is $(NOTVLE) & OP=59 & fD & fA & fB & BITS_6_10=0 & XOP_1_5=20 & Rc=1 +{ + local tmpfA = fA; + local tmpfB = fB; + tmp:4 = float2float(fA f- fB); + fD = float2float(tmp); + setFPSubFlags(tmpfA,tmpfB,fD); + cr1flags(); +} + +@ifndef IS_ISA +# iccci is just a special form of ici +#iccci 0,r0 0x7c 00 07 8c +:iccci RA_OR_ZERO,B is OP=31 & BITS_21_25=0 & B & XOP_1_10=966 & BIT_0=0 & RA_OR_ZERO +{ + ea:$(REGISTER_SIZE) = RA_OR_ZERO + B; + instructionCacheCongruenceClassInvalidate(ea); +} +@endif + +#icread 0,r0 0x7c 00 07 cc +:icread RA_OR_ZERO,B is OP=31 & BITS_21_25=0 & B & XOP_1_10=998 & BIT_0=0 & RA_OR_ZERO +{ + ea:$(REGISTER_SIZE) = RA_OR_ZERO + B; + instructionCacheRead(ea); +} + +#lbz r0,3(0) 0x88 00 00 03 +#lbz r0,3(r2) 0x88 02 00 03 +:lbz D,dPlusRaOrZeroAddress is $(NOTVLE) & OP=34 & D & dPlusRaOrZeroAddress +{ + D = zext(*:1(dPlusRaOrZeroAddress)); + +} + +#lbzu r0,3(r2) 0x8c 02 00 03 +:lbzu D,dPlusRaAddress is $(NOTVLE) & OP=35 & D & dPlusRaAddress & A +{ + ea:$(REGISTER_SIZE) = dPlusRaAddress; + D = zext(*:1(ea)); + A = ea; +} + +#lbzux r0,r2,r0 0x7c 02 00 ee +:lbzux D,A,B is OP=31 & D & A & B & XOP_1_10=119 & BIT_0=0 +{ + ea:$(REGISTER_SIZE) = A+B; + D = zext(*:1(ea)); + A = ea; +} + +#lbzx r0,r2,r0 0x7c 02 00 ae +:lbzx D,RA_OR_ZERO,B is OP=31 & D & RA_OR_ZERO & B & XOP_1_10=87 & BIT_0=0 +{ + tmp:$(REGISTER_SIZE) = RA_OR_ZERO+B; + D = zext(*:1(tmp)); +} + +@ifdef BIT_64 +#ld r0,8(r2) 0xe8 02 00 08 +:ld D,dPlusRaOrZeroAddress is $(NOTVLE) & OP=58 & D & dPlusRaOrZeroAddress & BITS_0_1=0 +{ + D = *:8(dPlusRaOrZeroAddress); +} + +##ldarx r0,r0,r0 0x7c 00 00 a8 +#:ldarx T,RA_OR_ZERO,B is $(NOTVLE) & OP=31 & T & RA_OR_ZERO & B & XOP_1_10=84 & TX +#{ +# ea = RA_OR_ZERO+B; +# RESERVE = 1; +# RESERVE_ADDRSS = ea; +# T = *:8(ea); +#} + +#ldu r0,8(r2) 0xe8 02 00 09 +:ldu D,dsPlusRaAddress is $(NOTVLE) & OP=58 & D & dsPlusRaAddress & A & BITS_0_1=1 +{ + ea:$(REGISTER_SIZE) = dsPlusRaAddress; + D = *:8(ea); + A = ea; +} + +#ldux r0,r2,r0 0x7c 02 00 6a +:ldux D,A,B is OP=31 & D & A & B & XOP_1_10=53 & BIT_0=0 +{ + ea:$(REGISTER_SIZE) = A+B; + D = *:8(ea); + A = ea; +} + +@ifndef IS_ISA +#ldarx r0,r2,r0 0x7c 02 00 2a +:ldarx D,RA_OR_ZERO,B is OP=31 & D & RA_OR_ZERO & B & XOP_1_10=21 & BIT_0=0 +{ + ea:$(REGISTER_SIZE) = RA_OR_ZERO+B; + D = *:8(ea); +} +@endif +@endif + +#lfd fr0,8(r2) 0xc8 02 00 08 +:lfd fD,dPlusRaOrZeroAddress is $(NOTVLE) & OP=50 & fD & dPlusRaOrZeroAddress +{ + fD = *:8(dPlusRaOrZeroAddress); +} + +#lfdu fr0,8(r2) 0xcc 02 00 08 +:lfdu fD,dPlusRaAddress is $(NOTVLE) & OP=51 & fD & dPlusRaAddress & A +{ + ea:$(REGISTER_SIZE) = dPlusRaAddress; + fD = *:8(ea); + A = ea; +} +#lfdux fr0,r2,r0 0x7c 02 04 ee +:lfdux fD,A,B is $(NOTVLE) & OP=31 & fD & A & B & XOP_1_10=631 & BIT_0=0 +{ + ea:$(REGISTER_SIZE) = A+B; + fD = *:8(ea); + A = ea; +} +#lfdx fr0,r2,r0 0x7c 02 04 ae +:lfdx fD,RA_OR_ZERO,B is $(NOTVLE) & OP=31 & fD & RA_OR_ZERO & B & XOP_1_10=599 & BIT_0=0 +{ + fD = *:8(RA_OR_ZERO+B); +} + +#lfs fr0,8(r2) 0xc0 02 00 08 +:lfs fD,dPlusRaOrZeroAddress is $(NOTVLE) & OP=48 & fD & dPlusRaOrZeroAddress +{ + fD = float2float(*:4(dPlusRaOrZeroAddress)); +} +#lfsu fr0,8(r2) 0xc0 02 00 08 +:lfsu fD,dPlusRaAddress is $(NOTVLE) & OP=49 & fD & dPlusRaAddress & A +{ + ea:$(REGISTER_SIZE) = dPlusRaAddress; + fD = float2float(*:4(ea)); + A = ea; +} + +#lfsux fr0,r2,r0 0x7c 02 04 6e +:lfsux fD,A,B is $(NOTVLE) & OP=31 & fD & A & B & XOP_1_10=567 & BIT_0=0 +{ + ea:$(REGISTER_SIZE) = A+B; + fD = float2float(*:4(ea)); + A = ea; +} +#lfsx fr0,r2,r0 0x7c 02 04 2e +:lfsx fD,RA_OR_ZERO,B is 
$(NOTVLE) & OP=31 & fD & RA_OR_ZERO & B & XOP_1_10=535 & BIT_0=0
+{
+    fD = float2float(*:4(RA_OR_ZERO+B));
+}
+
+#lha r0,4(0) 0xa8 00 00 04
+#lha r0,4(r2) 0xa8 02 00 04
+:lha D,dPlusRaOrZeroAddress is $(NOTVLE) & OP=42 & D & dPlusRaOrZeroAddress
+{
+    D = sext(*:2(dPlusRaOrZeroAddress));
+}
+
+#lhau r0,8(r2) 0xac 02 00 08
+:lhau D,dPlusRaAddress is $(NOTVLE) & OP=43 & D & dPlusRaAddress & A
+{
+    ea:$(REGISTER_SIZE) = dPlusRaAddress;
+    D = sext(*:2(ea));
+    A = ea;
+}
+
+#lhaux r0,r2,r0 0x7c 02 02 ee
+:lhaux D,A,B is OP=31 & D & A & B & XOP_1_10=375 & BIT_0=0
+{
+    ea:$(REGISTER_SIZE) = A+B;
+    D = sext(*:2(ea));
+    A = ea;
+}
+
+#lhax r0,r2,r0 0x7c 02 02 ae
+:lhax D,RA_OR_ZERO,B is OP=31 & D & RA_OR_ZERO & B & XOP_1_10=343 & BIT_0=0
+{
+    D = sext(*:2(RA_OR_ZERO+B));
+}
+
+#lhbrx r0,r2,r0 0x7c 02 06 2c
+:lhbrx D,RA_OR_ZERO,B is OP=31 & D & RA_OR_ZERO & B & XOP_1_10=790 & BIT_0=0
+{
+    ea:$(REGISTER_SIZE) = RA_OR_ZERO+B;
+    tmp:$(REGISTER_SIZE) = zext(*:1(ea+1)) << 8;
+    D = tmp | zext(*:1(ea));
+}
+
+#lhz r0,4(0) 0xa0 00 00 04
+#lhz r0,4(r2) 0xa0 02 00 04
+:lhz D,dPlusRaOrZeroAddress is $(NOTVLE) & OP=40 & D & dPlusRaOrZeroAddress
+{
+    D = zext(*:2(dPlusRaOrZeroAddress));
+}
+
+#lhzu r0,4(r2) 0xa4 02 00 04
+:lhzu D,dPlusRaAddress is $(NOTVLE) & OP=41 & D & dPlusRaAddress & A
+{
+    ea:$(REGISTER_SIZE) = dPlusRaAddress;
+    D = zext(*:2(ea));
+    A = ea;
+}
+
+#lhzux r0,r2,r0 0x7c 02 02 6e
+:lhzux D,A,B is OP=31 & D & A & B & XOP_1_10=311 & BIT_0=0
+{
+    ea:$(REGISTER_SIZE) = A+B;
+    D = zext(*:2(ea));
+    A = ea;
+}
+
+#lhzx r0,r2,r0 0x7c 02 02 2e
+:lhzx D,RA_OR_ZERO,B is OP=31 & D & RA_OR_ZERO & B & XOP_1_10=279 & BIT_0=0
+{
+    D = zext(*:2(RA_OR_ZERO+B));
+}
+
+# big stuffs
+@include "lmwInstructions.sinc"
+
+@include "lswInstructions.sinc"
+
+#lswx r0,0,r0 0x7c 00 3c 2a
+#lswx r0,r2,40 0x7c 02 3c 2a
+define pcodeop lswxOp;
+:lswx D,RA_OR_ZERO,B is OP=31 & D & RA_OR_ZERO & NB & BITS_21_25 & B & XOP_1_10=533 & BIT_0=0
+{
+    D = lswxOp(D,RA_OR_ZERO,B);
+}
+
+@ifdef BIT_64
+#lwa r0,8(r2) 0xe8 02 00 0a
+:lwa D,dsPlusRaOrZeroAddress is $(NOTVLE) & OP=58 & D & dsPlusRaOrZeroAddress & BITS_0_1=2
+{
+    D = sext(*:4(dsPlusRaOrZeroAddress));
+}
+@endif
+
+#lwarx r0,r0,r0 0x7c 00 00 28
+:lwarx D,RA_OR_ZERO,B is OP=31 & D & RA_OR_ZERO & B & XOP_1_10=20 & BIT_0=0
+{
+    ea:$(REGISTER_SIZE) = RA_OR_ZERO+B;
+    #RESERVE = 1;
+    #RESERVE_ADDRSS:$(REGISTER_SIZE) = ea;
+@ifdef BIT_64
+    D = zext(*:4(ea));
+@else
+    D = *:4(ea);
+@endif
+}
+
+@ifdef BIT_64
+#lwaux r0,r2,r0 0x7c 02 02 ea
+:lwaux D,A,B is OP=31 & D & A & B & XOP_1_10=373 & BIT_0=0
+{
+    ea:$(REGISTER_SIZE) = A+B;
+    D = sext(*:4(ea));
+    A = ea;
+}
+
+#lwax r0,r2,r0 0x7c 02 02 aa
+:lwax D,RA_OR_ZERO,B is OP=31 & D & RA_OR_ZERO & B & XOP_1_10=341 & BIT_0=0
+{
+    D = sext(*:4(RA_OR_ZERO+B));
+}
+@endif
+
+#lwbrx r0,r2,r0 0x7c 02 04 2c
+:lwbrx D,RA_OR_ZERO,B is OP=31 & D & RA_OR_ZERO & B & XOP_1_10=534 & BIT_0=0
+{
+    ea:$(REGISTER_SIZE) = RA_OR_ZERO+B;
+    tmp1:$(REGISTER_SIZE) = zext(*:1(ea+3)) << 24;
+    tmp2:$(REGISTER_SIZE) = zext(*:1(ea+2)) << 16;
+    tmp3:$(REGISTER_SIZE) = zext(*:1(ea+1)) << 8;
+    D = tmp1 | tmp2 | tmp3 | zext(*:1(ea));
+}
+
+#lwz r0,4(0) 0x80 00 00 04
+#lwz r0,4(r2) 0x80 02 00 04
+:lwz D,dPlusRaOrZeroAddress is $(NOTVLE) & OP=32 & D & dPlusRaOrZeroAddress
+{
+@ifdef BIT_64
+    D = zext(*:4(dPlusRaOrZeroAddress));
+@else
+    D = *:4(dPlusRaOrZeroAddress);
+@endif
+}
+
+#lwzu r0,4(r2) 0x84 02 00 04
+:lwzu D,dPlusRaAddress is $(NOTVLE) & OP=33 & D & dPlusRaAddress & A
+{
+    ea:$(REGISTER_SIZE) = dPlusRaAddress;
+@ifdef BIT_64
+    D = zext(*:4(ea));
+@else
+    D = *:4(ea);
+@endif
+    A 
= ea; +} + +#lwzux r0,r2,r0 0x7c 02 00 6e +:lwzux D,A,B is OP=31 & D & A & B & XOP_1_10=55 & BIT_0=0 +{ + ea:$(REGISTER_SIZE) = A+B; +@ifdef BIT_64 + D = zext(*:4(ea)); +@else + D = *:4(ea); +@endif + A = ea; + +} +#lwzx r0,r2,r0 0x7c 02 00 2e +:lwzx D,RA_OR_ZERO,B is OP=31 & D & RA_OR_ZERO & B & XOP_1_10=23 & BIT_0=0 +{ +@ifdef BIT_64 + D = zext(*:4(RA_OR_ZERO+B)); +@else + D = *:4(RA_OR_ZERO+B); +@endif +} + +#macchw r0,r0,r0 0x10 00 01 58 +:macchw D,A,B is OP=4 & D & A & B & OE=0 & XOP_1_9=172 & Rc=0 +{ + D = macchw(D, A, B); +} + +#macchw. r0,r0,r0 0x10 00 01 59 +:macchw. D,A,B is OP=4 & D & A & B & OE=0 & XOP_1_9=172 & Rc=1 +{ + D = macchw(D, A, B); + cr0flags(D); +} + +#macchwo r0,r0,r0 0x10 00 05 58 +:macchwo D,A,B is OP=4 & D & A & B & OE=1 & XOP_1_9=172 & Rc=0 +{ + D = macchw(D, A, B); + xer_mac_update(D, A, B); +} + +@ifndef NoLegacyIntegerMultiplyAccumulate +#macchwo. r0,r0,r0 0x10 00 05 59 +:macchwo. D,A,B is OP=4 & D & A & B & OE=1 & XOP_1_9=172 & Rc=1 +{ + D = macchw(D, A, B); + xer_mac_update(D, A, B); + cr0flags(D); +} +@endif + +#macchws r0,r0,r0 0x10 00 01 d8 +:macchws D,A,B is OP=4 & D & A & B & OE=0 & XOP_1_9=236 & Rc=0 +{ + D = macchws(D, A, B); +} + +#macchws. r0,r0,r0 0x10 00 01 d9 +:macchws. D,A,B is OP=4 & D & A & B & OE=0 & XOP_1_9=236 & Rc=1 +{ + D = macchws(D, A, B); + cr0flags(D); +} + +@ifndef NoLegacyIntegerMultiplyAccumulate +#macchwso r0,r0,r0 0x10 00 05 d8 +:macchwso D,A,B is OP=4 & D & A & B & OE=1 & XOP_1_9=236 & Rc=0 +{ + D = macchws(D, A, B); + xer_mac_update(D, A, B); +} +@endif + +@ifndef NoLegacyIntegerMultiplyAccumulate +#macchwso. r0,r0,r0 0x10 00 05 d9 +:macchwso. D,A,B is OP=4 & D & A & B & OE=1 & XOP_1_9=236 & Rc=1 +{ + D = macchws(D, A, B); + xer_mac_update(D, A, B); + cr0flags(D); +} +@endif + +#macchwsu r0,r0,r0 0x10 00 01 98 +:macchwsu D,A,B is OP=4 & D & A & B & OE=0 & XOP_1_9=204 & Rc=0 +{ + D = macchwsu(D, A, B); +} + +#macchwsu. r0,r0,r0 0x10 00 01 99 +:macchwsu. D,A,B is OP=4 & D & A & B & OE=0 & XOP_1_9=204 & Rc=1 +{ + D = macchwsu(D, A, B); + cr0flags(D); +} + +#macchwsuo r0,r0,r0 0x10 00 05 98 +:macchwsuo D,A,B is OP=4 & D & A & B & OE=1 & XOP_1_9=204 & Rc=0 +{ + D = macchwsu(D, A, B); + xer_mac_update(D, A, B); +} + +#macchwsuo. r0,r0,r0 0x10 00 05 99 +:macchwsuo. D,A,B is OP=4 & D & A & B & OE=1 & XOP_1_9=204 & Rc=1 +{ + D = macchwsu(D, A, B); + xer_mac_update(D, A, B); + cr0flags(D); +} + +#macchwu r0,r0,r0 0x10 00 01 18 +:macchwu D,A,B is OP=4 & D & A & B & OE=0 & XOP_1_9=140 & Rc=0 +{ + D = macchwu(D, A, B); +} + +#macchwu. r0,r0,r0 0x10 00 01 19 +:macchwu. D,A,B is OP=4 & D & A & B & OE=0 & XOP_1_9=140 & Rc=1 +{ + D = macchwu(D, A, B); + cr0flags(D); +} + +#macchwuo r0,r0,r0 0x10 00 05 18 +:macchwuo D,A,B is OP=4 & D & A & B & OE=1 & XOP_1_9=140 & Rc=0 +{ + D = macchwu(D, A, B); + xer_mac_update(D, A, B); +} + +#macchwuo. r0,r0,r0 0x10 00 05 19 +:macchwuo. D,A,B is OP=4 & D & A & B & OE=1 & XOP_1_9=140 & Rc=1 +{ + D = macchwu(D, A, B); + xer_mac_update(D, A, B); + cr0flags(D); +} + + +#machhw r0,r0,r0 0x10 00 00 58 +:machhw D,A,B is OP=4 & D & A & B & OE=0 & XOP_1_9=44 & Rc=0 +{ + D = machhw(D, A, B); +} + +#machhw. r0,r0,r0 0x10 00 00 59 +:machhw. D,A,B is OP=4 & D & A & B & OE=0 & XOP_1_9=44 & Rc=1 +{ + D = machhw(D, A, B); + cr0flags(D); +} + +@ifndef NoLegacyIntegerMultiplyAccumulate +#machhwo r0,r0,r0 0x10 00 04 58 +:machhwo D,A,B is OP=4 & D & A & B & OE=1 & XOP_1_9=44 & Rc=0 +{ + D = machhw(D, A, B); + xer_mac_update(D, A, B); +} +@endif + +@ifndef NoLegacyIntegerMultiplyAccumulate +#machhwo. 
r0,r0,r0 0x10 00 04 59 +:machhwo. D,A,B is OP=4 & D & A & B & OE=1 & XOP_1_9=44 & Rc=1 +{ + D = machhw(D, A, B); + xer_mac_update(D, A, B); + cr0flags(D); +} +@endif + +#machhws r0,r0,r0 0x10 00 00 d8 +:machhws D,A,B is OP=4 & D & A & B & OE=0 & XOP_1_9=108 & Rc=0 +{ + D = machhws(D, A, B); +} + +#machhws. r0,r0,r0 0x10 00 00 d9 +:machhws. D,A,B is OP=4 & D & A & B & OE=0 & XOP_1_9=108 & Rc=1 +{ + D = machhws(D, A, B); + cr0flags(D); +} + +#machhwso r0,r0,r0 0x10 00 04 d8 +:machhwso D,A,B is OP=4 & D & A & B & OE=1 & XOP_1_9=108 & Rc=0 +{ + D = machhws(D, A, B); + xer_mac_update(D, A, B); +} + +#machhwso. r0,r0,r0 0x10 00 04 d9 +:machhwso. D,A,B is OP=4 & D & A & B & OE=1 & XOP_1_9=108 & Rc=1 +{ + D = machhws(D, A, B); + xer_mac_update(D, A, B); + cr0flags(D); +} + +#machhwsu r0,r0,r0 0x10 00 00 98 +:machhwsu D,A,B is OP=4 & D & A & B & OE=0 & XOP_1_9=76 & Rc=0 +{ + D = machhwsu(D, A, B); +} + +#machhwsu. r0,r0,r0 0x10 00 00 99 +:machhwsu. D,A,B is OP=4 & D & A & B & OE=0 & XOP_1_9=76 & Rc=1 +{ + D = machhwsu(D, A, B); + cr0flags(D); +} + +#machhwsuo r0,r0,r0 0x10 00 04 98 +:machhwsuo D,A,B is OP=4 & D & A & B & OE=1 & XOP_1_9=76 & Rc=0 +{ + D = machhwsu(D, A, B); + xer_mac_update(D, A, B); +} + +#machhwsuo. r0,r0,r0 0x10 00 04 99 +:machhwsuo. D,A,B is OP=4 & D & A & B & OE=1 & XOP_1_9=76 & Rc=1 +{ + D = machhwsu(D, A, B); + xer_mac_update(D, A, B); + cr0flags(D); +} + +#machhwu r0,r0,r0 0x10 00 00 18 +:machhwu D,A,B is OP=4 & D & A & B & OE=0 & XOP_1_9=12 & Rc=0 +{ + D = machhwu(D, A, B); +} + +#machhwu. r0,r0,r0 0x10 00 00 19 +:machhwu. D,A,B is OP=4 & D & A & B & OE=0 & XOP_1_9=12 & Rc=1 +{ + D = machhwu(D, A, B); + cr0flags(D); +} + +#machhwuo r0,r0,r0 0x10 00 04 18 +:machhwuo D,A,B is OP=4 & D & A & B & OE=1 & XOP_1_9=12 & Rc=0 +{ + D = machhwu(D, A, B); + xer_mac_update(D, A, B); +} + +#machhwuo. r0,r0,r0 0x10 00 04 19 +:machhwuo. D,A,B is OP=4 & D & A & B & OE=1 & XOP_1_9=12 & Rc=1 +{ + D = machhwu(D, A, B); + xer_mac_update(D, A, B); + cr0flags(D); +} + + +#maclhw r0,r0,r0 0x10 00 03 58 +:maclhw D,A,B is OP=4 & D & A & B & OE=0 & XOP_1_9=428 & Rc=0 +{ + D = maclhw(D, A, B); +} + +#maclhw. r0,r0,r0 0x10 00 03 59 +:maclhw. D,A,B is OP=4 & D & A & B & OE=0 & XOP_1_9=428 & Rc=1 +{ + D = maclhw(D, A, B); + cr0flags(D); +} + +#maclhwo r0,r0,r0 0x10 00 07 58 +:maclhwo D,A,B is OP=4 & D & A & B & OE=1 & XOP_1_9=428 & Rc=0 +{ + D = maclhw(D, A, B); + xer_mac_update(D, A, B); +} + +#maclhwo. r0,r0,r0 0x10 00 07 59 +:maclhwo. D,A,B is OP=4 & D & A & B & OE=1 & XOP_1_9=428 & Rc=1 +{ + D = maclhw(D, A, B); + xer_mac_update(D, A, B); + cr0flags(D); +} + +#maclhws r0,r0,r0 0x10 00 03 d8 +:maclhws D,A,B is OP=4 & D & A & B & OE=0 & XOP_1_9=492 & Rc=0 +{ + D = maclhws(D, A, B); +} + +#maclhws. r0,r0,r0 0x10 00 03 d9 +:maclhws. D,A,B is OP=4 & D & A & B & OE=0 & XOP_1_9=492 & Rc=1 +{ + D = maclhws(D, A, B); + cr0flags(D); +} + +#maclhwso r0,r0,r0 0x10 00 07 d8 +:maclhwso D,A,B is OP=4 & D & A & B & OE=1 & XOP_1_9=492 & Rc=0 +{ + D = maclhws(D, A, B); + xer_mac_update(D, A, B); +} + +#maclhwso. r0,r0,r0 0x10 00 07 d9 +:maclhwso. D,A,B is OP=4 & D & A & B & OE=1 & XOP_1_9=492 & Rc=1 +{ + D = maclhws(D, A, B); + xer_mac_update(D, A, B); + cr0flags(D); +} + +#maclhwsu r0,r0,r0 0x10 00 03 98 +:maclhwsu D,A,B is OP=4 & D & A & B & OE=0 & XOP_1_9=460 & Rc=0 +{ + D = maclhwsu(D, A, B); +} + +#maclhwsu. r0,r0,r0 0x10 00 03 99 +:maclhwsu. 
D,A,B is OP=4 & D & A & B & OE=0 & XOP_1_9=460 & Rc=1 +{ + D = maclhwsu(D, A, B); + cr0flags(D); +} + +#maclhwsuo r0,r0,r0 0x10 00 07 98 +:maclhwsuo D,A,B is OP=4 & D & A & B & OE=1 & XOP_1_9=460 & Rc=0 +{ + D = maclhwsu(D, A, B); + xer_mac_update(D, A, B); +} + +#maclhwsuo. r0,r0,r0 0x10 00 07 99 +:maclhwsuo. D,A,B is OP=4 & D & A & B & OE=1 & XOP_1_9=460 & Rc=1 +{ + D = maclhwsu(D, A, B); + xer_mac_update(D, A, B); + cr0flags(D); +} + +@ifndef NoLegacyIntegerMultiplyAccumulate +#maclhwu r0,r0,r0 0x10 00 03 18 +:maclhwu D,A,B is OP=4 & D & A & B & OE=0 & XOP_1_9=396 & Rc=0 +{ + D = maclhwu(D, A, B); +} +@endif + +@ifndef NoLegacyIntegerMultiplyAccumulate +#maclhwu. r0,r0,r0 0x10 00 03 19 +:maclhwu. D,A,B is OP=4 & D & A & B & OE=0 & XOP_1_9=396 & Rc=1 +{ + D = maclhwu(D, A, B); + cr0flags(D); +} +@endif + +#maclhwuo r0,r0,r0 0x10 00 07 18 +:maclhwuo D,A,B is OP=4 & D & A & B & OE=1 & XOP_1_9=396 & Rc=0 +{ + D = maclhwu(D, A, B); + xer_mac_update(D, A, B); +} + +#maclhwuo. r0,r0,r0 0x10 00 07 19 +:maclhwuo. D,A,B is OP=4 & D & A & B & OE=1 & XOP_1_9=396 & Rc=1 +{ + D = maclhwu(D, A, B); + xer_mac_update(D, A, B); + cr0flags(D); +} + +#mulchw r0,r0,r0 0x10 00 01 50 +:mulchw D,A,B is OP=4 & D & A & B & XOP_1_10=168 & Rc=0 +{ + D = mulchw(D, A, B); +} + +#mulchw. r0,r0,r0 0x10 00 01 51 +:mulchw. D,A,B is OP=4 & D & A & B & XOP_1_10=168 & Rc=1 +{ + D = mulchw(D, A, B); + cr0flags(D); +} + +#mulchwu r0,r0,r0 0x10 00 01 10 +:mulchwu D,A,B is OP=4 & D & A & B & XOP_1_10=136 & Rc=0 +{ + D = mulchwu(D, A, B); +} + +#mulchwu. r0,r0,r0 0x10 00 01 11 +:mulchwu. D,A,B is OP=4 & D & A & B & XOP_1_10=136 & Rc=1 +{ + D = mulchwu(D, A, B); + cr0flags(D); +} + +#mulhhw r0,r0,r0 0x10 00 00 50 +:mulhhw D,A,B is OP=4 & D & A & B & XOP_1_10=40 & Rc=0 +{ + D = mulhhw(D, A, B); +} + +#mulhhw. r0,r0,r0 0x10 00 00 51 +:mulhhw. D,A,B is OP=4 & D & A & B & XOP_1_10=40 & Rc=1 +{ + D = mulhhw(D, A, B); + cr0flags(D); +} + +#mulhhwu r0,r0,r0 0x10 00 00 10 +:mulhhwu D,A,B is OP=4 & D & A & B & XOP_1_10=8 & Rc=0 +{ + D = mulhhwu(D, A, B); +} + +#mulhhwu. r0,r0,r0 0x10 00 00 11 +:mulhhwu. D,A,B is OP=4 & D & A & B & XOP_1_10=8 & Rc=1 +{ + D = mulhhwu(D, A, B); + cr0flags(D); +} + +#mullhw r0,r0,r0 0x10 00 03 50 +:mullhw D,A,B is OP=4 & D & A & B & XOP_1_10=424 & Rc=0 +{ + D = mullhw(D, A, B); +} + +#mullhw. r0,r0,r0 0x10 00 03 51 +:mullhw. D,A,B is OP=4 & D & A & B & XOP_1_10=424 & Rc=1 +{ + D = mullhw(D, A, B); + cr0flags(D); +} + +@ifndef NoLegacyIntegerMultiplyAccumulate +# mulhwu r0,r0,r0 0x10 00 03 10 +:mullhwu D,A,B is OP=4 & D & A & B & XOP_1_10=392 & Rc=0 +{ + D = mullhwu(D, A, B); +} +@endif + +@ifndef NoLegacyIntegerMultiplyAccumulate +#mullhwu. r0,r0,r0 0x10 00 03 11 +:mullhwu. D,A,B is OP=4 & D & A & B & XOP_1_10=392 & Rc=1 +{ + D = mullhwu(D, A, B); + cr0flags(D); +} +@endif + +#nmacchw r0,r0,r0 0x10 00 01 5c +:nmacchw D,A,B is OP=4 & D & A & B & OE=0 & XOP_1_9=174 & Rc=0 +{ + D = nmacchw(D, A, B); +} + +#nmacchw. r0,r0,r0 0x10 00 01 5d +:nmacchw. D,A,B is OP=4 & D & A & B & OE=0 & XOP_1_9=174 & Rc=1 +{ + D = nmacchw(D, A, B); + cr0flags(D); +} + +#nmacchwo r0,r0,r0 0x10 00 05 5c +:nmacchwo D,A,B is OP=4 & D & A & B & OE=1 & XOP_1_9=174 & Rc=0 +{ + D = nmacchw(D, A, B); + xer_mac_update(D, A, B); +} + +#nmacchwo. r0,r0,r0 0x10 00 05 5d +:nmacchwo. 
D,A,B is OP=4 & D & A & B & OE=1 & XOP_1_9=174 & Rc=1 +{ + D = nmacchw(D, A, B); + xer_mac_update(D, A, B); + cr0flags(D); +} + +#nmacchws r0,r0,r0 0x10 00 01 dc +:nmacchws D,A,B is OP=4 & D & A & B & OE=0 & XOP_1_9=238 & Rc=0 +{ + D = nmacchws(D, A, B); +} + +#nmacchws. r0,r0,r0 0x10 00 01 dd +:nmacchws. D,A,B is OP=4 & D & A & B & OE=0 & XOP_1_9=238 & Rc=1 +{ + D = nmacchws(D, A, B); + cr0flags(D); +} + +#nmacchwso r0,r0,r0 0x10 00 05 dc +:nmacchwso D,A,B is OP=4 & D & A & B & OE=1 & XOP_1_9=238 & Rc=0 +{ + D = nmacchws(D, A, B); + xer_mac_update(D, A, B); +} + +#nmacchwso. r0,r0,r0 0x10 00 05 dd +:nmacchwso. D,A,B is OP=4 & D & A & B & OE=1 & XOP_1_9=238 & Rc=1 +{ + D = nmacchws(D, A, B); + xer_mac_update(D, A, B); + cr0flags(D); +} + +#nmachhw r0,r0,r0 0x10 00 00 5c +:nmachhw D,A,B is OP=4 & D & A & B & OE=0 & XOP_1_9=46 & Rc=0 +{ + D = nmachhw(D, A, B); +} + +#nmachhw. r0,r0,r0 0x10 00 00 5d +:nmachhw. D,A,B is OP=4 & D & A & B & OE=0 & XOP_1_9=46 & Rc=1 +{ + D = nmachhw(D, A, B); + cr0flags(D); +} + +#nmachhwo r0,r0,r0 0x10 00 04 5c +:nmachhwo D,A,B is OP=4 & D & A & B & OE=1 & XOP_1_9=46 & Rc=0 +{ + D = nmachhw(D, A, B); + xer_mac_update(D, A, B); +} + +#nmachhwo. r0,r0,r0 0x10 00 04 5d +:nmachhwo. D,A,B is OP=4 & D & A & B & OE=1 & XOP_1_9=46 & Rc=1 +{ + D = nmachhw(D, A, B); + xer_mac_update(D, A, B); + cr0flags(D); +} + +#nmachhws r0,r0,r0 0x10 00 00 dc +:nmachhws D,A,B is OP=4 & D & A & B & OE=0 & XOP_1_9=110 & Rc=0 +{ + D = nmachhws(D, A, B); +} + +#nmachhws. r0,r0,r0 0x10 00 00 dd +:nmachhws. D,A,B is OP=4 & D & A & B & OE=0 & XOP_1_9=110 & Rc=1 +{ + D = nmachhws(D, A, B); + cr0flags(D); +} + +#nmachhwso r0,r0,r0 0x10 00 04 dc +:nmachhwso D,A,B is OP=4 & D & A & B & OE=1 & XOP_1_9=110 & Rc=0 +{ + D = nmachhws(D, A, B); + xer_mac_update(D, A, B); +} + +#nmachhwso. r0,r0,r0 0x10 00 04 dd +:nmachhwso. D,A,B is OP=4 & D & A & B & OE=1 & XOP_1_9=110 & Rc=1 +{ + D = nmachhws(D, A, B); + xer_mac_update(D, A, B); + cr0flags(D); +} + +#nmaclhw r0,r0,r0 0x10 00 03 5c +:nmaclhw D,A,B is OP=4 & D & A & B & OE=0 & XOP_1_9=430 & Rc=0 +{ + D = nmaclhw(D, A, B); +} + +#nmaclhw. r0,r0,r0 0x10 00 03 5d +:nmaclhw. D,A,B is OP=4 & D & A & B & OE=0 & XOP_1_9=430 & Rc=1 +{ + D = nmaclhw(D, A, B); + cr0flags(D); +} + +#nmaclhwo r0,r0,r0 0x10 00 07 5c +:nmaclhwo D,A,B is OP=4 & D & A & B & OE=1 & XOP_1_9=430 & Rc=0 +{ + D = nmaclhw(D, A, B); + xer_mac_update(D, A, B); +} + +#nmaclhwo. r0,r0,r0 0x10 00 07 5d +:nmaclhwo. D,A,B is OP=4 & D & A & B & OE=1 & XOP_1_9=430 & Rc=1 +{ + D = nmaclhw(D, A, B); + xer_mac_update(D, A, B); + cr0flags(D); +} + +#nmaclhws r0,r0,r0 0x10 00 03 dc +:nmaclhws D,A,B is OP=4 & D & A & B & OE=0 & XOP_1_9=494 & Rc=0 +{ + D = nmaclhws(D, A, B); +} + +#nmaclhws. r0,r0,r0 0x10 00 03 dd +:nmaclhws. D,A,B is OP=4 & D & A & B & OE=0 & XOP_1_9=494 & Rc=1 +{ + D = nmaclhws(D, A, B); + cr0flags(D); +} + +#nmaclhwso r0,r0,r0 0x10 00 07 dc +:nmaclhwso D,A,B is OP=4 & D & A & B & OE=1 & XOP_1_9=494 & Rc=0 +{ + D = nmaclhws(D, A, B); + xer_mac_update(D, A, B); +} + +#nmaclhwso. r0,r0,r0 0x10 00 07 dd +:nmaclhwso. 
D,A,B is OP=4 & D & A & B & OE=1 & XOP_1_9=494 & Rc=1 +{ + D = nmaclhws(D, A, B); + xer_mac_update(D, A, B); + cr0flags(D); +} + +#mcrf cr0,cr0 0x4c 00 00 00 +:mcrf CRFD,CRFS is $(NOTVLE) & OP=19 & CRFD & BITS_21_22=0 & CRFS & BITS_0_17=0 +{ + CRFD = CRFS; +} + +#mcrfs cr0,cr0 0xfc 00 00 80 +:mcrfs CRFD,CRFS is $(NOTVLE) & OP=63 & CRFD & FPSCR_CRFS & BITS_21_22=0 & CRFS & BITS_11_17=0 & XOP_1_10=64 & BIT_0=0 +{ + CRFD = FPSCR_CRFS; +} + +#mcrxr cr0 0x7c 00 04 00 +:mcrxr CRFD is OP=31 & CRFD & BITS_11_22=0 & XOP_1_10=512 & BIT_0=0 +{ + CRFD = (xer_so & 1) << 3 | (xer_ov & 1) << 2 | (xer_ca & 1) << 1; + xer_so = 0; + xer_ov = 0; + xer_ca = 0; +} + +#mfcr r0 0x7c 00 00 26 +:mfcr D is OP=31 & D & BITS_11_20=0 & XOP_1_10=19 & BIT_0=0 +{ + tmp:4 = zext(cr0 & 0xf) << 28 | + zext(cr1 & 0xf) << 24 | + zext(cr2 & 0xf) << 20 | + zext(cr3 & 0xf) << 16 | + zext(cr4 & 0xf) << 12 | + zext(cr5 & 0xf) << 8 | + zext(cr6 & 0xf) << 4 | + zext(cr7 & 0xf); +@ifdef BIT_64 + D = zext(tmp); +@else + D = tmp; +@endif +} + +#mfocrf D,cr1 0x7c 31 00 26 +:mfocrf D,CRM_CR is OP=31 & D & BIT_20=1 & CRM_CR & BIT_11 & XOP_1_10=19 & BIT_0=0 +{ +@ifdef BIT_64 + D = zext(CRM_CR); +@else + D = CRM_CR; +@endif +} + +#mffs fD 0xfc 00 04 8e +:mffs fD is $(NOTVLE) & OP=63 & fD & BITS_11_20=0 & XOP_1_10=583 & Rc=0 +{ + tmp:4 = 0; + packFPSCR(tmp); + fD = zext(tmp); +} + +#mffs. fD 0xfc 00 04 8f +:mffs. fD is $(NOTVLE) & OP=63 & fD & BITS_11_20=0 & XOP_1_10=583 & Rc=1 +{ + tmp:4 = 0; + packFPSCR(tmp); + fD = zext(tmp); + cr1flags(); +} + +### is this pcode correct on 64-bit bridge? +#mfsr r0,r0 0x7c 00 04 a6 +:mfsr D,B is $(NOTVLE) & OP=31 & D & SR & BIT_20=0 & B & BITS_11_15=0 & XOP_1_10=595 & BIT_0=0 +{ +@ifdef BIT_64 + D = zext(SR); +@else + D = SR; +@endif +} +#mfsrin r0,r0 0x7c 00 05 26 +:mfsrin D,B is $(NOTVLE) & OP=31 & D & BITS_16_20=0 & B & XOP_1_10=659 & BIT_0=0 +{ +@ifdef BIT_64 + tmp:4 = (B:4 >> 28); +@else + tmp:$(REGISTER_SIZE) = (B >> 28); +@endif + D = *[register]:4 ($(SEG_REGISTER_BASE)+tmp); +} + +#mtcrf 10,r0 0x7c 01 01 20 +:mtcrf CRM,S is OP=31 & S & BIT_20=0 & CRM & CRM0 & CRM1 & CRM2 & CRM3 & CRM4 & CRM5 & CRM6 & CRM7 & BIT_11=0 & XOP_1_10=144 & BIT_0=0 +{ + tmp:$(REGISTER_SIZE) = (S >> 28) & 0xf; + cr0 = (cr0 * (CRM0:1 == 0)) | (tmp:1 * (CRM0:1 == 1)); + + tmp = (S >> 24) & 0xf; + cr1 = (cr1 * (CRM1:1 == 0)) | (tmp:1 * (CRM1:1 == 1)); + + tmp = (S >> 20) & 0xf; + cr2 = (cr2 * (CRM2:1 == 0)) | (tmp:1 * (CRM2:1 == 1)); + + tmp = (S >> 16) & 0xf; + cr3 = (cr3 * (CRM3:1 == 0)) | (tmp:1 * (CRM3:1 == 1)); + + tmp = (S >> 12) & 0xf; + cr4 = (cr4 * (CRM4:1 == 0)) | (tmp:1 * (CRM4:1 == 1)); + + tmp = (S >> 8) & 0xf; + cr5 = (cr5 * (CRM5:1 == 0)) | (tmp:1 * (CRM5:1 == 1)); + + tmp = (S >> 4) & 0xf; + cr6 = (cr6 * (CRM6:1 == 0)) | (tmp:1 * (CRM6:1 == 1)); + + tmp = S & 0xf; + cr7 = (cr7 * (CRM7:1 == 0)) | (tmp:1 * (CRM7:1 == 1)); +} + +#mtfsb0 fp_ux 0xfc 80 00 8c +:mtfsb0 CRBD is $(NOTVLE) & OP=63 & CRBD & BITS_11_20=0 & XOP_1_10=70 & Rc=0 +{ + CRBD = 0; +} +#mtfsb0. fp_ux 0xfc 80 00 8d +:mtfsb0. CRBD is $(NOTVLE) & OP=63 & CRBD & BITS_11_20=0 & XOP_1_10=70 & Rc=1 +{ + CRBD = 0; + cr1flags(); +} +#mtfsb1 fp_ux 0xfc 80 00 4c +:mtfsb1 CRBD is $(NOTVLE) & OP=63 & CRBD & BITS_11_20=0 & XOP_1_10=38 & Rc=0 +{ + CRBD = 1; +} +#mtfsb1. fp_ux 0xfc 80 00 4d +:mtfsb1. 
CRBD is $(NOTVLE) & OP=63 & CRBD & BITS_11_20=0 & XOP_1_10=38 & Rc=1
+{
+    CRBD = 1;
+    cr1flags();
+}
+
+#mtfsf 10,fr0 0xfc 00 05 8e
+:mtfsf FM,fB is $(NOTVLE) & OP=63 & BIT_25=0 & FM & FM0 & FM1 & FM2 & FM3 & FM4 & FM5 & FM6 & FM7 & BIT_16=0 & fB & XOP_1_10=711 & Rc=0
+{
+    tmp:4 = 0;
+    packFPSCR(tmp);
+
+    mask0:4 = zext((FM0:1 == 1) * 0xf) << 28;
+    mask1:4 = zext((FM1:1 == 1) * 0xf) << 24;
+    mask2:4 = zext((FM2:1 == 1) * 0xf) << 20;
+    mask3:4 = zext((FM3:1 == 1) * 0xf) << 16;
+    mask4:4 = zext((FM4:1 == 1) * 0xf) << 12;
+    mask5:4 = zext((FM5:1 == 1) * 0xf) << 8;
+    mask6:4 = zext((FM6:1 == 1) * 0xf) << 4;
+    mask7:4 = zext((FM7:1 == 1) * 0xf);
+
+    mask:4 = mask0 | mask1 | mask2 | mask3 | mask4 | mask5 | mask6 | mask7;
+
+    tmp1:4 = fB:4;
+    tmp2:4 = (tmp & ~mask) | (tmp1 & mask);
+    unpackFPSCR(tmp2);
+}
+
+#mtfsf. 10,fr0 0xfc 00 05 8f
+:mtfsf. FM,fB is $(NOTVLE) & OP=63 & BIT_25=0 & FM & FM0 & FM1 & FM2 & FM3 & FM4 & FM5 & FM6 & FM7 & BIT_16=0 & fB & XOP_1_10=711 & Rc=1
+{
+    tmp:4 = 0;
+    packFPSCR(tmp);
+
+    mask0:4 = zext((FM0:1 == 1) * 0xf) << 28;
+    mask1:4 = zext((FM1:1 == 1) * 0xf) << 24;
+    mask2:4 = zext((FM2:1 == 1) * 0xf) << 20;
+    mask3:4 = zext((FM3:1 == 1) * 0xf) << 16;
+    mask4:4 = zext((FM4:1 == 1) * 0xf) << 12;
+    mask5:4 = zext((FM5:1 == 1) * 0xf) << 8;
+    mask6:4 = zext((FM6:1 == 1) * 0xf) << 4;
+    mask7:4 = zext((FM7:1 == 1) * 0xf);
+
+    mask:4 = mask0 | mask1 | mask2 | mask3 | mask4 | mask5 | mask6 | mask7;
+
+    tmp1:4 = fB:4;
+    tmp2:4 = (tmp & ~mask) | (tmp1 & mask);
+    unpackFPSCR(tmp2);
+    cr1flags();
+}
+
+#mtfsfi 10,3 0xfc 00 01 0c
+:mtfsfi crfD,IMM is $(NOTVLE) & OP=63 & crfD & BITS_16_22=0 & IMM & BIT_11=0 & XOP_1_10=134 & Rc=0
+{
+    tmp:4 = 0;
+    packFPSCR(tmp);
+    shift:1 = 28-(crfD*4);
+    mask:4 = 0xf << shift;
+    tmp1:4 = IMM << shift;
+    tmp2:4 = (tmp & ~mask) | tmp1;
+    unpackFPSCR(tmp2);
+}
+
+#mtfsfi. 10,3 0xfc 00 01 0d
+:mtfsfi. crfD,IMM is $(NOTVLE) & OP=63 & crfD & BITS_16_22=0 & IMM & BIT_11=0 & XOP_1_10=134 & Rc=1
+{
+    tmp:4 = 0;
+    packFPSCR(tmp);
+    shift:1 = 28-(crfD*4);
+    mask:4 = 0xf << shift;
+    tmp1:4 = IMM << shift;
+    tmp2:4 = (tmp & ~mask) | tmp1;
+    unpackFPSCR(tmp2);
+    cr1flags();
+}
+
+# This instruction is not exclusive to 64 bit processors, per page 1259 of the PowerISA manual.
+# However, it does seem to require 64 bit registers, so it is currently restricted to 64 bit machines.
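+#
+# Worked example (illustrative values, not taken from the manual): for the L=0
+# form below, S = 0x8000000000000000 gives bit0=1, so tmp = S & 0x6fffffffffff6fcf
+# is 0 and tmp = tmp | ((bit0 | bit1) << 63) turns the MSR(0) position back on;
+# the final MSR & 0xefffffff00009020 | tmp keeps the old values of the MSR bits
+# that the L=0 form may not modify. All of these shifts assume 64-bit registers.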
+@ifdef BIT_64
+#mtmsrd r0,0 0x7c 00 01 64
+:mtmsrd S,0 is $(NOTVLE) & OP=31 & S & BITS_17_20=0 & MSR_L=0 & BITS_11_15=0 & XOP_1_10=178 & BIT_0=0
+{
+    bit0:8 = S >> 63 & 1;
+    bit1:8 = S >> 62 & 1;
+    bit49:8 = (S >> 14) & 1;
+    bit59:8 = (S >> 4) & 1;
+    tmp:8 = S & 0x6fffffffffff6fcf;
+    tmp = tmp | ((bit0 | bit1) << 63);
+    tmp = tmp | ((bit59 | bit49) << 5);
+    MSR = MSR & 0xefffffff00009020 | tmp;
+}
+
+#mtmsrd r0,1 0x7c 01 01 64
+:mtmsrd S,1 is $(NOTVLE) & OP=31 & S & BITS_17_20=0 & MSR_L=1 & BITS_11_15=0 & XOP_1_10=178 & BIT_0=0
+{
+    mask:8 = 0x000000000000fffe & S;
+    MSR = (MSR & ~mask) | (S & mask);
+}
+@endif
+CRM_val: crmval is CRM [crmval = CRM+0;] {export *[const]:1 crmval;}
+#mtocrf 10,r0 0x7c 21 01 20
+:mtocrf CRM_val,S is OP=31 & S & BIT_20=1 & CRM_val & CRM0 & CRM1 & CRM2 & CRM3 & CRM4 & CRM5 & CRM6 & CRM7 & BIT_11=0 & XOP_1_10=144 & BIT_0=0
+{
+    tmp:$(REGISTER_SIZE) = (S >> 28) & 0xf;
+    cr0 = (cr0 * (CRM_val != 128)) | (tmp:1 * (CRM_val == 128));
+
+    tmp = (S >> 24) & 0xf;
+    cr1 = (cr1 * (CRM_val != 64)) | (tmp:1 * (CRM_val == 64));
+
+    tmp = (S >> 20) & 0xf;
+    cr2 = (cr2 * (CRM_val != 32)) | (tmp:1 * (CRM_val == 32));
+
+    tmp = (S >> 16) & 0xf;
+    cr3 = (cr3 * (CRM_val != 16)) | (tmp:1 * (CRM_val == 16));
+
+    tmp = (S >> 12) & 0xf;
+    cr4 = (cr4 * (CRM_val != 8)) | (tmp:1 * (CRM_val == 8));
+
+    tmp = (S >> 8) & 0xf;
+    cr5 = (cr5 * (CRM_val != 4)) | (tmp:1 * (CRM_val == 4));
+
+    tmp = (S >> 4) & 0xf;
+    cr6 = (cr6 * (CRM_val != 2)) | (tmp:1 * (CRM_val == 2));
+
+    tmp = S & 0xf;
+    cr7 = (cr7 * (CRM_val != 1)) | (tmp:1 * (CRM_val == 1));
+}
+
+### is this pcode correct on 64-bit bridge?
+#mtsr sr0,r0 0x7c 00 01 a4
+:mtsr SR,S is $(NOTVLE) & OP=31 & S & SR & BIT_20=0 & B & BITS_11_15=0 & XOP_1_10=210 & BIT_0=0
+{
+@ifdef BIT_64
+    SR = S:4;
+@else
+    SR = S;
+@endif
+}
+
+#mtsrd sr0,r0 0x7c 00 00 a4
+:mtsrd SR,S is $(NOTVLE) & OP=31 & S & BIT_20=0 & SR & BITS_11_15=0 & XOP_1_10=82 & BIT_0=0
+{
+    SR = S:4;
+}
+
+#mtsrdin r0,r0 0x7c 00 00 e4
+:mtsrdin S,B is $(NOTVLE) & OP=31 & S & BITS_16_20=0 & B & XOP_1_10=114 & BIT_0=0
+{
+    local tmp = (B >> 28) & 0xf;
+    *[register]:4 ($(SEG_REGISTER_BASE)+tmp:4) = S:4;
+}
+
+### is this pcode correct on 64-bit bridge?
+#mtsrin r0,r0 0x7c 00 01 e4
+:mtsrin S,B is $(NOTVLE) & OP=31 & S & BITS_16_20=0 & B & XOP_1_10=242 & BIT_0=0
+{
+@ifdef BIT_64
+    tmp:4 = (B:4 >> 28);
+@else
+    tmp:$(REGISTER_SIZE) = (B >> 28);
+@endif
+    *[register]:4 ($(SEG_REGISTER_BASE)+tmp) = S;
+}
+
+@ifdef BIT_64
+#mulhd r0,r0 0x7c 00 00 92
+:mulhd D,A,B is OP=31 & D & A & B & BIT_10=0 & XOP_1_9=73 & Rc=0
+{
+    tmp:16 = sext(A) * sext(B);
+    D = tmp(8);
+}
+
+#mulhd. r0,r0 0x7c 00 00 93
+:mulhd. D,A,B is OP=31 & D & A & B & BIT_10=0 & XOP_1_9=73 & Rc=1
+{
+    tmp:16 = sext(A) * sext(B);
+    D = tmp(8);
+    cr0flags(D);
+}
+
+#mulhdu r0,r0 0x7c 00 00 12
+:mulhdu D,A,B is OP=31 & D & A & B & BIT_10=0 & XOP_1_9=9 & Rc=0
+{
+    tmp:16 = zext(A) * zext(B);
+    D = tmp(8);
+}
+
+#mulhdu. r0,r0 0x7c 00 00 13
+:mulhdu. D,A,B is OP=31 & D & A & B & BIT_10=0 & XOP_1_9=9 & Rc=1
+{
+    tmp:16 = zext(A) * zext(B);
+    D = tmp(8);
+    cr0flags(D);
+}
+@endif
+
+#mulhw r0,r0,r0 0x7c 00 00 96
+:mulhw D,A,B is OP=31 & D & A & B & BIT_10=0 & XOP_1_9=75 & Rc=0
+{
+@ifdef BIT_64
+    tmp:8 = sext(A:4) * sext(B:4);
+    tmp2:4 = tmp(4);
+    D = zext(tmp2);
+@else
+    tmp:8 = sext(A) * sext(B);
+    D = tmp(4);
+@endif
+}
+
+#mulhw. r0,r0,r0 0x7c 00 00 97
+:mulhw. 
D,A,B is OP=31 & D & A & B & BIT_10=0 & XOP_1_9=75 & Rc=1 +{ +@ifdef BIT_64 + tmp:8 = sext(A:4) * sext(B:4); + tmp2:4 = tmp(4); + D = zext(tmp2); +@else + tmp:8 = sext(A) * sext(B); + D = tmp(4); +@endif + cr0flags(D); +} + +#mulhwu r0,r0,r0 0x7c 00 00 16 +:mulhwu D,A,B is OP=31 & D & A & B & BIT_10=0 & XOP_1_9=11 & Rc=0 +{ +@ifdef BIT_64 + tmp:8 = zext(A:4) * zext(B:4); + tmp2:4 = tmp(4); + D=zext(tmp2); +@else + tmp:8 = zext(A) * zext(B); + D = tmp(4); +@endif +} +#mulhwu. r0,r0,r0 0x7c 00 00 17 +:mulhwu. D,A,B is OP=31 & D & A & B & BIT_10=0 & XOP_1_9=11 & Rc=1 +{ +@ifdef BIT_64 + tmp:8 = zext(A:4) * zext(B:4); + tmp2:4 = tmp(4); + D=zext(tmp2); +@else + tmp:8 = zext(A) * zext(B); + D = tmp(4); +@endif + cr0flags(D); +} + +@ifdef BIT_64 +#mulld r0, r0, r0 0x7C 00 01 D2 +:mulld D,A,B is OP=31 & D & A & B & OE=0 & XOP_1_9=233 & Rc=0 +{ + tmp:16 = sext(A) * sext(B); + D = tmp:8; +} + +#mulld. r0, r0, r0 0x7C 00 01 D3 +:mulld. D,A,B is OP=31 & D & A & B & OE=0 & XOP_1_9=233 & Rc=1 +{ + tmp:16 = sext(A) * sext(B); + D = tmp:8; + cr0flags(D); +} + +#mulldo r0, r0, r0 0x7C 00 05 D2 +:mulldo D,A,B is OP=31 & D & A & B & OE=1 & XOP_1_9=233 & Rc=0 +{ + tmp:16 = sext(A) * sext(B); + D = tmp:8; + mulOverflow128(tmp); +} + +#mulldo. r0, r0, r0 0x7C 00 05 D3 +:mulldo. D,A,B is OP=31 & D & A & B & OE=1 & XOP_1_9=233 & Rc=1 +{ + tmp:16 = sext(A) * sext(B); + D = tmp:8; + mulOverflow128(tmp); + cr0flags(D); +} + +@endif + +#mulli r0,r0,r0 0x1C 00 00 00 +:mulli D,A,SIMM is $(NOTVLE) & OP=7 & D & A & SIMM +{ + D = A * SIMM; +} + +#mullw r0,r0,r0 0x7C 00 01 D6 +:mullw D,A,B is OP=31 & D & A & B & OE=0 & XOP_1_9=235 & Rc=0 +{ +@ifdef BIT_64 + D = sext(A:4) * sext(B:4); +@else + D = A*B; +@endif +} + +#mullw. r0,r0,r0 0x7C 00 01 D7 +:mullw. D,A,B is OP=31 & D & A & B & OE=0 & XOP_1_9=235 & Rc=1 +{ +@ifdef BIT_64 + D = sext(A:4) * sext(B:4); +@else + D = A*B; +@endif + cr0flags(D); +} + +#mullwo r0,r0,r0 0x7C 00 05 D6 +:mullwo D,A,B is OP=31 & D & A & B & OE=1 & XOP_1_9=235 & Rc=0 +{ +@ifdef BIT_64 + D = sext(A:4) * sext(B:4); + mulOverflow64(D); +@else + tmp:8 = sext(A) * sext(B); + mulOverflow64(tmp); + D = tmp:4; +@endif +} + +#mullwo. r0,r0,r0 0x7C 00 05 D7 +:mullwo. D,A,B is OP=31 & D & A & B & OE=1 & XOP_1_9=235 & Rc=1 +{ +@ifdef BIT_64 + D = sext(A:4) * sext(B:4); + mulOverflow64(D); +@else + tmp:8 = sext(A) * sext(B); + mulOverflow64(tmp); + D = tmp:4; +@endif + cr0flags(D); +} + +#nand r0,r0,r0 0x7C 00 03 B8 +:nand A,S,B is OP=31 & S & A & B & XOP_1_10=476 & Rc=0 +{ + A = ~(S & B); +} + +#nand. r0,r0,r0 0x7C 00 03 B9 +:nand. A,S,B is OP=31 & S & A & B & XOP_1_10=476 & Rc=1 +{ + A = ~(S & B); + cr0flags( A ); +} + +#neg r0,r0 0x7C 00 00 D0 +:neg D,A is OP=31 & D & A & BITS_11_15=0 & OE=0 & XOP_1_9=104 & Rc=0 +{ + D = -A; +} + +#neg. r0,r0 0x7C 00 00 D1 +:neg. D,A is OP=31 & D & A & BITS_11_15=0 & OE=0 & XOP_1_9=104 & Rc=1 +{ + D = -A; + cr0flags( D ); +} + +#nego r0,r0 0x7C 00 04 D0 +:nego D,A is $(NOTVLE) & OP=31 & D & A & BITS_11_15=0 & OE=1 & XOP_1_9=104 & Rc=0 +{ + subOverflow(A,1); + D = -A; +} + +#nego. r0,r0 0x7C 00 04 D1 +:nego. D,A is OP=31 & D & A & BITS_11_15=0 & OE=1 & XOP_1_9=104 & Rc=1 +{ + subOverflow(A,1); + D = -A; + cr0flags( D ); +} + +#nor r0,r0,r0 0x7C 00 00 F8 +:nor A,S,B is OP=31 & A & S & B & XOP_1_10=124 & Rc=0 +{ + A = ~(S | B); +} + +#nor. r0,r0,r0 0x7C 00 00 F9 +:nor. A,S,B is OP=31 & A & S & B & XOP_1_10=124 & Rc=1 +{ + A = ~(S | B); + cr0flags(A); +} + +#or r0,r0,r0 0x7C 00 03 78 +:or A,S,B is OP=31 & A & S & B & XOP_1_10=444 & Rc=0 +{ + A = (S | B); +} + +#or. 
r0,r0,r0 0x7C 00 03 79
+:or. A,S,B is OP=31 & A & S & B & XOP_1_10=444 & Rc=1
+{
+    A = (S | B);
+    cr0flags(A);
+}
+
+#orc r0,r0,r0 0x7C 00 03 38
+:orc A,S,B is OP=31 & A & S & B & XOP_1_10=412 & Rc=0
+{
+    A = S | ~B;
+}
+
+#orc. r0,r0,r0 0x7C 00 03 39
+:orc. A,S,B is OP=31 & A & S & B & XOP_1_10=412 & Rc=1
+{
+    A = S | ~B;
+    cr0flags(A);
+}
+
+#ori r0,r0,r0 0x60 00 00 00
+:ori A,S,UIMM is $(NOTVLE) & OP=24 & A & S & UIMM
+{
+    A = S | UIMM;
+}
+
+#oris r0,r0,r0 0x64 00 00 00
+:oris A,S,UIMM is $(NOTVLE) & OP=25 & A & S & UIMM
+{
+    A = S | (UIMM << 16);
+}
+
+#rfid 0x4c 00 00 24
+:rfid is $(NOTVLE) & OP=19 & BITS_11_25=0 & XOP_1_10=18 & BIT_0=0
+{
+    MSR = returnFromInterrupt(MSR, SRR1);
+    local ra = SRR0;
+    return[ra];
+}
+
+@ifdef BIT_64
+#rldcl r0,r0,r0,0 0x78 00 00 10
+:rldcl A,S,B,MB is $(NOTVLE) & OP=30 & S & A & B & MB & XOP_1_4=8 & Rc=0
+{
+    shift:$(REGISTER_SIZE) = B & 0x3f;
+    tmp:$(REGISTER_SIZE) = (S << shift) | (S >> (64-shift));
+    A = tmp & (0xffffffffffffffff >> MB);
+}
+
+#rldcl. r0,r0,r0,0 0x78 00 00 11
+:rldcl. A,S,B,MB is $(NOTVLE) & OP=30 & S & A & B & MB & XOP_1_4=8 & Rc=1
+{
+    shift:$(REGISTER_SIZE) = B & 0x3f;
+    tmp:$(REGISTER_SIZE) = (S << shift) | (S >> (64-shift));
+    A = tmp & (0xffffffffffffffff >> MB);
+    cr0flags(A);
+}
+
+#rldcr r0,r0,r0,0 0x78 00 00 12
+:rldcr A,S,B,MB is $(NOTVLE) & OP=30 & S & A & B & MB & XOP_1_4=9 & Rc=0 & rotmask_Z
+{
+    shift:$(REGISTER_SIZE) = B & 0x3f;
+    tmp:$(REGISTER_SIZE) = (S << shift) | (S >> (64-shift));
+    A = tmp & rotmask_Z;
+}
+
+#rldcr. r0,r0,r0,0 0x78 00 00 13
+:rldcr. A,S,B,MB is $(NOTVLE) & OP=30 & S & A & B & MB & XOP_1_4=9 & Rc=1 & rotmask_Z
+{
+    shift:$(REGISTER_SIZE) = B & 0x3f;
+    tmp:$(REGISTER_SIZE) = (S << shift) | (S >> (64-shift));
+    A = tmp & rotmask_Z;
+    cr0flags(A);
+}
+
+#rldic r0,r0,r0,0 0x78 00 00 08
+:rldic A,S,SH,MB is $(NOTVLE) & OP=30 & S & A & B & SH & MB & XOP_2_4=2 & Rc=0 & rotmask_SH
+{
+    shift:4 = SH;
+    tmp:$(REGISTER_SIZE) = (S << shift) | (S >> (64-shift));
+    A = tmp & rotmask_SH;
+}
+
+#rldic. r0,r0,r0,0 0x78 00 00 09
+:rldic. A,S,SH,MB is $(NOTVLE) & OP=30 & S & A & B & SH & MB & XOP_2_4=2 & Rc=1 & rotmask_SH
+{
+    shift:4 = SH;
+    tmp:$(REGISTER_SIZE) = (S << shift) | (S >> (64-shift));
+    A = tmp & rotmask_SH;
+    cr0flags(A);
+}
+
+#rldicl r0,r0,r0,0 0x78 00 00 00
+:rldicl A,S,SH,MB is $(NOTVLE) & OP=30 & S & A & B & SH & MB & XOP_2_4=0 & Rc=0
+{
+    shift:4 = SH;
+    tmp:$(REGISTER_SIZE) = (S << shift) | (S >> (64-shift));
+    A = tmp & (0xffffffffffffffff >> MB);
+}
+
+#rldicl. r0,r0,r0,0 0x78 00 00 01
+:rldicl. A,S,SH,MB is $(NOTVLE) & OP=30 & S & A & B & SH & MB & XOP_2_4=0 & Rc=1
+{
+    shift:4 = SH;
+    tmp:$(REGISTER_SIZE) = (S << shift) | (S >> (64-shift));
+    A = tmp & (0xffffffffffffffff >> MB);
+    cr0flags(A);
+}
+
+#rldicr r0,r0,r0,0 0x78 00 00 04
+:rldicr A,S,SH,MB is $(NOTVLE) & OP=30 & S & A & B & SH & MB & XOP_2_4=1 & Rc=0
+{
+    shift:4 = SH;
+    tmp:$(REGISTER_SIZE) = (S << shift) | (S >> (64-shift));
+    A = tmp & (0xffffffffffffffff << (63-MB));
+}
+
+#rldicr. r0,r0,r0,0 0x78 00 00 05
+:rldicr. A,S,SH,MB is $(NOTVLE) & OP=30 & S & A & B & SH & MB & XOP_2_4=1 & Rc=1
+{
+    shift:4 = SH;
+    tmp:$(REGISTER_SIZE) = (S << shift) | (S >> (64-shift));
+    A = tmp & (0xffffffffffffffff << (63-MB));
+    cr0flags(A);
+}
+
+#rldimi r0,r0,r0,0 0x78 00 00 0c
+:rldimi A,S,SH,MB is $(NOTVLE) & OP=30 & S & A & B & SH & MB & XOP_2_4=3 & Rc=0 & rotmask_SH
+{
+    shift:4 = SH;
+    tmp:$(REGISTER_SIZE) = (S << shift) | (S >> (64-shift));
+    A = (tmp & rotmask_SH) | (A & ~rotmask_SH);
+}
+
+#rldimi. r0,r0,r0,0 0x78 00 00 0d
+:rldimi. A,S,SH,MB is $(NOTVLE) & OP=30 & S & A & B & SH & MB & XOP_2_4=3 & Rc=1 & rotmask_SH
+{
+    shift:4 = SH;
+    tmp:$(REGISTER_SIZE) = (S << shift) | (S >> (64-shift));
+    A = (tmp & rotmask_SH) | (A & ~rotmask_SH);
+    cr0flags(A);
+}
+@endif
+
+#rlwimi r0,r0,0,0,0 0x50 00 00 00
+:rlwimi A,S,SHL,MBL,ME is $(NOTVLE) & OP=20 & S & A & SHL & MBL & ME & Rc=0 & rotmask
+{
+    shift:1 = SHL;
+@ifdef BIT_64
+    tmp:$(REGISTER_SIZE) = (S << 32) | (S & 0xffffffff);
+    tmp2:$(REGISTER_SIZE) = (tmp << shift) | (tmp >> (64-shift));
+    A = (tmp2 & rotmask) | (A & ~(rotmask));
+@else
+    tmp = (S << shift) | (S >> (32-shift));
+    A = (tmp & rotmask) | (A & ~rotmask);
+@endif
+}
+
+#rlwimi. r0,r0,0,0,0 0x50 00 00 01
+:rlwimi. A,S,SHL,MBL,ME is $(NOTVLE) & OP=20 & S & A & SHL & MBL & ME & Rc=1 & rotmask
+{
+    shift:1 = SHL;
+@ifdef BIT_64
+    tmp:$(REGISTER_SIZE) = (S << 32) | (S & 0xffffffff);
+    tmp2:$(REGISTER_SIZE) = (tmp << shift) | (tmp >> (64-shift));
+    A = (tmp2 & rotmask) | (A & ~(rotmask));
+@else
+    tmp = (S << shift) | (S >> (32-shift));
+    A = (tmp & rotmask) | (A & ~rotmask);
+@endif
+    cr0flags(A);
+}
+
+#rlwinm r0,r0,0,0,0 0x54 00 00 00
+:rlwinm A,S,SHL,MBL,ME is $(NOTVLE) & OP=21 & S & A & SHL & MBL & ME & Rc=0 & rotmask
+{
+    shift:1 = SHL;
+@ifdef BIT_64
+    tmp:$(REGISTER_SIZE) = (S << 32) | (S & 0xffffffff);
+    tmp2:$(REGISTER_SIZE) = (tmp << shift) | (tmp >> (64-shift));
+    A = tmp2 & rotmask;
+@else
+    tmp = (S << shift) | (S >> (32-shift));
+    A = (tmp & rotmask);
+@endif
+}
+
+#rlwinm. r0,r0,0,0,0 0x54 00 00 01
+:rlwinm. A,S,SHL,MBL,ME is $(NOTVLE) & OP=21 & S & A & SHL & MBL & ME & Rc=1 & rotmask
+{
+    shift:1 = SHL;
+@ifdef BIT_64
+    tmp:$(REGISTER_SIZE) = (S << 32) | (S & 0xffffffff);
+    tmp2:$(REGISTER_SIZE) = (tmp << shift) | (tmp >> (64-shift));
+    A = tmp2 & rotmask;
+@else
+    tmp = (S << shift) | (S >> (32-shift));
+    A = (tmp & rotmask);
+@endif
+    cr0flags(A);
+}
+
+#rlwnm r0,r0,0,0,0 0x5C 00 00 00
+:rlwnm A,S,B,MBL,ME is $(NOTVLE) & OP=23 & S & A & B & MBL & ME & Rc=0 & rotmask
+{
+    shift:$(REGISTER_SIZE) = B & 0x1f;
+@ifdef BIT_64
+    tmp:$(REGISTER_SIZE) = (S << 32) | (S & 0xffffffff);
+    tmp2:$(REGISTER_SIZE) = (tmp << shift) | (tmp >> (64-shift));
+    A = tmp2 & rotmask;
+@else
+    tmp = (S << shift) | (S >> (32-shift));
+    A = (tmp & rotmask);
+@endif
+}
+
+#rlwnm. r0,r0,0,0,0 0x5C 00 00 01
+:rlwnm. A,S,B,MBL,ME is $(NOTVLE) & OP=23 & S & A & B & MBL & ME & Rc=1 & rotmask
+{
+    shift:$(REGISTER_SIZE) = B & 0x1f;
+@ifdef BIT_64
+    tmp:$(REGISTER_SIZE) = (S << 32) | (S & 0xffffffff);
+    tmp2:$(REGISTER_SIZE) = (tmp << shift) | (tmp >> (64-shift));
+    A = tmp2 & rotmask;
+@else
+    tmp = (S << shift) | (S >> (32-shift));
+    A = (tmp & rotmask);
+@endif
+    cr0flags(A);
+}
+
+#sc 0x44 00 00 02
+:sc LEV is $(NOTVLE) & OP=17 & BITS_12_25=0 & LEV & BITS_2_4=0 & BIT_1=1 & BIT_0=0
+{
+    syscall();
+}
+
+#slbia 0x7C 00 03 E4
+:slbia is $(NOTVLE) & OP=31 & BITS_11_25=0 & XOP_1_10=498 & BIT_0=0
+{
+    slbInvalidateAll();
+}
+
+#slbie r0 0x7C 00 03 64
+:slbie B is $(NOTVLE) & OP=31 & BITS_16_20=0 & B & XOP_1_10=434 & BIT_0=0
+{
+    slbInvalidateEntry();
+}
+
+#slbmfee r0,r0 0x7C 00 07 26
+:slbmfee D,B is $(NOTVLE) & OP=31 & D & BITS_16_20=0 & B & XOP_1_10=915 & BIT_0=0
+{
+    slbMoveFromEntryESID();
+}
+
+#slbmfev r0,r0 0x7C 00 06 A6
+:slbmfev D,B is $(NOTVLE) & OP=31 & D & BITS_16_20=0 & B & XOP_1_10=851 & BIT_0=0
+{
+    slbMoveFromEntryVSID();
+}
+
+#slbmte r0,r0 0x7C 00 03 24
+:slbmte S,B is $(NOTVLE) & OP=31 & S & BITS_16_20=0 & B & XOP_1_10=402 & BIT_0=0
+{
+    slbMoveToEntry();
+}
+
+@ifdef BIT_64
+#sld r0,r0,r0 0x7C 00 00 36
+:sld A,S,B is OP=31 & S & A & B & XOP_1_10=27 & Rc=0
+{
+    A = S << (B & 0x7f);
+}
+
+#sld. r0,r0,r0 0x7C 00 00 37
+:sld. A,S,B is OP=31 & S & A & B & XOP_1_10=27 & Rc=1
+{
+    A = S << (B & 0x7f);
+    cr0flags(A);
+}
+@endif
+
+#slw r0,r0,r0 0x7C 00 00 30
+:slw A,S,B is OP=31 & S & A & B & XOP_1_10=24 & Rc=0
+{
+@ifdef BIT_64
+    shift:4 = B:4 & 0x3f;
+    tmp:4 = S:4 << shift;
+    A = zext(tmp);
+@else
+    shift = B & 0x3f;
+    A = S << shift;
+@endif
+}
+
+#slw. r0,r0,r0 0x7C 00 00 31
+:slw. A,S,B is OP=31 & S & A & B & XOP_1_10=24 & Rc=1
+{
+@ifdef BIT_64
+    shift:4 = B:4 & 0x3f;
+    tmp:4 = S:4 << shift;
+    A = zext(tmp);
+@else
+    shift = B & 0x3f;
+    A = S << shift;
+@endif
+    cr0flags(A);
+}
+
+@ifdef BIT_64
+#srad r0,r0,r0 0x7C 00 06 34
+:srad A,S,B is OP=31 & A & S & B & XOP_1_10=794 & Rc=0
+{
+    tmp:$(REGISTER_SIZE) = B & 0x7f;
+    shiftCarry(S,tmp);
+    A = S s>> tmp;
+}
+
+#srad. r0,r0,r0 0x7C 00 06 35
+:srad. A,S,B is OP=31 & A & S & B & XOP_1_10=794 & Rc=1
+{
+    tmp:$(REGISTER_SIZE) = B & 0x7f;
+    shiftCarry(S,tmp);
+    A = S s>> tmp;
+    cr0flags(A);
+}
+
+#sradi r0,r0,r0 0x7C 00 06 74
+:sradi A,S,SH is OP=31 & A & S & SH & XOP_2_10=413 & Rc=0
+{
+    shiftCarry(S,SH);
+    A = S s>> SH;
+}
+
+#sradi. r0,r0,r0 0x7C 00 06 75
+:sradi. A,S,SH is OP=31 & A & S & SH & XOP_2_10=413 & Rc=1
+{
+    shiftCarry(S,SH);
+    A = S s>> SH;
+    cr0flags(A);
+}
+@endif
+
+#sraw r0,r0,r0 0x7C 00 06 30
+:sraw A,S,B is OP=31 & A & S & B & XOP_1_10=792 & Rc=0
+{
+@ifdef BIT_64
+    shift:4 = B:4 & 0x3f;
+    shiftCarry(S:4,shift);
+    tmp2:4 = S:4 s>> shift;
+    A = sext(tmp2);
+@else
+    shift = B & 0x3f;
+    shiftCarry(S,shift);
+    A = S s>> shift;
+@endif
+}
+
+#sraw. r0,r0,r0 0x7C 00 06 31
+:sraw. A,S,B is OP=31 & A & S & B & XOP_1_10=792 & Rc=1
+{
+@ifdef BIT_64
+    shift:4 = B:4 & 0x3f;
+    shiftCarry(S:4,shift);
+    tmp2:4 = S:4 s>> shift;
+    A = sext(tmp2);
+@else
+    shift = B & 0x3f;
+    shiftCarry(S,shift);
+    A = S s>> shift;
+@endif
+    cr0flags(A);
+}
+
+#srawi r0,r0,r0 0x7C 00 06 70
+:srawi A,S,SHL is OP=31 & A & S & SHL & XOP_1_10=824 & Rc=0
+{
+@ifdef BIT_64
+    shift:4 = SHL;
+    shiftCarry(S:4,shift);
+    tmp2:4 = S:4 s>> shift;
+    A = sext(tmp2);
+@else
+    shiftCarry(S,SHL);
+    A = S s>> SHL;
+@endif
+}
+
+#srawi. r0,r0,r0 0x7C 00 06 71
+:srawi. A,S,SHL is OP=31 & A & S & SHL & XOP_1_10=824 & Rc=1
+{
+@ifdef BIT_64
+    shift:4 = SHL;
+    shiftCarry(S:4,shift);
+    tmp2:4 = S:4 s>> shift;
+    A = sext(tmp2);
+@else
+    shiftCarry(S,SHL);
+    A = S s>> SHL;
+@endif
+    cr0flags(A);
+}
+
+@ifdef BIT_64
+#srd r0,r0,r0 0x7C 00 04 36
+:srd A,S,B is OP=31 & S & A & B & XOP_1_10=539 & Rc=0
+{
+    A = S >> (B & 0x7f);
+}
+
+#srd. r0,r0,r0 0x7C 00 04 37
+:srd. A,S,B is OP=31 & S & A & B & XOP_1_10=539 & Rc=1
+{
+    A = S >> (B & 0x7f);
+    cr0flags(A);
+}
+@endif
+
+#srw r0,r0,r0 0x7C 00 04 30
+:srw A,S,B is OP=31 & S & A & B & XOP_1_10=536 & Rc=0
+{
+@ifdef BIT_64
+    shift:4 = B:4 & 0x3f;
+    tmp:4 = S:4 >> shift;
+    A = zext(tmp);
+@else
+    shift = B & 0x3f;
+    A = S >> shift;
+@endif
+}
+
+#srw. r0,r0,r0 0x7C 00 04 31
+:srw. 
A,S,B is OP=31 & S & A & B & XOP_1_10=536 & Rc=1 +{ +@ifdef BIT_64 + shift:4 = B:4 & 0x3f; + tmp:4 = S:4 >> shift; + A = zext(tmp); +@else + shift = B & 0x3f; + A = S >> shift; +@endif + cr0flags(A); +} + + + +#stb r0,3(0) 0x98 00 00 00 +#stb r0,3(r2) 0x98 02 00 00 +:stb S,dPlusRaOrZeroAddress is $(NOTVLE) & OP=38 & S & dPlusRaOrZeroAddress +{ + *:1(dPlusRaOrZeroAddress) = S:1; +} + +#stbu r0,3(0) 0x9c 00 00 00 +#stbu r0,3(r2) 0x9c 02 00 00 +:stbu S,dPlusRaAddress is $(NOTVLE) & OP=39 & S & dPlusRaAddress & A +{ + *:1(dPlusRaAddress) = S:1; + A = dPlusRaAddress; +} + +#stbux r0,r2,r0 0x7c 00 01 ee ### WARNING the B in this definition is different from manual - I think the manual is wrong +:stbux S,A,B is OP=31 & S & A & B & XOP_1_10=247 & BIT_0=0 +{ + tmp:$(REGISTER_SIZE) = A+B; # S may be same register as A + *tmp = S:1; # So do store before updating A + A = tmp; +} + +#stbx r0,r2,r0 0x7c 00 01 ae ### WARNING the B in this definition is different from manual - I think the manual is wrong +:stbx S,RA_OR_ZERO,B is OP=31 & S & RA_OR_ZERO & B & XOP_1_10=215 & BIT_0=0 +{ + *(RA_OR_ZERO+B) = S:1; +} + +@ifdef BIT_64 +#std r0,8(0) 0xf8 00 00 08 +#std r0,8(r2) 0xf8 02 00 08 +:std S,dsPlusRaOrZeroAddress is $(NOTVLE) & OP=62 & S & dsPlusRaOrZeroAddress & BITS_0_1=0 +{ + *:8(dsPlusRaOrZeroAddress) = S; +} + +#Special case when saving r2 to stack prior to function call (for inline call stub case) +#std r2,0x28(r1) +:std r2,dsPlusRaOrZeroAddress is $(NOTVLE) & OP=62 & S=2 & r2 & A=1 & SIMM_DS=0xa & dsPlusRaOrZeroAddress & BITS_0_1=0 +{ + r2Save = r2; + *:8(dsPlusRaOrZeroAddress) = r2; +} + +#stdcx. r0,8(0) 0x7c 00 01 AD +:stdcx. S,RA_OR_ZERO,B is OP=31 & S & RA_OR_ZERO & B & XOP_1_10=214 & BIT_0=1 +{ + EA:$(REGISTER_SIZE) = RA_OR_ZERO + B; + if (RESERVE == 0) goto inst_next; + *[ram]:8 EA = storeDoubleWordConditionalIndexed(S,RA_OR_ZERO,B); + # set when a stwcx. or stdcx. 
successfully completes + cr0flags(0:$(REGISTER_SIZE)); +} + +#stdu r0,8(0) 0xf8 00 00 01 +#stdu r0,8(r2) 0xf8 02 00 01 +:stdu S,dsPlusRaAddress is $(NOTVLE) & OP=62 & S & A & dsPlusRaAddress & BITS_0_1=1 +{ + *:8(dsPlusRaAddress) = S; + A = dsPlusRaAddress; +} + +#stdux r0,r2,r0 0x7c 00 01 6a +:stdux S,A,B is OP=31 & S & A & B & XOP_1_10=181 & BIT_0=0 +{ + local ea:$(REGISTER_SIZE) = A+B; + *:8(ea) = S; + A = ea; +} + +#stdx r0,r2,r0 0x7c 00 01 2a +:stdx S,RA_OR_ZERO,B is OP=31 & S & B & RA_OR_ZERO & XOP_1_10=149 & BIT_0=0 +{ + ea:$(REGISTER_SIZE) = RA_OR_ZERO+B; + *:8(ea) = S; +} + +@endif + +#stfd fr0,8(0) 0xD8 00 00 08 +#stfd fr0,8(r2) 0xD8 02 00 08 +:stfd fS,dPlusRaOrZeroAddress is $(NOTVLE) & OP=54 & fS & dPlusRaOrZeroAddress +{ + *:8(dPlusRaOrZeroAddress) = fS; +} + +#stfdu fr0,8(0) 0xDC 00 00 08 +#stfdu fr0,8(r2) 0xDC 02 00 08 +:stfdu fS,dPlusRaAddress is $(NOTVLE) & OP=55 & fS & dPlusRaAddress & A +{ + ea:$(REGISTER_SIZE) = dPlusRaAddress; + *:8(ea) = fS; + A = ea; +} + +#stfdux fr0,r2,r0 0x7C 00 05 EE +:stfdux fS,A,B is $(NOTVLE) & OP=31 & fS & A & B & XOP_1_10=759 & BIT_0=0 +{ + ea:$(REGISTER_SIZE) = A+B; + *:8(ea) = fS; + A = ea; +} + +#stfdx fr0,r0,r0 0x7C 00 05 AE +:stfdx fS,RA_OR_ZERO,B is $(NOTVLE) & OP=31 & fS & B & RA_OR_ZERO & XOP_1_10=727 & BIT_0=0 +{ + ea:$(REGISTER_SIZE) = RA_OR_ZERO+B; + *:8(ea) = fS; +} + +#stfiwx fr0,r0,r0 0x7C 00 07 AE +:stfiwx fS,RA_OR_ZERO,B is $(NOTVLE) & OP=31 & fS & B & RA_OR_ZERO & XOP_1_10=983 & BIT_0=0 +{ + ea:$(REGISTER_SIZE) = RA_OR_ZERO+B; + *:4(ea) = fS:4; +} + +#stfs fr0,8(0) 0xD0 00 00 08 +#stfs fr0,8(r2) 0xD0 02 00 08 +:stfs fS,dPlusRaOrZeroAddress is $(NOTVLE) & OP=52 & fS & dPlusRaOrZeroAddress +{ + tmp:4 = float2float(fS); + *:4(dPlusRaOrZeroAddress) = tmp; +} + +#stfsu fr0,8(0) 0xD4 00 00 08 +#stfsu fr0,8(r2) 0xD4 02 00 08 +:stfsu fS,dPlusRaAddress is $(NOTVLE) & OP=53 & fS & dPlusRaAddress & A +{ + tmp:4 = float2float(fS); + *:4(dPlusRaAddress) = tmp; + A = dPlusRaAddress; +} + +#stfsux fr0,r0,r0 0x7C 00 05 6E +:stfsux fS,A,B is $(NOTVLE) & OP=31 & fS & B & A & XOP_1_10=695 & BIT_0=0 +{ + ea:$(REGISTER_SIZE) = A + B; + tmp:4 = float2float(fS); + *:4(ea) = tmp; + A = ea; +} + +#stfsx fr0,r0,r0 0x7C 00 05 2E +:stfsx fS,RA_OR_ZERO,B is $(NOTVLE) & OP=31 & fS & B & RA_OR_ZERO & XOP_1_10=663 & BIT_0=0 +{ + ea:$(REGISTER_SIZE) = RA_OR_ZERO + B; + tmp:4 = float2float(fS); + *:4(ea) = tmp; +} + +#sth r0,r0 0xB0 00 00 00 +:sth S,dPlusRaOrZeroAddress is $(NOTVLE) & OP=44 & S & dPlusRaOrZeroAddress +{ + *:2(dPlusRaOrZeroAddress) = S:2; +} + +#sthbrx r0,r0,r0 0x7C 00 07 2C +:sthbrx S,RA_OR_ZERO,B is OP=31 & S & RA_OR_ZERO & B & XOP_1_10=918 & BIT_0=0 +{ + tmp:2 = zext(S:1) <<8; + tmp2:2 = S:2 >>8; + ea:$(REGISTER_SIZE) = RA_OR_ZERO + B; + *:2(ea) = tmp2 | tmp; +} + +#sthu r0,r0 0xB4 00 00 00 +:sthu S,dPlusRaAddress is $(NOTVLE) & OP=45 & S & A & dPlusRaAddress +{ + *:2(dPlusRaAddress) = S:2; + A = dPlusRaAddress; +} + +#sthux r0,r0,r0 0x7C 00 03 6E +:sthux S,A,B is OP=31 & S & A & B & XOP_1_10=439 & BIT_0=0 +{ + ea:$(REGISTER_SIZE) = A + B; + *:2(ea) = S:2; + A = ea; +} + +#sthx r0,r0,r0 0x7C 00 03 2E +:sthx S,RA_OR_ZERO,B is OP=31 & S & RA_OR_ZERO & B & XOP_1_10=407 & BIT_0=0 +{ + ea:$(REGISTER_SIZE) = RA_OR_ZERO + B; + *:2(ea) = S:2; +} + +#### +#stm instruction +@include "stmwInstructions.sinc" + +@include "stswiInstructions.sinc" +#stswi r0,r0,0 0x7c 00 05 aa +#:stswi S,A,NB is $(NOTVLE) & OP=31 & S & A & NB & XOP_1_10=725 & BIT_0=0 +#{ +# tmp:1 = NB; +# storeString(S,A,tmp); +#} + +#stswx r0,r0,0 0x7c 00 05 2a +define pcodeop stswxOp; 
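+# Modeling note (an assumption, mirroring lswxOp above): stswx stores a byte
+# count taken at run time from the low-order bits of XER, so the length is not
+# known at disassembly time and the store is modeled as an opaque stswxOp
+# pcodeop instead of explicit per-byte stores.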
+:stswx S,RA_OR_ZERO,B is OP=31 & S & RA_OR_ZERO & B & XOP_1_10=661 & BIT_0=0 +{ + EA:$(REGISTER_SIZE) = RA_OR_ZERO + B; + *[ram]:1 EA = stswxOp(S,RA_OR_ZERO,B); +} + +#stw r0,r0,0 0x90 00 00 00 +:stw S,dPlusRaOrZeroAddress is $(NOTVLE) & OP=36 & S & dPlusRaOrZeroAddress +{ +@ifdef BIT_64 + *:4(dPlusRaOrZeroAddress) = S:4; +@else + *:4(dPlusRaOrZeroAddress) = S; +@endif +} + +#stwbrx r0,r0,0 0x7c 00 05 2c +:stwbrx S,RA_OR_ZERO,B is OP=31 & S & RA_OR_ZERO & B & XOP_1_10=662 & BIT_0=0 +{ +@ifdef BIT_64 + value:4 = S:4; +@else + value:$(REGISTER_SIZE) = S; +@endif + ea:$(REGISTER_SIZE) = RA_OR_ZERO + B; + tmp1:4 = value << 24; + tmp2:4 = (value << 8) & 0xff0000; + tmp3:4 = (value >> 8) & 0x00ff00; + tmp4:4 = value >> 24; + *:4(ea) = tmp1 | tmp2 | tmp3 | tmp4; +} + +#stwcx. r0,8(0) 0x7c 00 01 2D +:stwcx. S,RA_OR_ZERO,B is OP=31 & S & RA_OR_ZERO & B & XOP_1_10=150 & BIT_0=1 +{ + EA:$(REGISTER_SIZE) = RA_OR_ZERO + B; + if (RESERVE == 0) goto inst_next; + *[ram]:4 EA = storeWordConditionalIndexed(S,RA_OR_ZERO,B); + # set when a stwcx. or stdcx. successfully completes + cr0flags(0:$(REGISTER_SIZE)); +} + +#stwu r0,r0 0x94 00 00 00 +:stwu S,dPlusRaAddress is $(NOTVLE) & OP=37 & S & A & dPlusRaAddress +{ +@ifdef BIT_64 + *:4(dPlusRaAddress) = S:4; +@else + *:4(dPlusRaAddress) = S; +@endif + A = dPlusRaAddress; +} + +#stwux r0,r0,r0 0x7C 00 01 6E +:stwux S,A,B is OP=31 & S & A & B & XOP_1_10=183 & BIT_0=0 +{ + ea:$(REGISTER_SIZE) = A + B; +@ifdef BIT_64 + *:4(ea) = S:4; +@else + *:4(ea) = S; +@endif + A = ea; +} + +#stwx r0,r0,r0 0x7C 00 01 2E +:stwx S,RA_OR_ZERO,B is OP=31 & S & RA_OR_ZERO & B & XOP_1_10=151 & BIT_0=0 +{ + ea:$(REGISTER_SIZE) = RA_OR_ZERO + B; +@ifdef BIT_64 + *:4(ea) = S:4; +@else + *:4(ea) = S; +@endif +} + + +#subf r0,r0,r0 0x7c 00 00 50 +:subf D,A,B is OP=31 & D & A & B & OE=0 & XOP_1_9=40 & Rc=0 +{ + D = B - A; +} + +#subf. r0,r0,r0 0x7c 00 00 51 +:subf. D,A,B is OP=31 & D & A & B & OE=0 & XOP_1_9=40 & Rc=1 +{ + D = B - A; + cr0flags(D); +} + +#subfo r1,r2,r3 0x7c 00 04 50 +:subfo D,A,B is OP=31 & D & A & B & OE=1 & XOP_1_9=40 & Rc=0 +{ + subOverflow(B,A); + D = B - A; +} + +#subfo. r1,r2,r3 0x7c 00 04 51 +:subfo. D,A,B is OP=31 & D & A & B & OE=1 & XOP_1_9=40 & Rc=1 +{ + subOverflow(B,A); + D = B - A; + cr0flags(D); +} + +#subfc r0,r0,r0 0x7c 00 00 10 +:subfc D,A,B is OP=31 & D & A & B & OE=0 & XOP_1_9=8 & Rc=0 +{ + xer_ca = (A <= B); + D = B - A; +} + +#subfc. r0,r0,r0 0x7c 00 00 11 +:subfc. D,A,B is OP=31 & D & A & B & OE=0 & XOP_1_9=8 & Rc=1 +{ + xer_ca = (A <= B); + D = B - A; + cr0flags(D); +} + +#subfco r0,r0,r0 0x7c 00 04 10 +:subfco D,A,B is OP=31 & D & A & B & OE=1 & XOP_1_9=8 & Rc=0 +{ + xer_ca = (A <= B); + subOverflow(B,A); + D = B - A; +} + +#subfco. r0,r0,r0 0x7c 00 04 11 +:subfco. D,A,B is OP=31 & D & A & B & OE=1 & XOP_1_9=8 & Rc=1 +{ + xer_ca = (A <= B); + subOverflow( B, A ); + D = B - A; + cr0flags(D); +} + +#subfe r0,r0,r0 0x7c 00 01 10 +:subfe D,A,B is OP=31 & D & A & B & OE=0 & XOP_1_9=136 & Rc=0 +{ + tmp:$(REGISTER_SIZE) = A + zext(!xer_ca); + subExtendedCarry(B,A); + D = B - tmp; +} + +#subfe. r0,r0,r0 0x7c 00 01 11 +:subfe. D,A,B is OP=31 & D & A & B & OE=0 & XOP_1_9=136 & Rc=1 +{ + tmp:$(REGISTER_SIZE) = A + zext(!xer_ca); + subExtendedCarry(B,A); + D = B - tmp; + cr0flags(D); +} + +#subfeo r0,r0,r0 0x7c 00 05 10 +:subfeo D,A,B is OP=31 & D & A & B & OE=1 & XOP_1_9=136 & Rc=0 +{ + tmp:$(REGISTER_SIZE) = zext(!xer_ca)+A; + subExtendedOverflow(B,A); + subExtendedCarry(B,A); + D = B - tmp; +} + +#subfeo. r0,r0,r0 0x7c 00 05 11 +:subfeo. 
D,A,B is OP=31 & D & A & B & OE=1 & XOP_1_9=136 & Rc=1
+{
+    tmp:$(REGISTER_SIZE) = zext(!xer_ca)+A;
+    subExtendedOverflow(B,A);
+    subExtendedCarry(B,A);
+    D = B - tmp;
+    cr0flags(D);
+}
+
+#subfic r0,r0,2 0x20 00 00 02
+:subfic D,A,SIMM is $(NOTVLE) & OP=8 & D & A & SIMM
+{
+    xer_ca = !(SIMM < A);
+    D = SIMM - A;
+}
+
+# PowerISA II: 3.3.12 Fixed-Point Logical Instructions
+# CMT: Parity Doubleword
+# FORM: X-form
+:prtyd A,S is $(NOTVLE) & OP=31 & S & A & BITS_11_15=0 & XOP_1_10=186 & BIT_0=0
+{
+    local i:8 = 0;
+    local s:8 = 0;
+    local b:8 = 0;
+    local tmp:8 = 0;
+    <loop>
+    b = (63 - (i*8+7));
+    tmp = (S >> (63 - (i*8+7))); b = tmp & 1; # GetBit
+    s = s ^ b;
+    i = i + 1;
+    if (i < 8) goto <loop>;
+    A = s;
+}
+
+# PowerISA II: 3.3.12 Fixed-Point Logical Instructions
+# CMT: Compare Bytes
+# FORM: X-form
+# binutils: 476.d: dc: 7c 83 2b f8 cmpb r3,r4,r5
+# binutils: 476.d: e0: 7c 83 2b f8 cmpb r3,r4,r5
+# binutils: a2.d: 104: 7d 6a 63 f8 cmpb r10,r11,r12
+# binutils: power6.d: 20: 7c 83 2b f8 cmpb r3,r4,r5
+# binutils: power7.d: 90: 7c 83 2b f8 cmpb r3,r4,r5
+# name cmpb code 7c0003f8 mask ff0700fc00000000 flags @POWER6 @476 @A2 operands 31 3b 38 0 0 0 0 0
+:cmpb S,A,B is $(NOTVLE) & OP=31 & S & A & B & XOP_1_10=508 & BIT_0=0 { # PCODE-YES
+    tmpS:8 = 0;
+    tmpB:8 = 0;
+    val:8 = 0;
+    zero:8 = 0;
+    ones:8 = 0xff;
+
+    # Unrolled the loop
+    tmpS = (S >> 56) & 0xFF; # get next S byte
+    tmpB = (B >> 56) & 0xFF; # get next B byte
+    val = (zext(tmpS == tmpB) * ones) + (zext(tmpS != tmpB) * zero);
+    tmpS = (S >> 48) & 0xFF; # get next S byte
+    tmpB = (B >> 48) & 0xFF; # get next B byte
+    val = val << 8 | (zext(tmpS == tmpB) * ones) + (zext(tmpS != tmpB) * zero);
+    tmpS = (S >> 40) & 0xFF; # get next S byte
+    tmpB = (B >> 40) & 0xFF; # get next B byte
+    val = val << 8 | (zext(tmpS == tmpB) * ones) + (zext(tmpS != tmpB) * zero);
+    tmpS = (S >> 32) & 0xFF; # get next S byte
+    tmpB = (B >> 32) & 0xFF; # get next B byte
+    val = val << 8 | (zext(tmpS == tmpB) * ones) + (zext(tmpS != tmpB) * zero);
+    tmpS = (S >> 24) & 0xFF; # get next S byte
+    tmpB = (B >> 24) & 0xFF; # get next B byte
+    val = val << 8 | (zext(tmpS == tmpB) * ones) + (zext(tmpS != tmpB) * zero);
+    tmpS = (S >> 16) & 0xFF; # get next S byte
+    tmpB = (B >> 16) & 0xFF; # get next B byte
+    val = val << 8 | (zext(tmpS == tmpB) * ones) + (zext(tmpS != tmpB) * zero);
+    tmpS = (S >> 8) & 0xFF; # get next S byte
+    tmpB = (B >> 8) & 0xFF; # get next B byte
+    val = val << 8 | (zext(tmpS == tmpB) * ones) + (zext(tmpS != tmpB) * zero);
+    tmpS = S & 0xFF; # get next S byte
+    tmpB = B & 0xFF; # get next B byte
+    val = val << 8 | (zext(tmpS == tmpB) * ones) + (zext(tmpS != tmpB) * zero);
+    A = val;
+}
+# PowerISA II: 3.3.12 Fixed-Point Logical Instructions
+# CMT: Bit Permute Doubleword [Category: Embedded.Phased-in, Server]
+# FORM: X-form
+# binutils: a2.d: fc: 7d 6a 61 f8 bpermd r10,r11,r12
+# binutils: power7.d: d8: 7e 27 d9 f8 bpermd r7,r17,r27
+# name bpermd code 7c0001f8 mask ff0700fc00000000 flags @POWER7 @A2 operands 31 3b 38 0 0 0 0 0
+define pcodeop BitPermuteDoubleword;
+:bpermd A,S,B is $(NOTVLE) & OP=31 & S & A & B & XOP_1_10=252 & BIT_0=0 {
+    BitPermuteDoubleword(A,S,B);
+}
+
+# PowerISA II: 3.3.12 Fixed-Point Logical Instructions
+# CMT: Population Count Words [Category: Server] [Category: Embedded.Phased-In]
+# FORM: X-form
+# binutils: a2.d: 64c: 7d 6a 02 f4 popcntw r10,r11
+# binutils: power7.d: dc: 7e 8a 02 f4 popcntw r10,r20
+# name popcntw code 7c0002f4 mask ffff00fc00000000 flags @POWER7 @A2 operands 31 3b 0 0 0 0 0 0
+:popcntw A,S is $(NOTVLE) & OP=31 & S & A & XOP_1_10=378 & Rc & BITS_11_15=0 {
+    local tmp1:4 = S(0);
+    tmp1 = popcount(tmp1);
+    local tmp2:4 = S(4);
+    tmp2 = popcount(tmp2);
+    A = (zext(tmp2) << 32) + zext(tmp1);
+}
+
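+# Example: for RS = 0x000000030000000F, popcntw sets
+# RA = 0x0000000200000004 (two one-bits in the high word, four in the low word).
+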
+# PowerISA II: 3.3.12 Fixed-Point Logical Instructions
+# CMT: Population Count Bytes
+# FORM: X-form
+# binutils: 476.d: 618: 7c 83 00 f4 popcntb r3,r4
+# binutils: a2.d: 644: 7d 6a 00 f4 popcntb r10,r11
+# name popcntb code 7c0000f4 mask ffff00fc00000000 flags @POWER5 operands 31 3b 0 0 0 0 0 0
+:popcntb A,S is OP=31 & S & A & BITS_11_15=0 & XOP_1_10=122 & BIT_0=0 {
+    local i:8 = 0;
+    local tmp:8 = 0;
+    local tmpb:1 = 0;
+    local mask:8 = 0xff;
+
+    <loop>
+    tmp = (S >> (i*8));
+    tmpb = tmp(0);
+    tmpb = popcount(tmpb);
+    A = (A & ~(mask)) + (zext(tmpb) << (i*8));
+    mask = mask << 8;
+    i = i + 1;
+    if (i < 8) goto <loop>;
+}
+
+# PowerISA II: 3.3.12 Fixed-Point Logical Instructions
+# CMT: Parity Word
+# FORM: X-form
+# binutils: 476.d: 61c: 7c 83 01 34 prtyw r3,r4
+# binutils: a2.d: 654: 7d 6a 01 34 prtyw r10,r11
+# binutils: power6.d: 10: 7c 83 01 34 prtyw r3,r4
+# binutils: power7.d: 80: 7c 83 01 34 prtyw r3,r4
+# name prtyw code 7c000134 mask ffff00fc00000000 flags @POWER6 @476 @A2 operands 31 3b 0 0 0 0 0 0
+:prtyw A,S is $(NOTVLE) & OP=31 & S & A & BITS_11_15=0 & XOP_1_10=154 & BIT_0=0 {
+    local temp:8 = S;
+    A[0,32] = zext(((popcount(temp & 0x01010101:8)) & 1:8) == 1:8);
+    A[32,32] = zext(((popcount(temp & 0x0101010100000000:8)) & 1:8) == 1:8);
+}
+
+# =======================================================================
+
+# PowerISA II: 4.4.1 Fixed-Point Load and Store Caching Inhibited Instructions
+# CMT: Load Word and Zero Caching Inhibited Indexed
+# binutils: power6.d: 2c: 7d 4b 66 2a lwzcix r10,r11,r12
+# binutils: power7.d: 94: 7d 4b 66 2a lwzcix r10,r11,r12
+# name lwzcix code 7c00062a mask ff0700fc00000000 flags @POWER6 operands 3b 32 38 0 0 0 0 0
+define pcodeop LoadWordAndZeroCachingInhibited;
+:lwzcix TH,A,B is $(NOTVLE) & OP=31 & TH & A & B & XOP_1_10=789 & BIT_0=0 { # PCODE-YES
+    TH = zext( *:4 (A + B) );
+}
+
+# =======================================================================
+
+# PowerISA II: 3.3.14 Binary Coded Decimal (BCD) Assist Instructions [Category: Embedded.Phased-in, Server]
+# CMT: Convert Declets To Binary Coded Decimal
+# FORM: X-form
+# binutils: power6.d: f0: 7d 6a 02 34 cdtbcd r10,r11
+# name cdtbcd code 7c000234 mask ffff00fc00000000 flags @POWER6 operands 31 3b 0 0 0 0 0 0
+define pcodeop ConvertDecletsToBinaryCodedDecimal;
+:cdtbcd A,S is $(NOTVLE) & OP=31 & S & A & XOP_1_10=282 & BITS_11_15=0 & BIT_0=0 { ConvertDecletsToBinaryCodedDecimal(S,A); }
+
+# PowerISA II: 3.3.14 Binary Coded Decimal (BCD) Assist Instructions [Category: Embedded.Phased-in, Server]
+# CMT: Add and Generate Sixes
+# FORM: XO-form
+# binutils: power6.d: f4: 7d 4b 60 94 addg6s r10,r11,r12
+# name addg6s code 7c000094 mask ff0700fc00000000 flags @POWER6 operands 3b 31 38 0 0 0 0 0
+define pcodeop AddAndGenerateSixes;
+:addg6s TH,A,B is $(NOTVLE) & OP=31 & TH & A & B & XOP_1_9=74 & BIT_10=0 & BIT_0=0 { # PCODE-YES
+    AddAndGenerateSixes(TH,A,B);
+}
+
+# ==========================================================================
+
+# PowerISA II: 3.3.8 Fixed-Point Arithmetic Instructions
+# CMT: Divide Word Extended [Category: Server] [Category: Embedded.Phased-In]
+# FORM: XO-form
+# binutils: power7.d: b8: 7d 4b 63 56 divwe r10,r11,r12
+# name divwe code 7c000356 mask ff0700fc00000000 flags @POWER7 @A2 operands 3b 31 38 0 0 0 0 0
+:divwe TH,A,B is $(NOTVLE) & OP=31 & TH & A & B & OE=0 & XOP_1_9=427 & Rc=0 {
+    tmp:8 = 0;
+
+    # A high 4 bytes to a
+    tmp = A;
+    tmp = tmp >> 32;
+    a:4 = tmp:4;
+
+    # B high 4 bytes to b
+    tmp = B;
+    tmp = tmp >> 32;
+    b:4 = tmp:4;
+
+    # C
+    c:4 = (a s/ b);
+
+    # C low 4 bytes to TH high 4 bytes
+    tmp = zext(c);
+    tmp = tmp << 32;
+    TH = tmp;
+}
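+
+# NOTE: the remaining divwe* variants repeat this skeleton: divide the high
+# words of RA and RB and return the 32-bit quotient in the high word of RT.
+# This approximates the PowerISA extended divide, whose dividend is the low
+# word of RA concatenated with 32 zero bits.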
+
+# PowerISA II: 3.3.8 Fixed-Point Arithmetic Instructions
+# CMT: Divide Word Extended [Category: Server] [Category: Embedded.Phased-In]
+# FORM: XO-form
+# binutils: power7.d: bc: 7d 6c 6b 57 divwe. r11,r12,r13
+# name divwe. code 7c000357 mask ff0700fc00000000 flags @POWER7 @A2 operands 3b 31 38 0 0 0 0 0
+:divwe. TH,A,B is $(NOTVLE) & OP=31 & TH & A & B & OE=0 & XOP_1_9=427 & Rc=1 {
+    tmp:8 = 0;
+
+    # A high 4 bytes to a
+    tmp = A;
+    tmp = tmp >> 32;
+    a:4 = tmp:4;
+
+    # B high 4 bytes to b
+    tmp = B;
+    tmp = tmp >> 32;
+    b:4 = tmp:4;
+
+    # C
+    c:4 = (a s/ b);
+
+    # C low 4 bytes to TH high 4 bytes
+    tmp = zext(c);
+    tmp = tmp << 32;
+    TH = tmp;
+
+    cr0flags(TH);
+}
+
+# PowerISA II: 3.3.8 Fixed-Point Arithmetic Instructions
+# CMT: Divide Word Extended [Category: Server] [Category: Embedded.Phased-In]
+# FORM: XO-form
+# binutils: power7.d: c0: 7d 8d 77 56 divweo r12,r13,r14
+# name divweo code 7c000756 mask ff0700fc00000000 flags @POWER7 @A2 operands 3b 31 38 0 0 0 0 0
+:divweo TH,A,B is $(NOTVLE) & OP=31 & TH & A & B & OE=1 & XOP_1_9=427 & Rc=0 {
+    tmp:8 = 0;
+
+    # A high 4 bytes to a
+    tmp = A;
+    tmp = tmp >> 32;
+    a:4 = tmp:4;
+
+    # B high 4 bytes to b
+    tmp = B;
+    tmp = tmp >> 32;
+    b:4 = tmp:4;
+
+    # C
+    c:4 = (a s/ b);
+
+    # C low 4 bytes to TH high 4 bytes
+    tmp = zext(c);
+    tmp = tmp << 32;
+    divOverflow(A,B);
+    TH = tmp;
+}
+
+# PowerISA II: 3.3.8 Fixed-Point Arithmetic Instructions
+# CMT: Divide Word Extended [Category: Server] [Category: Embedded.Phased-In]
+# FORM: XO-form
+# binutils: power7.d: c4: 7d ae 7f 57 divweo. r13,r14,r15
+# name divweo. code 7c000757 mask ff0700fc00000000 flags @POWER7 @A2 operands 3b 31 38 0 0 0 0 0
+define pcodeop DivideWordExtended4;
+:divweo. TH,A,B is $(NOTVLE) & OP=31 & TH & A & B & OE=1 & XOP_1_9=427 & Rc=1 {
+    tmp:8 = 0;
+
+    # A high 4 bytes to a
+    tmp = A;
+    tmp = tmp >> 32;
+    a:4 = tmp:4;
+
+    # B high 4 bytes to b
+    tmp = B;
+    tmp = tmp >> 32;
+    b:4 = tmp:4;
+
+    # C
+    c:4 = (a s/ b);
+
+    # C low 4 bytes to TH high 4 bytes
+    tmp = zext(c);
+    tmp = tmp << 32;
+    divOverflow(A,B);
+    TH = tmp;
+
+    cr0flags(TH);
+}
+
+# PowerISA II: 3.3.8 Fixed-Point Arithmetic Instructions
+# CMT: Divide Word Extended Unsigned [Category: Server] [Category: Embedded.Phased-In]
+# FORM: XO-form
+# binutils: power7.d: c8: 7d 4b 63 16 divweu r10,r11,r12
+# name divweu code 7c000316 mask ff0700fc00000000 flags @POWER7 @A2 operands 3b 31 38 0 0 0 0 0
+:divweu TH,A,B is $(NOTVLE) & OP=31 & TH & A & B & OE=0 & Rc=0 & XOP_1_9=395 {
+    tmp:8 = 0;
+
+    # A high 4 bytes to a
+    tmp = A;
+    tmp = tmp >> 32;
+    a:4 = tmp:4;
+
+    # B high 4 bytes to b
+    tmp = B;
+    tmp = tmp >> 32;
+    b:4 = tmp:4;
+
+    # C
+    c:4 = (a / b);
+
+    # C low 4 bytes to TH high 4 bytes
+    tmp = zext(c);
+    tmp = tmp << 32;
+    TH = tmp;
+}
+
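+# NOTE: as with the other XO-form divides, four encodings are spelled out per
+# operation: OE=0/Rc=0 (base), OE=0/Rc=1 (dot), OE=1/Rc=0 (o) and OE=1/Rc=1
+# (o-dot); the o forms add divOverflow() and the dot forms add cr0flags().
+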
+# PowerISA II: 3.3.8 Fixed-Point Arithmetic Instructions
+# CMT: Divide Word Extended Unsigned [Category: Server] [Category: Embedded.Phased-In]
+# FORM: XO-form
+# binutils: power7.d: cc: 7d 6c 6b 17 divweu. r11,r12,r13
+# name divweu. code 7c000317 mask ff0700fc00000000 flags @POWER7 @A2 operands 3b 31 38 0 0 0 0 0
+:divweu. TH,A,B is $(NOTVLE) & OP=31 & TH & A & B & OE=0 & Rc=1 & XOP_1_9=395 {
+    tmp:8 = 0;
+
+    # A high 4 bytes to a
+    tmp = A;
+    tmp = tmp >> 32;
+    a:4 = tmp:4;
+
+    # B high 4 bytes to b
+    tmp = B;
+    tmp = tmp >> 32;
+    b:4 = tmp:4;
+
+    # C
+    c:4 = (a / b);
+
+    # C low 4 bytes to TH high 4 bytes
+    tmp = zext(c);
+    tmp = tmp << 32;
+    TH = tmp;
+
+    cr0flags(TH);
+}
+
+# PowerISA II: 3.3.8 Fixed-Point Arithmetic Instructions
+# CMT: Divide Word Extended Unsigned [Category: Server] [Category: Embedded.Phased-In]
+# FORM: XO-form
+# binutils: power7.d: d0: 7d 8d 77 16 divweuo r12,r13,r14
+# name divweuo code 7c000716 mask ff0700fc00000000 flags @POWER7 @A2 operands 3b 31 38 0 0 0 0 0
+:divweuo TH,A,B is $(NOTVLE) & OP=31 & TH & A & B & OE=1 & Rc=0 & XOP_1_9=395 {
+    tmp:8 = 0;
+
+    # A high 4 bytes to a
+    tmp = A;
+    tmp = tmp >> 32;
+    a:4 = tmp:4;
+
+    # B high 4 bytes to b
+    tmp = B;
+    tmp = tmp >> 32;
+    b:4 = tmp:4;
+
+    # C
+    c:4 = (a / b);
+
+    # C low 4 bytes to TH high 4 bytes
+    tmp = zext(c);
+    tmp = tmp << 32;
+    divOverflow(A,B);
+    TH = tmp;
+}
+
+# PowerISA II: 3.3.8 Fixed-Point Arithmetic Instructions
+# CMT: Divide Word Extended Unsigned [Category: Server] [Category: Embedded.Phased-In]
+# FORM: XO-form
+# binutils: power7.d: d4: 7d ae 7f 17 divweuo. r13,r14,r15
+# name divweuo. code 7c000717 mask ff0700fc00000000 flags @POWER7 @A2 operands 3b 31 38 0 0 0 0 0
+:divweuo. TH,A,B is $(NOTVLE) & OP=31 & TH & A & B & OE=1 & Rc=1 & XOP_1_9=395 {
+    tmp:8 = 0;
+
+    # A high 4 bytes to a
+    tmp = A;
+    tmp = tmp >> 32;
+    a:4 = tmp:4;
+
+    # B high 4 bytes to b
+    tmp = B;
+    tmp = tmp >> 32;
+    b:4 = tmp:4;
+
+    # C
+    c:4 = (a / b);
+
+    # C low 4 bytes to TH high 4 bytes
+    tmp = zext(c);
+    tmp = tmp << 32;
+    divOverflow(A,B);
+    TH = tmp;
+
+    cr0flags(TH);
+}
+
+# =======================================================================
+
+# PowerISA II: 3.3.12.1 64-bit Fixed-Point Logical Instructions [Category: 64-Bit]
+# CMT: Population Count Doubleword [Category: Server.64-bit] [Category: Embedded.64-bit.Phased-In]
+# FORM: X-form
+# binutils: a2.d: 648: 7d 6a 03 f4 popcntd r10,r11
+# binutils: power7.d: e0: 7e 8a 03 f4 popcntd r10,r20
+# name popcntd code 7c0003f4 mask ffff00fc00000000 flags @POWER7 @A2 operands 31 3b 0 0 0 0 0 0
+:popcntd A,S is $(NOTVLE) & OP=31 & S & A & XOP_1_10=506 & Rc & BITS_11_15=0 {
+    A = popcount(S);
+}
+
+# =======================================================================
+
+# PowerISA II: 3.3.4.1 64-Bit Load and Store with Byte Reversal Instructions [Category: 64-bit]
+# CMT: Load Doubleword Byte-Reverse Indexed
+# FORM: X-form
+# Category: 64
+# binutils: a2.d: 418: 7d 4b 64 28 ldbrx r10,r11,r12
+# binutils: cell.d: 40: 7c 00 0c 28 ldbrx r0,0,r1
+# binutils: cell.d: 44: 7c 01 14 28 ldbrx r0,r1,r2
+# binutils: power7.d: e4: 7e 95 b4 28 ldbrx r20,r21,r22
+# name ldbrx code 7c000428 mask ff0700fc00000000 flags @POWER7 @CELL @A2 operands 3b 32 38 0 0 0 0 0
+define pcodeop LoadDoublewordByteReverseIndexed;
+:ldbrx D,A,B is $(NOTVLE) & OP=31 & D & A & B & XOP_1_10=532 & Rc { D = LoadDoublewordByteReverseIndexed(D,A,B); }
+
+# ======================================================================
+
+# PowerISA II: 4.4.2 Load and Reserve and Store Conditional Instructions
+# CMT: Store Byte Conditional Indexed
+# FORM: X-form
+# binutils: power7.d: 164: 7d 4b 65 6d stbcx. r10,r11,r12
+# name stbcx. code 7c00056d mask ff0700fc00000000 flags @POWER7 operands 3b 32 38 0 0 0 0 0
+define pcodeop StoreByteConditionalIndexed;
+:stbcx.
S,RA_OR_ZERO,B is OP=31 & S & RA_OR_ZERO & B & XOP_1_10=694 & Rc=1 { + EA:$(REGISTER_SIZE) = RA_OR_ZERO + B; + *[ram]:1 EA = StoreByteConditionalIndexed(S,RA_OR_ZERO,B); + setCrBit(cr0, 2, 1); +} + +# ====================================================================== + +# PowerISA II: 5.4.1 Move To/From System Register Instructions +# CMT: Move From Device Control Register Indexed [Category: Embedded.Device Control] +# FORM: X-form +# binutils: 476.d: 49c: 7c 85 02 06 mfdcrx r4,r5 +# binutils: a2.d: 520: 7d 4b 02 06 mfdcrx r10,r11 +# binutils: booke.d: 28: 7c 85 02 06 mfdcrx r4,r5 +# binutils: booke_xcoff.d: 24: 7c 85 02 06 mfdcrx r4,r5 +# name mfdcrx code 7c000206 mask ff0700fc00000000 flags @476 @BOOKE @A2 operands 3b 31 0 0 0 0 0 0 +define pcodeop MoveFromDeviceControlRegisterIndexed; +:mfdcrx D,A is OP=31 & D & A & XOP_1_9=259 & Rc=0 { # + MoveFromDeviceControlRegisterIndexed(D,A); +} + + +# PowerISA II: 5.4.1 Move To/From System Register Instructions +# CMT: Move To Device Control Register Indexed [Category: Embedded.Device Control] +# FORM: X-form +# binutils: 476.d: 4cc: 7c e6 03 06 mtdcrx r6,r7 +# binutils: a2.d: 568: 7d 6a 03 06 mtdcrx r10,r11 +# binutils: booke.d: 30: 7c e6 03 06 mtdcrx r6,r7 +# binutils: booke_xcoff.d: 2c: 7c e6 03 06 mtdcrx r6,r7 +# binutils: 4cc: 7c e6 03 06 mtdcrx r6,r7 +# name mtdcrx code 7c000306 mask ff0700fc00000000 flags @476 @BOOKE @A2 operands 31 3b 0 0 0 0 0 0 +define pcodeop MoveToDeviceControlRegisterIndexed; +:mtdcrx A,S is OP=31 & S & A & BITS_11_15=0 & XOP_1_10=387 & BIT_0=0 { MoveToDeviceControlRegisterIndexed(S,A); } # + +# ======================================================================== + +# PowerISA II: 5.4.3 External Process ID Instructions [Category: Embedded.External PID] +# CMT: Data Cache Block Flush by External PID +# FORM: X-form +# binutils: a2.d: 154: 7c 0a 58 fe dcbfep r10,r11 +# binutils: e500mc.d: 9c: 7c 01 10 fe dcbfep r1,r2 +# name dcbfep code 7c0000fe mask ff07e0ff00000000 flags @E500MC @A2 operands 31 38 0 0 0 0 0 0 +define pcodeop DataCacheBlockFlushByExternalPID; +:dcbfep A,B is OP=31 & A & B & XOP_1_10=127 & BIT_0=0 & BITS_21_25=0 { # + DataCacheBlockFlushByExternalPID(A,B); +} + +# PowerISA II: 5.4.3 External Process ID Instructions [Category: Embedded.External PID] +# CMT: Data Cache Block Store by External PID +# FORM: X-form +# binutils: a2.d: 168: 7c 0a 58 7e dcbstep r10,r11 +# binutils: e500mc.d: 98: 7c 1f 00 7e dcbstep r31,r0 +# name dcbstep code 7c00007e mask ff07e0ff00000000 flags @E500MC @A2 operands 31 38 0 0 0 0 0 0 +define pcodeop DataCacheBlockStoreByExternalPID; +:dcbstep A,B is OP=31 & BITS_21_25=0 & A & B & XOP_1_10=63 & BIT_0=0 { # + DataCacheBlockStoreByExternalPID(A,B); +} + + +# PowerISA II: 5.4.3 External Process ID Instructions [Category: Embedded.External PID] +# CMT: Data Cache Block set to Zero by External PID +# FORM: X-form +# binutils: a2.d: 198: 7c 0a 5f fe dcbzep r10,r11 +# binutils: e500mc.d: a8: 7c 0b 67 fe dcbzep r11,r12 +# name dcbzep code 7c0007fe mask ff07e0ff00000000 flags @E500MC @A2 operands 31 38 0 0 0 0 0 0 +define pcodeop DataCacheBlockSetToZeroByExternalPID; +:dcbzep A,B is OP=31 & BITS_21_25=0 & A & B & XOP_1_10=1023 & BIT_0=0 { + DataCacheBlockSetToZeroByExternalPID(A,B); +} + +# PowerISA II: 5.4.3 External Process ID Instructions [Category: Embedded.External PID] +# CMT: Instruction Cache Block Invalidate by External PID +# FORM: X-form +# binutils: a2.d: 3b8: 7c 0a 5f be icbiep r10,r11 +# binutils: e500mc.d: 10: 7c 09 57 be icbiep r9,r10 +# name icbiep code 
7c0007be mask ff07e0ff00000000 flags @E500MC @A2 operands 31 38 0 0 0 0 0 0 +define pcodeop InstructionCacheBlockInvalidateByExternalPID; +:icbiep A,B is OP=31 & BITS_21_25=0 & A & B & XOP_1_10=991 & BIT_0=0 { + InstructionCacheBlockInvalidateByExternalPID(A,B); +} + +# PowerISA II: 5.4.3 External Process ID Instructions [Category: Embedded.External PID] +# CMT: Load Floating-Point Double by External Process ID Indexed +# FORM: X-form +# binutils: a2.d: 438: 7e 8a 5c be lfdepx f20,r10,r11 +# binutils: e500mc.d: 50: 7d ae 7c be lfdepx f13,r14,r15 +# name lfdepx code 7c0004be mask ff0700fc00000000 flags @E500MC @A2 operands 22 31 38 0 0 0 0 0 +:lfdepx fT,RA_OR_ZERO,B is OP=31 & fT & B & RA_OR_ZERO & XOP_1_10=607 & BIT_0=0 +{ + ea:$(REGISTER_SIZE) = RA_OR_ZERO+B; + fT = *:8(ea); +} + + +# PowerISA II: 5.4.3 External Process ID Instructions [Category: Embedded.External PID] +# CMT: Store Byte by External Process ID Indexed +# FORM: X-form +# binutils: a2.d: 700: 7d 4b 61 be stbepx r10,r11,r12 +# binutils: e500mc.d: 54: 7e 11 91 be stbepx r16,r17,r18 +# name stbepx code 7c0001be mask ff0700fc00000000 flags @E500MC @A2 operands 3b 31 38 0 0 0 0 0 +define pcodeop StoreByteByExternalProcessIDIndexed; +:stbepx S,RA_OR_ZERO,B is OP=31 & S & RA_OR_ZERO & B & XOP_1_10=223 & BIT_0=0 { # + EA:$(REGISTER_SIZE) = RA_OR_ZERO + B; + *[ram]:1 EA = StoreByteByExternalProcessIDIndexed(S,RA_OR_ZERO,B); +} + +# PowerISA II: 5.4.3 External Process ID Instructions [Category: Embedded.External PID] +# CMT: Store Halfword by External Process ID Indexed +# FORM: X-form +# binutils: a2.d: 784: 7d 4b 63 3e sthepx r10,r11,r12 +# binutils: e500mc.d: 58: 7e 74 ab 3e sthepx r19,r20,r21 +# name sthepx code 7c00033e mask ff0700fc00000000 flags @E500MC @A2 operands 3b 31 38 0 0 0 0 0 +define pcodeop StoreHalfwordByExternalProcessIDIndexed; +:sthepx S,RA_OR_ZERO,B is OP=31 & S & RA_OR_ZERO & B & XOP_1_10=415 & BIT_0=0 { # + EA:$(REGISTER_SIZE) = RA_OR_ZERO + B; + *[ram]:2 EA = StoreHalfwordByExternalProcessIDIndexed(S,RA_OR_ZERO,B); +} + +# PowerISA II: 5.4.3 External Process ID Instructions [Category: Embedded.External PID] +# CMT: Store Word by External Process ID Indexed +# FORM: X-form +# binutils: a2.d: 7b0: 7d 4b 61 3e stwepx r10,r11,r12 +# binutils: e500mc.d: 5c: 7e d7 c1 3e stwepx r22,r23,r24 +# name stwepx code 7c00013e mask ff0700fc00000000 flags @E500MC @A2 operands 3b 31 38 0 0 0 0 0 +define pcodeop StoreWordByExternalProcessIDIndexed; +:stwepx S,RA_OR_ZERO,B is OP=31 & S & RA_OR_ZERO & B & XOP_1_10=159 & BIT_0=0 { # + EA:$(REGISTER_SIZE) = RA_OR_ZERO + B; + *[ram]:4 EA = StoreWordByExternalProcessIDIndexed(S,RA_OR_ZERO,B); +} + +# PowerISA II: 5.4.3 External Process ID Instructions [Category: Embedded.External PID] +# CMT: Store Doubleword Byte-Reverse Indexed +# FORM: X-form +# binutils: a2.d: 71c: 7d 4b 65 28 stdbrx r10,r11,r12 +# binutils: cell.d: 48: 7c 00 0d 28 stdbrx r0,0,r1 +# binutils: cell.d: 4c: 7c 01 15 28 stdbrx r0,r1,r2 +# binutils: power7.d: e8: 7e 95 b5 28 stdbrx r20,r21,r22 +# name stdbrx code 7c000528 mask ff0700fc00000000 flags @POWER7 @CELL @A2 operands 3b 32 38 0 0 0 0 0 +define pcodeop StoreDoublewordByteReverseIndexed; +:stdbrx S,RA_OR_ZERO,B is OP=31 & S & RA_OR_ZERO & B & XOP_1_10=660 & BIT_0=0 { # + EA:$(REGISTER_SIZE) = RA_OR_ZERO + B; + *[ram]:8 EA = StoreDoublewordByteReverseIndexed(S,RA_OR_ZERO,B); +} + +# PowerISA II: 5.4.3 External Process ID Instructions [Category: Embedded.External PID] +# CMT: Store Doubleword Byte-Reverse Indexed +# FORM: X-form +# binutils: a2.d: 724: 7d 4b 61 
3a stdepx r10,r11,r12 +# binutils: e500mc.d: 60: 7f 3a d9 3a stdepx r25,r26,r27 +# name stdepx code 7c00013a mask ff0700fc00000000 flags @E500MC @A2 operands 3b 31 38 0 0 0 0 0 +define pcodeop StoreDoublewordByteReverseIndexed1; +:stdepx S,RA_OR_ZERO,B is OP=31 & S & RA_OR_ZERO & B & XOP_1_10=157 & BIT_0=0 { # + EA:$(REGISTER_SIZE) = RA_OR_ZERO + B; + *[ram]:8 EA = StoreDoublewordByteReverseIndexed1(S,RA_OR_ZERO,B); +} + + +# PowerISA II: 5.4.3 External Process ID Instructions [Category: Embedded.External PID] +# CMT: Load Byte by External Process ID Indexed +# FORM: X-form +# binutils: a2.d 3ec: 7d 4b 60 be lbepx r10,r11,r12 +# binutils: e500mc.d 40: 7c 22 18 be lbepx r1,r2,r3 +# name lbepx code 7c0000be mask ff0700fc00000000 flags @E500MC @A2 operands 3b 31 38 0 0 0 0 0 +# Note: no support for context modeling here +:lbepx D,RA_OR_ZERO,B is OP=31 & D & RA_OR_ZERO & B & XOP_1_10=95 & BIT_0=0 +{ + ea:$(REGISTER_SIZE) = RA_OR_ZERO+B; + D = zext(*:1(ea)); +} + +# PowerISA II: 5.4.3 External Process ID Instructions [Category: Embedded.External PID] +# CMT: Data Cache Block Touch by External PID +# FORM: X-form +# binutils: a2.d: 174: 7d 4b 62 7e dcbtep r10,r11,r12 +# binutils: e500mc.d: a4: 7c c7 42 7e dcbtep r6,r7,r8 +# NOTE: BITS_21_25 => TH (register) +# name dcbtep code 7c00027e mask ff0700fc00000000 flags @E500MC @A2 operands 3b 31 38 0 0 0 0 0 +# No PCODE necessary +define pcodeop DataCacheBlockTouchByExternalPID2; +:dcbtep TH,RA_OR_ZERO,B is OP=31 & TH & RA_OR_ZERO & B & XOP_1_10=319 & BIT_0=0 { + DataCacheBlockTouchByExternalPID2(TH,RA_OR_ZERO,B); +} + + +# PowerISA II: 5.4.3 External Process ID Instructions [Category: Embedded.External PID] +# CMT: Load Doubleword by External Process ID Indexed +# FORM: X-form +# binutils: a2.d: 41c: 7d 4b 60 3a ldepx r10,r11,r12 +# binutils: e500mc.d: 4c: 7d 4b 60 3a ldepx r10,r11,r12 +# name ldepx code 7c00003a mask ff0700fc00000000 flags @E500MC @A2 operands 3b 31 38 0 0 0 0 0 +# Note: no support for context modeling here +:ldepx D,RA_OR_ZERO,B is OP=31 & D & RA_OR_ZERO & B & XOP_1_10=29 & BIT_0=0 +{ + ea:$(REGISTER_SIZE) = RA_OR_ZERO+B; + D = *:8(ea); +} + +# PowerISA II: 5.4.3 External Process ID Instructions [Category: Embedded.External PID] +# CMT: Load Word by External Process ID Indexed +# FORM: X-form +# binutils: a2.d: 4c8: 7d 4b 60 3e lwepx r10,r11,r12 +# binutils: e500mc.d: 48: 7c e8 48 3e lwepx r7,r8,r9 +# Note: no support for context modeling here +:lwepx D,RA_OR_ZERO,B is OP=31 & D & RA_OR_ZERO & B & XOP_1_10=31 & BIT_0=0 +{ + ea:$(REGISTER_SIZE) = RA_OR_ZERO+B; + D = *:4(ea); +} + +# PowerISA II: 5.4.3 External Process ID Instructions [Category: Embedded.External PID] +# CMT: Store Floating-Point Double by External Process ID Indexed +# FORM: X-form +# binutils: a2.d: 740: 7e 8a 5d be stfdepx f20,r10,r11 +# binutils: e500mc.d: 64: 7f 9d f5 be stfdepx f28,r29,r30 +# NOTE: BITS_21_25 => FRS (float register) => fS +# Note: no support for context modeling here +:stfdepx fS,RA_OR_ZERO,B is OP=31 & fS & B & RA_OR_ZERO & XOP_1_10=735 & BIT_0=0 +{ + ea:$(REGISTER_SIZE) = RA_OR_ZERO+B; + *:8(ea) = fS; +} + + +# PowerISA II: 5.4.3 External Process ID Instructions [Category: Embedded.External PID] +# CMT: Load Halfword by External Process ID Indexed +# FORM: X-form +# binutils: a2.d: 480: 7d 4b 62 3e lhepx r10,r11,r12 +# binutils: e500mc.d: 44: 7c 85 32 3e lhepx r4,r5,r6 +# Note: no support for context modeling here +:lhepx D,RA_OR_ZERO,B is OP=31 & D & RA_OR_ZERO & B & XOP_1_10=287 & BIT_0=0 +{ + ea:$(REGISTER_SIZE) = RA_OR_ZERO+B; + D = 
zext(*:2(ea));
+}
+
+# ========================================================================
+
+# PowerISA II: 3.3.15.2 Move To/From System Registers [Category: Embedded]
+# CMT: Move From Device Control Register User-mode Indexed [Category: Embedded.Device Control]
+# FORM: X-form
+# binutils: 476.d: 498: 7c 64 02 46 mfdcrux r3,r4
+define pcodeop MoveFromDeviceControlRegisterUserModeIndexed;
+:mfdcrux RT,A is OP=31 & RT & A & BITS_11_15=0 & XOP_1_10=291 & BIT_0=0
+{
+
+@ifdef BIT_64
+    tmp:8 = dcr000 + (A * $(REGISTER_SIZE));
+    RT = *[register]:8 (tmp:4);
+@else
+    tmp:4 = dcr000 + (A * $(REGISTER_SIZE));
+    RT = *[register]:4 (tmp);
+@endif
+
+}
+
+# PowerISA II: 3.3.15.2 Move To/From System Registers [Category: Embedded]
+# CMT: Move To Device Control Register User-mode Indexed [Category: Embedded.Device Control]
+# FORM: X-form
+# binutils: 476.d: 4c8: 7c 83 03 46 mtdcrux r3,r4
+:mtdcrux S,A is OP=31 & S & A & BITS_11_15=0 & XOP_1_10=419 & BIT_0=0
+{
+@ifdef BIT_64
+    tmp:8 = dcr000 + (A * $(REGISTER_SIZE));
+    *[register]:8 (tmp:4) = S;
+@else
+    tmp:4 = dcr000 + (A * $(REGISTER_SIZE));
+    *[register]:4 (tmp) = S;
+@endif
+
+}
+
+# ========================================================================
+
+# PowerISA II: Chapter 10. Legacy Move Assist Instruction [Category: Legacy Move Assist]
+# CMT: Determine Leftmost Zero Byte
+# FORM: X-form
+# binutils: 476.d: 1a4: 7c 83 28 9c dlmzb r3,r4,r5
+# binutils: titan.d: 158: 7c 22 00 9c dlmzb r2,r1,r0
+define pcodeop DetermineLeftmostZeroByte;
+:dlmzb S,A,B is OP=31 & S & A & B & XOP_1_10=78 & Rc=0
+{
+    # search from left for the first occurrence of null byte
+
+    # low 32 bits of RS concatenated with low 32 bits of RB
+@ifdef BIT_64
+    tmpD:8 = zext( S:4 );
+@else
+    tmpD:8 = zext( S );
+@endif
+
+    tmpD = tmpD << 32;
+
+@ifdef BIT_64
+    tmpD = tmpD | zext( B:4 );
+@else
+    tmpD = tmpD | zext( B );
+@endif
+
+    tmpX:8 = 0;
+
+    <loop>
+    if ( tmpX == 8 ) goto <end>;
+    tmpX = tmpX + 1;
+
+    if ( ( ( tmpD << ( (tmpX-1) * 8 ) ) & 0xFF00000000000000 ) != 0 ) goto <loop>;
+
+    <end>
+
+    # place byte number in register A and low 7 bits of XER
+@ifdef BIT_64
+    A = tmpX;
+    XER = ( XER & 0xFFFFFFFFFFFFFF80 ) | tmpX;
+@else
+    A = tmpX:4;
+    XER = ( XER & 0xFFFFFF80 ) | tmpX:4;
+@endif
+
+}
+
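+# Example: with the low word of RS = 0x41420043 ('A','B',0x00,'C'), the scan
+# above stops at the third byte, so RA = 3 and the low 7 bits of XER hold 3.
+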
+# PowerISA II: Chapter 10. Legacy Move Assist Instruction [Category: Legacy Move Assist]
+# CMT: Determine Leftmost Zero Byte
+# FORM: X-form
+# binutils: 476.d: 1a8: 7c 83 28 9d dlmzb\. r3,r4,r5
+# binutils: titan.d: 15c: 7c 22 00 9d dlmzb\. r2,r1,r0
+define pcodeop DetermineLeftmostZeroByte1;
+:dlmzb. S,A,B is OP=31 & S & A & B & XOP_1_10=78 & Rc=1
+{
+    # search from left for the first occurrence of null byte
+
+    # low 32 bits of RS concatenated with low 32 bits of RB
+@ifdef BIT_64
+    tmpD:8 = zext( S:4 );
+@else
+    tmpD:8 = zext( S );
+@endif
+
+    tmpD = tmpD << 32;
+
+@ifdef BIT_64
+    tmpD = tmpD | zext( B:4 );
+@else
+    tmpD = tmpD | zext( B );
+@endif
+
+    tmpX:8 = 0;
+    tmpY:8 = 0;
+
+    <loop>
+    if ( tmpX == 8 ) goto <end>;
+    tmpX = tmpX + 1;
+
+    if ( ( ( tmpD << ( (tmpX - 1) * 8 ) ) & 0xFF00000000000000 ) != 0 ) goto <loop>;
+
+    # matched
+    tmpY = 1;
+
+    <end>
+
+    # place byte number in register A and low 7 bits of XER
+@ifdef BIT_64
+    A = tmpX;
+    XER = ( XER & 0xFFFFFFFFFFFFFF80 ) | tmpX;
+@else
+    A = tmpX:4;
+    XER = ( XER & 0xFFFFFF80 ) | tmpX:4;
+@endif
+
+    # Rc section
+
+    # Set bit 35 of CR to SO
+    cr0 = (cr0 & 0xe) | zext( xer_so & 1);
+
+    # Set bits 32:34 of CR
+    if ( tmpY != 1 ) goto <nomatch>;
+
+    if ( tmpX >= 5 ) goto <upper>;
+    cr0 = ( cr0 & 0x1 ) | 4;
+    goto <done>;
+
+    <upper>
+    cr0 = ( cr0 & 0x1 ) | 8;
+    goto <done>;
+
+    <nomatch>
+    cr0 = ( cr0 & 0x1 ) | 2;
+    <done>
+}
+
+# ========================================================================
+
+# PowerISA II: 4.6.5 Floating-Point Move Instructions
+# CMT: Floating Copy Sign
+# FORM: X-form
+# binutils: 476.d: 1f0: fd 4b 60 10 fcpsgn f10,f11,f12
+# binutils: a2.d: 268: fe 95 b0 10 fcpsgn f20,f21,f22
+:fcpsgn fT,fA,fB is $(NOTVLE) & OP=63 & fT & fA & fB & XOP_1_10=8 & Rc=0
+{
+    fT = ( fB & 0x7FFFFFFFFFFFFFFF ) | ( fA & 0x8000000000000000 );
+}
+
+# PowerISA II: 4.6.5 Floating-Point Move Instructions
+# CMT: Floating Copy Sign
+# FORM: X-form
+# binutils: 476.d: 1f4: fd 4b 60 11 fcpsgn\. f10,f11,f12
+# binutils: a2.d: 264: fe 95 b0 11 fcpsgn\. f20,f21,f22
+:fcpsgn. fT,fA,fB is $(NOTVLE) & OP=63 & fT & fA & fB & XOP_1_10=8 & Rc=1
+{
+    fT = ( fB & 0x7FFFFFFFFFFFFFFF ) | ( fA & 0x8000000000000000 );
+    cr1flags();
+}
+# ========================================================================
+
+# PowerISA II: 4.6.2 Floating-Point Load Instructions
+# CMT: Load Floating-Point as Integer Word Algebraic Indexed
+# FORM: X-form
+# binutils: 476.d: 350: 7d 43 26 ae lfiwax f10,r3,r4
+# binutils: a2.d: 44c: 7e 8a 5e ae lfiwax f20,r10,r11
+define pcodeop LoadFloatingPointAsIntegerWordAlgebraicIndexed;
+:lfiwax fT,RA_OR_ZERO,B is $(NOTVLE) & OP=31 & fT & RA_OR_ZERO & B & XOP_1_10=855 & BIT_0=0
+{
+    ea:$(REGISTER_SIZE) = RA_OR_ZERO+B;
+    fT = sext( *:4(ea) );
+}
+
+# PowerISA II: 4.6.2 Floating-Point Load Instructions
+# CMT: Load Floating-Point as Integer Word and Zero Indexed [Category: Floating-Point.Phased-in]
+# FORM: X-form
+# binutils: a2.d: 450: 7e 8a 5e ee lfiwzx f20,r10,r11
+# binutils: power7.d: ec: 7d 40 56 ee lfiwzx f10,0,r10
+# binutils: power7.d: f0: 7d 49 56 ee lfiwzx f10,r9,r10
+define pcodeop LoadFloatingPointAsIntegerWordAndZeroIndexed;
+:lfiwzx fT,RA_OR_ZERO,B is $(NOTVLE) & OP=31 & fT & RA_OR_ZERO & B & XOP_1_10=887 & BIT_0=0
+{
+    ea:$(REGISTER_SIZE) = RA_OR_ZERO+B;
+    fT = zext( *:4(ea) );
+}
+
+# =======================================================================
+
+# PowerISA II: A.1 Embedded Cache Initialization [Category: Embedded.Cache Initialization]
+# CMT: Instruction Cache Invalidate
+# FORM: X-form
+# binutils: 476.d: 31c: 7c 20 07 8c ici 1
+# binutils: a2.d: 3d8: 7d 40 07 8c ici 10
+# Note: Using CT, but limited to 4 bits, not 5 (PPC bit 6 is 0 and is a don't care anyhow as CT is unused)
+# No PCODE for this function
+define pcodeop InstructionCacheInvalidate;
+:ici CT is OP=31 & CT & BITS_16_20=0 & BITS_11_15=0 & XOP_1_10=966 & BIT_0=0
+{
+    InstructionCacheInvalidate();
+} + +# PowerISA II: A.1 Embedded Cache Initialization [Category: Embedded.Cache Initialization] +# CMT: Data Cache Invalidate +# FORM: X-form +# Note: Using CT, but limited to 4 bits, not 5 (PPC bit 6 is 0 and is a don't care anyhow as CT is unused) +# No PCODE for this function +# binutils: 476.d: 180: 7c 20 03 8c dci 1 +# binutils: a2.d: 1a8: 7d 40 03 8c dci 10 +define pcodeop DataCacheInvalidate; +:dci CT is OP=31 & CT & BITS_11_20=0 & BITS_11_15=0 & XOP_1_10=454 & BIT_0=0 +{ + DataCacheInvalidate(); +} + +# ======================================================================= + +# PowerISA II: 4.3.1 Instruction Cache Instructions +# CMT: Instruction Cache Block Touch [Category: Embedded] +# FORM: X-form +# binutils: 476.d: 308: 7c a8 48 2c icbt 5,r8,r9 +# binutils: a2.d: 3bc: 7c 0a 58 2c icbt r10,r11 +# binutils: a2.d: 3c0: 7c ea 58 2c icbt 7,r10,r11 +# binutils: booke.d: 0: 7c a8 48 2c icbt 5,r8,r9 +# binutils: booke_xcoff.d: 8: 7c a8 48 2c icbt 5,r8,r9 +# Note: Using CT, but limited to 4 bits, not 5 (PPC bit 6 is 0 and is a don't care anyhow as CT is unused) +# No PCODE for this function +define pcodeop InstructionCacheBlockTouch; +:icbt CT,RA_OR_ZERO,B is $(NOTVLE) & OP=31 & CT & RA_OR_ZERO & B & XOP_1_10=22 & BIT_0=0 +{ + InstructionCacheBlockTouch(RA_OR_ZERO,B); +} + +# ====================================================================== + +# PowerISA II: 4.6.6.1 Floating-Point Elementary Arithmetic Instructions +# CMT: Floating Reciprocal Square Root Estimate [Single] +# FORM: A-form +# binutils: 476.d: 2d0: ed c0 78 34 frsqrtes f14,f15 +# binutils: a2.d: 374: ee 80 a8 34 frsqrtes f20,f21 +# binutils: a2.d: 37c: ee 80 a8 34 frsqrtes f20,f21 +# binutils: a2.d: 384: ee 81 a8 34 frsqrtes f20,f21,1 +# binutils: power7.d: 184: ed c0 78 34 frsqrtes f14,f15 +# NOTE: binutils allows BITS_16_20=1 but manual says BITS_16_20=0. We take the manuals side. (pg 136) +:frsqrtes fT,fB is $(NOTVLE) & OP=59 & fT & fB & BITS_16_20=0 & BITS_6_10=0 & XOP_1_5=26 & Rc=0 +{ + # divide 1 by square root of fB to create reciprocal + tmp1:8 = 0x3FF0000000000000; + fT = tmp1 f/ sqrt( fB ); + setFPDivFlags(tmp1,fB,fT); +} + +# PowerISA II: 4.6.6.1 Floating-Point Elementary Arithmetic Instructions +# CMT: Floating Reciprocal Square Root Estimate [Single] +# FORM: A-form +# binutils: 476.d: 2d4: ed c0 78 35 frsqrtes. f14,f15 +# binutils: a2.d: 378: ee 80 a8 35 frsqrtes. f20,f21 +# binutils: a2.d: 380: ee 80 a8 35 frsqrtes. f20,f21 +# binutils: a2.d: 388: ee 81 a8 35 frsqrtes. f20,f21,1 +# binutils: power7.d: 188: ed c0 78 35 frsqrtes. f14,f15 +# NOTE: binutils allows BITS_16_20=1 but manual says BITS_16_20=0. We take the manuals side. (pg 136) +define pcodeop FloatingReciprocalSquareRootEstimate1; +:frsqrtes. fT,fB is $(NOTVLE) & OP=59 & fT & fB & BITS_16_20=0 & BITS_6_10=0 & XOP_1_5=26 & Rc=1 +{ + # divide 1 by square root of fB to create reciprocal + tmp1:8 = 0x3FF0000000000000; + fT = tmp1 f/ sqrt( fB ); + setFPDivFlags(tmp1,fB,fT); + cr1flags(); +} + +# PowerISA II: 4.6.6.1 Floating-Point Elementary Arithmetic Instructions +# CMT: Floating Reciprocal Estimate [Single] +# FORM: A-form +# binutils: 476.d: 290: fd c0 78 30 fre f14,f15 +# binutils: a2.d: 308: fe 80 a8 30 fre f20,f21 +# binutils: a2.d: 310: fe 80 a8 30 fre f20,f21 +# binutils: a2.d: 318: fe 81 a8 30 fre f20,f21,1 +# binutils: power7.d: 16c: fd c0 78 30 fre f14,f15 +# NOTE: binutils allows BITS_16_20!=0 but manual says BITS_16_20=0. We take the manuals side. 
(pg 135) +:fre fT,fB is $(NOTVLE) & OP=63 & fT & BITS_16_20=0 & fB & BITS_6_10=0 & XOP_1_5=24 & Rc=0 +{ + # divide 1 by fB to create reciprocal + tmp1:8 = 0x3FF0000000000000; + fT = tmp1 f/ fB; + setFPDivFlags(tmp1,fB,fT); +} + +# PowerISA II: 4.6.6.1 Floating-Point Elementary Arithmetic Instructions +# CMT: Floating Reciprocal Estimate [Single] +# FORM: A-form +# binutils: 476.d: 294: fd c0 78 31 fre. f14,f15 +# binutils: a2.d: 304: fe 80 a8 31 fre. f20,f21 +# binutils: a2.d: 30c: fe 80 a8 31 fre. f20,f21 +# binutils: a2.d: 314: fe 81 a8 31 fre. f20,f21,1 +# binutils: power7.d: 170: fd c0 78 31 fre. f14,f15 +# NOTE: binutils allows BITS_16_20!=0 but manual says BITS_16_20=0. We take the manuals side. (pg 135) +:fre. fT,fB is $(NOTVLE) & OP=63 & fT & BITS_16_20=0 & fB & BITS_6_10=0 & XOP_1_5=24 & Rc=1 +{ + # divide 1 by fB to create reciprocal + tmp1:8 = 0x3FF0000000000000; + fT = tmp1 f/ fB; + setFPDivFlags(tmp1,fB,fT); + cr1flags(); +} +# ====================================================================== + +# PowerISA II: 4.6.7.3 Floating Round to Integer Instructions +# CMT: Floating Round to Integer Minus +# FORM: X-form +# binutils: 476.d: 2a0: fd 40 5b d0 frim f10,f11 +# binutils: a2.d: 338: fe 80 ab d0 frim f20,f21 +:frim fT,fB is $(NOTVLE) & OP=63 & fT & BITS_16_20=0 & fB & XOP_1_10=488 & Rc=0 +{ + fT = floor( fB ); + setFPRF(fT); + setSummaryFPSCR(); +} + +# PowerISA II: 4.6.7.3 Floating Round to Integer Instructions +# CMT: Floating Round to Integer Minus +# FORM: X-form +# binutils: 476.d: 2a4: fd 40 5b d1 frim. f10,f11 +# binutils: a2.d: 334: fe 80 ab d1 frim. f20,f21 +:frim. fT,fB is $(NOTVLE) & OP=63 & fT & BITS_16_20=0 & fB & XOP_1_10=488 & Rc=1 +{ + fT = floor( fB ); + setFPRF(fT); + setSummaryFPSCR(); + cr1flags(); +} + +# PowerISA II: 4.6.7.3 Floating Round to Integer Instructions +# CMT: Floating Round to Integer Nearest +# FORM: X-form +# binutils: 476.d: 2a8: fd 40 5b 10 frin f10,f11 +# binutils: a2.d: 340: fe 80 ab 10 frin f20,f21 +:frin fT,fB is $(NOTVLE) & OP=63 & fT & BITS_16_20=0 & fB & XOP_1_10=392 & Rc=0 +{ + fT = round( fB ); + setFPRF(fT); + setSummaryFPSCR(); +} + +# PowerISA II: 4.6.7.3 Floating Round to Integer Instructions +# CMT: Floating Round to Integer Nearest +# FORM: X-form +# binutils: 476.d: 2ac: fd 40 5b 11 frin. f10,f11 +# binutils: a2.d: 33c: fe 80 ab 11 frin. f20,f21 +:frin. fT,fB is $(NOTVLE) & OP=63 & fT & BITS_16_20=0 & fB & XOP_1_10=392 & Rc=1 +{ + fT = round( fB ); + setFPRF(fT); + setSummaryFPSCR(); + cr1flags(); +} + +# PowerISA II: 4.6.7.3 Floating Round to Integer Instructions +# CMT: Floating Round to Integer Plus +# FORM: X-form +# binutils: 476.d: 2b0: fd 40 5b 90 frip f10,f11 +# binutils: a2.d: 348: fe 80 ab 90 frip f20,f21 +:frip fT,fB is $(NOTVLE) & OP=63 & fT & BITS_16_20=0 & fB & XOP_1_10=456 & Rc=0 +{ + fT = ceil( fB ); + setFPRF(fT); + setSummaryFPSCR(); +} + +# PowerISA II: 4.6.7.3 Floating Round to Integer Instructions +# CMT: Floating Round to Integer Plus +# FORM: X-form +# binutils: 476.d: 2b4: fd 40 5b 91 frip. f10,f11 +# binutils: a2.d: 344: fe 80 ab 91 frip. f20,f21 +:frip. 
fT,fB is $(NOTVLE) & OP=63 & fT & BITS_16_20=0 & fB & XOP_1_10=456 & Rc=1 +{ + fT = ceil( fB ); + setFPRF(fT); + setSummaryFPSCR(); + cr1flags(); +} + +# PowerISA II: 4.6.7.3 Floating Round to Integer Instructions +# CMT: Floating Round to Integer Toward Zero +# FORM: X-form +# binutils: 476.d: 2b8: fd 40 5b 50 friz f10,f11 +# binutils: a2.d: 350: fe 80 ab 50 friz f20,f21 +:friz fT,fB is $(NOTVLE) & OP=63 & fT & BITS_16_20=0 & fB & XOP_1_10=424 & Rc=0 +{ + fT = trunc( fB ); + setFPRF(fT); + setSummaryFPSCR(); +} + +# PowerISA II: 4.6.7.3 Floating Round to Integer Instructions +# CMT: Floating Round to Integer Toward Zero +# FORM: X-form +# binutils: 476.d: 2bc: fd 40 5b 51 friz. f10,f11 +# binutils: a2.d: 34c: fe 80 ab 51 friz. f20,f21 +define pcodeop FloatingRoundToIntegerTowardZero1; +:friz. fT,fB is $(NOTVLE) & OP=63 & fT & BITS_16_20=0 & fB & XOP_1_10=424 & Rc=1 +{ + fT = trunc( fB ); + setFPRF(fT); + setSummaryFPSCR(); + cr1flags(); +} + +# ======================================================================= + +# PowerISA II: 4.4.4 Wait Instruction +# CMT: Wait +# FORM: X-form +# binutils: a2.d: 86c: 7c 00 00 7c wait +# binutils: a2.d: 870: 7c 00 00 7c wait +# binutils: e500mc.d: 1c: 7c 00 00 7c wait +# binutils: e500mc.d: 20: 7c 00 00 7c wait +# binutils: power7.d: 58: 7c 00 00 7c wait +# binutils: power7.d: 5c: 7c 00 00 7c wait +define pcodeop waitOp; +:wait BITS_21_22 is OP=31 & crfD=0 & BITS_21_22 & BITS_11_20=0 & XOP_1_10=62 & BIT_0=0 { waitOp(); } + +# ======================================================================= + +# PowerISA II: 4.3.1 System Linkage Instructions +# CMT: Return From Guest Interrupt [Category:Embedded.Hypervisor] +# FORM: XL-form +# binutils: e500mc.d: 0: 4c 00 00 4e rfdi +define pcodeop ReturnFromGuestInterrupt; +:rfgi is $(NOTVLE) & OP=19 & BITS_11_25=0 & XOP_1_10=102 & BIT_0=0 { + MSR = returnFromGuestInterrupt(MSR, spr17b); #GSRR1 + return[spr17a]; #GSRR0 +} + +# ======================================================================= + +# PowerISA II: 4.4.2.1 64-Bit Load and Reserve and Store Conditional Instructions [Category: 64-Bit] +# CMT: Load Doubleword And Reserve Indexed +# FORM: X-form +# binutils: a2.d: 410: 7d 4b 60 a8 ldarx r10,r11,r12 +# binutils: a2.d: 414: 7d 4b 60 a9 ldarx r10,r11,r12,1 +:ldarx TH,RA_OR_ZERO,B,BIT_0 is OP=31 & TH & RA_OR_ZERO & B & XOP_1_10=84 & BIT_0 +{ + ea:$(REGISTER_SIZE) = RA_OR_ZERO+B; + RESERVE = 1; + RESERVE_LENGTH = 8; + RESERVE_ADDRESS = ea; + TH = *:8 (ea); +} + +# ======================================================================= + +# PowerISA II: 4.4.1 Instruction Synchronize Instruction +# CMT: Load Word And Reserve Indexed +# FORM: X-form +# binutils: 476.d: 394: 7c 64 28 28 lwarx r3,r4,r5 +# binutils: 476.d: 398: 7c 64 28 28 lwarx r3,r4,r5 +# binutils: 476.d: 39c: 7c 64 28 29 lwarx r3,r4,r5,1 +# binutils: a2.d: 4b4: 7d 4b 60 28 lwarx r10,r11,r12 +# binutils: a2.d: 4b8: 7d 4b 60 29 lwarx r10,r11,r12,1 +:lwarx TH,RA_OR_ZERO,B,BIT_0 is OP=31 & TH & RA_OR_ZERO & B & XOP_1_10=20 & BIT_0 +{ + ea:$(REGISTER_SIZE) = RA_OR_ZERO+B; + RESERVE = 1; + RESERVE_LENGTH = 4; + RESERVE_ADDRESS = ea; + TH = zext( *:4 (ea) ); +} + +# ======================================================================= + +# PowerISA II: 11.3 Processor Control Instructions +# CMT: Message Clear +# FORM: X-form +# binutils: a2.d: 544: 7c 00 51 dc msgclr r10 +# binutils: e500mc.d: 14: 7c 00 69 dc msgclr r13 +define pcodeop MessageClear; +:msgclr B is OP=31 & BITS_21_25=0 & BITS_16_20=0 & B & XOP_1_10=238 & BIT_0=0 { 
MessageClear(B); } + +# PowerISA II: 11.3 Processor Control Instructions +# CMT: Message Send +# FORM: X-form +# binutils: a2.d: 548: 7c 00 51 9c msgsnd r10 +# binutils: e500mc.d: 18: 7c 00 71 9c msgsnd r14 +define pcodeop MessageSend; +:msgsnd B is OP=31 & BITS_21_25=0 & BITS_16_20=0 & B & XOP_1_10=206 & BIT_0=0 { MessageSend(); } + +# ======================================================================= + + +# PowerISA III: TLB Management Instructions (expanded by ISA 3.0) +# CMT: TLB Invalidate Entry (expands on form in ppc_instructions.sinc) +# FORM: X-form +:tlbie RB_OR_ZERO,RS_OR_ZERO,"2",BIT_17,BIT_16 is $(NOTVLE) & OP=31 & RS_OR_ZERO & RB_OR_ZERO & BIT_20=0 & BITS_18_19=2 & BIT_17 & BIT_16 & XOP_1_10=306 & BIT_0=0 { # RIC=2 + # RIC = 2 + # PRS = BIT_17 + # R = BIT_16 + TLBInvalidateEntry(RB_OR_ZERO,RS_OR_ZERO,2:1,BIT_17:1,BIT_16:1); +} +:tlbie RB_OR_ZERO,RS_OR_ZERO,BIT_18,BIT_17,"1" is $(NOTVLE) & OP=31 & RS_OR_ZERO & RB_OR_ZERO & BIT_20=0 & (BITS_18_19=0 | BITS_18_19=1) & BIT_18 & BIT_17 & BIT_16=1 & XOP_1_10=306 & BIT_0=0 { # RIC=0|1 & R=1 + # RIC = BITS_18_19 (0 or 1) + # PRS = BIT_17 + # R = 1 + TLBInvalidateEntry(RB_OR_ZERO,RS_OR_ZERO,BIT_18:1,BIT_17:1,1:1); +} +:tlbie RB_OR_ZERO,RS_OR_ZERO,"3","0","0" is $(NOTVLE) & OP=31 & RS_OR_ZERO & RB_OR_ZERO & BIT_20=0 & BITS_18_19=3 & BIT_17=0 & BIT_16=0 & XOP_1_10=306 & BIT_0=0 { # RIC=3 & PRS=0 & R=0 + # RIC = 3 + # PRS = 0 + # R = 0 + TLBInvalidateEntry(RB_OR_ZERO,RS_OR_ZERO,3:1,0:1,0:1); +} + +# PowerISA III: TLB Management Instructions (expanded by ISA 3.0) +# CMT: TLB Invalidate Entry Local (expands on form in ppc_instructions.sinc) +# FORM: X-form +:tlbiel RB_OR_ZERO,RS_OR_ZERO,"0","0","0" is $(NOTVLE) & OP=31 & RS_OR_ZERO & RB_OR_ZERO & BITS_16_20=0 & XOP_1_10=274 & BIT_0=0 { # RIC=0 & PRS=0 & R=0 + # RIC = 0 + # PRS = 0 + # R = 0 + TLBInvalidateEntryLocal(RB_OR_ZERO,RS_OR_ZERO,0:1,0:1,0:1); +} +:tlbiel RB_OR_ZERO,RS_OR_ZERO,"2",BIT_17,BIT_16 is $(NOTVLE) & OP=31 & RS_OR_ZERO & RB_OR_ZERO & BIT_20=0 & BITS_18_19=2 & BIT_17 & BIT_16 & XOP_1_10=274 & BIT_0=0 { # RIC=2 + # RIC = 2 + # PRS = BIT_17 + # R = BIT_16 + TLBInvalidateEntryLocal(RB_OR_ZERO,RS_OR_ZERO,2:1,BIT_17:1,BIT_16:1); +} +:tlbiel RB_OR_ZERO,RS_OR_ZERO,BIT_18,BIT_17,"1" is $(NOTVLE) & OP=31 & RS_OR_ZERO & RB_OR_ZERO & BIT_20=0 & (BITS_18_19=0 | BITS_18_19=1) & BIT_18 & BIT_17 & BIT_16=1 & XOP_1_10=274 & BIT_0=0 { # RIC=0|1 & R=1 + # RIC = BITS_18_19 (0 or 1) + # PRS = BIT_17 + # R = 1 + TLBInvalidateEntryLocal(RB_OR_ZERO,RS_OR_ZERO,BIT_18:1,BIT_17:1,1:1); +} + + +# PowerISA II: 6.11.4.9 TLB Management Instructions +# CMT: TLB Search and Reserve Indexed Category: Embedded.TLB Write Conditional] +# FORM: X-form +# binutils: a2.d: 848: 7c 0a 5e a5 tlbsrx\. r10,r11 +define pcodeop TLBSearchAndReserveIndexedCategory; +:tlbsrx. 
A,B is OP=31 & BITS_21_25=0 & A & B & XOP_1_10=850 & BIT_0=1 { TLBSearchAndReserveIndexedCategory(A,B,cr0); } + +# ======================================================================= + +# PowerISA II: 4.6.10 Floating-Point Status and Control Register Instructions +# CMT: Move To FPSCR Fields +# FORM: X-form +# binutils: 476.d: 4e0: fc 0c 55 8e mtfsf 6,f10 +# binutils: 476.d: 4e4: fc 0c 55 8e mtfsf 6,f10 +# binutils: 476.d: 4e8: fc 0d 55 8e mtfsf 6,f10,0,1 +# binutils: 476.d: 4ec: fe 0c 55 8e mtfsf 6,f10,1,0 +# binutils: a2.d: 580: fc 0c a5 8e mtfsf 6,f20 +# binutils: a2.d: 588: fc 0c a5 8e mtfsf 6,f20 +# binutils: a2.d: 590: fe 0d a5 8e mtfsf 6,f20,1,1 +# binutils: common.d: 210: fc 0c 55 8e mtfsf 6,f10 +# binutils: power6.d: b4: fc 0c 55 8e mtfsf 6,f10 +# binutils: power6.d: bc: fc 0c 55 8e mtfsf 6,f10 +# binutils: power6.d: c4: fc 0d 55 8e mtfsf 6,f10,0,1 +# binutils: power6.d: cc: fe 0c 55 8e mtfsf 6,f10,1,0 +define pcodeop MoveToFPSCRFields; +:mtfsf BITS_17_24,fB,BIT_25,BIT_16 is $(NOTVLE) & OP=63 & BIT_25 & BITS_17_24 & BIT_16 & fB & XOP_1_10=711 & Rc=0 { # PCODE + MoveToFPSCRFields(fB); +} + +# PowerISA II: 4.6.10 Floating-Point Status and Control Register Instructions +# CMT: Move To FPSCR Fields +# FORM: X-form +# binutils: 476.d: 4f0: fc 0c 5d 8f mtfsf. 6,f11 +# binutils: 476.d: 4f4: fc 0c 5d 8f mtfsf. 6,f11 +# binutils: 476.d: 4f8: fc 0d 5d 8f mtfsf. 6,f11,0,1 +# binutils: 476.d: 4fc: fe 0c 5d 8f mtfsf. 6,f11,1,0 +# binutils: a2.d: 57c: fc 0c a5 8f mtfsf. 6,f20 +# binutils: a2.d: 584: fc 0c a5 8f mtfsf. 6,f20 +# binutils: a2.d: 58c: fe 0d a5 8f mtfsf. 6,f20,1,1 +define pcodeop MoveToFPSCRFields1; +:mtfsf. BITS_17_24,fB,BIT_25,BIT_16 is $(NOTVLE) & OP=63 & BIT_25 & BITS_17_24 & BIT_16 & fB & XOP_1_10=711 & Rc=1 { # PCODE + MoveToFPSCRFields1(fB); +} + +# PowerISA II: 4.6.10 Floating-Point Status and Control Register Instructions +# CMT: Move To FPSCR Field Immediate +# FORM: X-form +# binutils: 476.d: 500: ff 00 01 0c mtfsfi 6,0 +# binutils: 476.d: 504: ff 00 01 0c mtfsfi 6,0 +# binutils: 476.d: 508: ff 00 01 0c mtfsfi 6,0 +# binutils: 476.d: 50c: ff 01 01 0c mtfsfi 6,0,1 +# binutils: a2.d: 598: ff 00 01 0c mtfsfi 6,0 +# binutils: a2.d: 5a0: ff 00 d1 0c mtfsfi 6,13 +# binutils: a2.d: 5a8: ff 01 d1 0c mtfsfi 6,13,1 +# binutils: common.d: 218: ff 00 01 0c mtfsfi 6,0 +# binutils: power6.d: d4: ff 00 01 0c mtfsfi 6,0 +# binutils: power6.d: dc: ff 00 01 0c mtfsfi 6,0 +# binutils: power6.d: e4: ff 01 01 0c mtfsfi 6,0,1 +define pcodeop MoveToFPSCRFieldImmediate; +:mtfsfi BF2,BITS_12_15,BIT_16 is $(NOTVLE) & OP=63 & BF2 & BITS_21_22=0 & BITS_17_20=0 & BIT_16 & BITS_12_15 & BIT_11=0 & XOP_1_10=134 & Rc=0 { + MoveToFPSCRFieldImmediate(); +} + +# PowerISA II: 4.6.10 Floating-Point Status and Control Register Instructions +# CMT: Move To FPSCR Field Immediate +# FORM: X-form +# binutils: 476.d: 510: ff 00 f1 0d mtfsfi. 6,15 +# binutils: 476.d: 514: ff 00 f1 0d mtfsfi. 6,15 +# binutils: 476.d: 518: ff 00 f1 0d mtfsfi. 6,15 +# binutils: 476.d: 51c: ff 01 f1 0d mtfsfi. 6,15,1 +# binutils: a2.d: 594: ff 00 01 0d mtfsfi. 6,0 +# binutils: a2.d: 59c: ff 00 d1 0d mtfsfi. 6,13 +# binutils: a2.d: 5a4: ff 01 d1 0d mtfsfi. 6,13,1 +define pcodeop MoveToFPSCRFieldImmediate1; +:mtfsfi. 
BITS_23_25,BITS_12_15,BIT_16 is $(NOTVLE) & OP=63 & BITS_23_25 & BITS_21_22=0 & BITS_17_20=0 & BIT_16 & BITS_12_15 & BIT_11=0 & XOP_1_10=134 & Rc=1 { + MoveToFPSCRFieldImmediate1(); +} + +# ======================================================================= + +# PowerISA II: 3.3.15 Move To/From System Register Instructions +# CMT: Move To Condition Register Fields +# FORM: XFX-form +# binutils: 476.d: 48c: 7c 60 00 26 mfcr r3 +define pcodeop mfcrOp; +:mfcr TO is OP=31 & TO & BIT_20=0 & BITS_12_19=0 & XOP_1_10=190 & BIT_0=0 { mfcrOp(); } + +# ======================================================================= + +# PowerISA II: 5.6.1 DFP Arithmetic Instructions +# CMT: DFP Add [Quad] +# FORM: X-form +# binutils: power6.d: 38: fe 96 c0 04 daddq f20,f22,f24 +# binutils: power7.d: 9c: fe 96 c0 04 daddq f20,f22,f24 +define pcodeop daddqOp; +:daddq fT,fA,fB is $(NOTVLE) & OP=63 & fT & fA & fB & XOP_1_10=2 & Rc=0 { daddqOp(fA,fB); } + +# PowerISA II: 5.6.1 DFP Arithmetic Instructions +# CMT: DFP Add [Quad] +# FORM: X-form +define pcodeop daddqDotOp; +:daddq. fT,fA,fB is $(NOTVLE) & OP=63 & fT & fA & fB & XOP_1_10=2 & Rc=1 { daddqDotOp(fA); } + +# ======================================================================= + +# ======================================================================= +# binutils: 476.d: 30c: 7d ae 7b cc icbtls 13,r14,r15 +# binutils: a2.d: 3c4: 7c 0a 5b cc icbtls r10,r11 +# binutils: a2.d: 3c8: 7c ea 5b cc icbtls 7,r10,r11 +# binutils: e500.d: 10: 7d ae 7b cc icbtls 13,r14,r15 +# binutils: titan.d: 198: 7c 02 0b cc icbtls r2,r1 +# binutils: titan.d: 19c: 7c 02 0b cc icbtls r2,r1 +# binutils: titan.d: 1a0: 7c 22 0b cc icbtls 1,r2,r1 +# :icbtls BITS_21_24,RA_OR_ZERO,B is $(NOTVLE) & OP=31 & BIT_25=0 & BITS_21_24 & B & XOP_1_10=486 & BIT_0=0 & RA_OR_ZERO +# { +# ea = RA_OR_ZERO + B; +# # prefetchInstructionCacheBlockLockSetX(ea); +# } +# Source for information on instructions: +# PowerISA_V2.06B_PUBLIC.pdf (dated: July 23, 2010) +# and binutils-2.21.1 +# Have test case for about 200 of these instructions + +# Extended Mnemonic +# xvmovdp XT,XB => xvcpsgndp XT,XB,XB +# xvmovsp XT,XB => xvcpsgnsp XT,XB,XB +# xxmrghd T,A,B => xxpermdi T,A,B,0b00 +# xxmrgld T,A,B => xxpermdi T,A,B,0b11 +# xxspltd T,A,0 => xxpermdi T,A,A,0b00 +# xxswapd T,A => xxpermdi T,A,A,0b10 + +@include "vsx.sinc" + +# binutils-descr: "brinc", VX (4, 527), VX_MASK, PPCSPE, PPCNONE, {RS, RA, RB} +define pcodeop brincOp; +# ISA-cmt: brinc - Bit Reversed Increment +# ISA-info: brinc - Form "EVX" Page 510 Category "SP" +# binutils: mytest.d: 1d0: 10 22 1a 0f brinc r1,r2,r3 +:brinc S,A,B is OP=4 & XOP_0_10=527 & S & A & B { brincOp(S,A,B); } + +# binutils-descr: "hrfid", XL(19,274), 0xffffffff, POWER5|CELL, PPC476, {0} +define pcodeop hrfidOp; +# ISA-info: hrfid - Form "XL" Page 739 Category "S" +# binutils: mytest.d: 0: 4c 00 02 24 hrfid +:hrfid is $(NOTVLE) & OP=19 & XOP_1_10=274 & BITS_11_25=0 & BIT_0=0 { hrfidOp(); } + +define pcodeop bcctrOp; +# ZZZ NO-PARSE XLLK - "bcctr", XLLK(19,528,0), XLBH_MASK, PPCCOM, PPCNONE, {BO, BI, BH} +:bcctr BO,BI_BITS,BH is $(NOTVLE) & OP=19 & BO & BI_BITS & BITS_13_15=0 & BH & XOP_1_10=528 & LK=0 { bcctrOp(); } + +define pcodeop bcctrlOp; +# ZZZ NO-PARSE XLLK - "bcctrl", XLLK(19,528,1), XLBH_MASK, PPCCOM, PPCNONE, {BO, BI, BH} +:bcctrl BO,BI_BITS,BH is $(NOTVLE) & OP=19 & BO & BI_BITS & BITS_13_15=0 & BH & XOP_1_10=528 & LK=1 { bcctrlOp(); } + +# binutils-descr: "lbarx", X(31,52), XEH_MASK, POWER7, PPCNONE, {RT, RA0, RB, EH} +define pcodeop lbarxOp; +# ISA-cmt: lbarx - 
Load Byte and Reserve Indexed +# ISA-info: lbarx - Form "X" Page 689 Category "B" +# binutils: power7.d: 14c: 7d 4b 60 68 lbarx r10,r11,r12 +# binutils: power7.d: 150: 7d 4b 60 68 lbarx r10,r11,r12 +# binutils: power7.d: 154: 7d 4b 60 69 lbarx r10,r11,r12,1 +:lbarx RT,A,B,BIT_0 is OP=31 & XOP_1_10=52 & RT & A & B & BIT_0 { + A = A + B; + RT = *:1 A; +} + +# binutils-descr: "lharx", X(31,116), XEH_MASK, POWER7, PPCNONE, {RT, RA0, RB, EH} +define pcodeop lharxOp; +# ISA-cmt: lharx - Load Halfword and Reserve Indexed +# ISA-info: lharx - Form "X" Page 690 Category "B" +# binutils: power7.d: 158: 7e 95 b0 e8 lharx r20,r21,r22 +# binutils: power7.d: 15c: 7e 95 b0 e8 lharx r20,r21,r22 +# binutils: power7.d: 160: 7e 95 b0 e9 lharx r20,r21,r22,1 +:lharx RT,A,B,BIT_0 is OP=31 & XOP_1_10=116 & RT & A & B & BIT_0 { + A = A + B; + RT = *:2 A; +} + +# binutils-descr: "ehpriv", X(31,270), 0xffffffff, E500MC|PPCA2, PPCNONE, {0} +define pcodeop ehprivOp; +# ISA-info: ehpriv - Form "XL" Page 889 Category "E.HV" +# binutils: NO-EXAMPLE - ehpriv +:ehpriv BITS_11_25 is OP=31 & BITS_11_25 & XOP_1_10=270 & BIT_0=0 { ehprivOp(); } + +# binutils-descr: "cbcdtd", X(31,314), XRB_MASK, POWER6, PPCNONE, {RA, RS} +define pcodeop cbcdtdOp; +# ISA-info: cbcdtd - Form "X" Page 97 Category "BCDA" +# binutils: power6.d: ec: 7d 6a 02 74 cbcdtd r10,r11 +:cbcdtd S,A is $(NOTVLE) & OP=31 & S & A & BITS_11_15=0 & XOP_1_10=314 & BIT_0=0 { cbcdtdOp(S,A); } + +# binutils-descr: "divdeu", XO(31,393,0,0), XO_MASK, POWER7|PPCA2, PPCNONE, {RT, RA, RB} +define pcodeop divdeuOp; +# binutils: mytest.d: 4: 7c 64 2b 12 divdeu r3,r4,r5 +:divdeu RT,A,B is $(NOTVLE) & OP=31 & XOP_1_9=393 & OE=0 & Rc=0 & RT & A & B { + RT = A/B; +} + +# binutils-descr: "divdeu.", XO(31,393,0,1), XO_MASK, POWER7|PPCA2, PPCNONE, {RT, RA, RB} +define pcodeop divdeuDotOp; +# binutils: mytest.d: 8: 7c 64 2b 13 divdeu. r3,r4,r5 +:divdeu. RT,A,B is $(NOTVLE) & OP=31 & XOP_1_9=393 & OE=0 & Rc=1 & RT & A & B { + RT = A/B; + cr0flags(RT); +} + +# binutils-descr: "divde", XO(31,425,0,0), XO_MASK, POWER7|PPCA2, PPCNONE, {RT, RA, RB} +define pcodeop divdeOp; +# binutils: mytest.d: c: 7c 64 2b 52 divde r3,r4,r5 +:divde RT,A,B is $(NOTVLE) & OP=31 & XOP_1_9=425 & OE=0 & Rc=0 & RT & A & B { + RT = A s/ B; +} + +# binutils-descr: "divde.", XO(31,425,0,1), XO_MASK, POWER7|PPCA2, PPCNONE, {RT, RA, RB} +define pcodeop divdeDotOp; +# binutils: mytest.d: 10: 7c 64 2b 53 divde. r3,r4,r5 +:divde. 
RT,A,B is $(NOTVLE) & OP=31 & XOP_1_9=425 & OE=0 & Rc=1 & RT & A & B { + RT = A s/ B; + cr0flags(RT); +} + +# binutils-descr: "dsn", X(31,483), XRT_MASK, E500MC, PPCNONE, {RA, RB} +define pcodeop dsnOp; +# ISA-info: dsn - Form "X" Page 710 Category "DS" +# binutils: e500mc.d: 3c: 7c 18 cb c6 dsn r24,r25 +:dsn A,B is OP=31 & XOP_1_10=483 & A & B & BITS_21_25=0 & BIT_0=0 { dsnOp(A,B); } + +# binutils-descr: "lbdx", X(31,515), X_MASK, E500MC, PPCNONE, {RT, RA, RB} +define pcodeop lbdxOp; +# ISA-info: lbdx - Form "X" Page 708 Category "DS" +# binutils: e500mc.d: 68: 7c 01 14 06 lbdx r0,r1,r2 +:lbdx RT,A,B is OP=31 & XOP_1_10=515 & RT & A & B & BIT_0=0 { RT = lbdxOp(RT,A,B); } + +# binutils-descr: "lhdx", X(31,547), X_MASK, E500MC, PPCNONE, {RT, RA, RB} +define pcodeop lhdxOp; +# ISA-info: lhdx - Form "X" Page 708 Category "DS" +# binutils: e500mc.d: 6c: 7d 8d 74 46 lhdx r12,r13,r14 +:lhdx RT,A,B is OP=31 & XOP_1_10=547 & RT & A & B & BIT_0=0 { RT = lhdxOp(RT,A,B); } + +# binutils-descr: "lwdx", X(31,579), X_MASK, E500MC, PPCNONE, {RT, RA, RB} +define pcodeop lwdxOp; +# ISA-info: lwdx - Form "X" Page 708 Category "DS" +# binutils: e500mc.d: 70: 7c 64 2c 86 lwdx r3,r4,r5 +:lwdx RT,A,B is OP=31 & XOP_1_10=579 & RT & A & B & BIT_0=0 { RT = lwdxOp(RT,A,B); } + +# binutils-descr: "lddx", X(31,611), X_MASK, E500MC, PPCNONE, {RT, RA, RB} +define pcodeop lddxOp; +# ISA-info: lddx - Form "X" Page 708 Category "DS" +# binutils: e500mc.d: 78: 7d f0 8c c6 lddx r15,r16,r17 +:lddx RT,A,B is OP=31 & XOP_1_10=611 & RT & A & B & BIT_0=0 { RT = lddxOp(RT,A,B); } + +# ISA-info: lddx - Form "X" Page 50 Category "DS" +:ldx RT,RA_OR_ZERO,B is OP=31 & XOP_1_10=21 & RT & RA_OR_ZERO & B & BIT_0=0 { + RT = *:8 (RA_OR_ZERO + B); +} + +# binutils-descr: "stbdx", X(31,643), X_MASK, E500MC, PPCNONE, {RS, RA, RB} +define pcodeop stbdxOp; +# ISA-info: stbdx - Form "X" Page 709 Category "DS" +# binutils: e500mc.d: 7c: 7c c7 45 06 stbdx r6,r7,r8 +:stbdx S,A,B is OP=31 & XOP_1_10=643 & S & A & B & BIT_0=0 { *[ram]:1 B =stbdxOp(S,A,B); } + +# binutils-descr: "sthdx", X(31,675), X_MASK, E500MC, PPCNONE, {RS, RA, RB} +define pcodeop sthdxOp; +# ISA-info: sthdx - Form "X" Page 709 Category "DS" +# binutils: e500mc.d: 80: 7e 53 a5 46 sthdx r18,r19,r20 +:sthdx S,A,B is OP=31 & XOP_1_10=675 & S & A & B & BIT_0=0 { *[ram]:2 B = sthdxOp(S,A,B); } + +# binutils-descr: "stwdx", X(31,707), X_MASK, E500MC, PPCNONE, {RS, RA, RB} +define pcodeop stwdxOp; +# ISA-info: stwdx - Form "X" Page 709 Category "DS" +# binutils: e500mc.d: 84: 7d 2a 5d 86 stwdx r9,r10,r11 +:stwdx S,A,B is OP=31 & XOP_1_10=707 & S & A & B & BIT_0=0 { *[ram]:4 B = stwdxOp(S,A,B); } + +# binutils-descr: "sthcx.", XRC(31,726,1), X_MASK, POWER7, PPCNONE, {RS, RA0, RB} +define pcodeop sthcxDotOp; +# ISA-info: sthcx. - Form "X" Page 692 Category "B" +# binutils: mytest.d: 14: 7c 64 2d ad sthcx. r3,r4,r5 +:sthcx. 
S,RA_OR_ZERO,B is OP=31 & XOP_1_10=726 & Rc=1 & S & RA_OR_ZERO & B { + EA:$(REGISTER_SIZE) = RA_OR_ZERO + B; + *[ram]:2 EA = sthcxDotOp(S,RA_OR_ZERO,B); + setCrBit(cr0, 2, 1); +} + +# binutils-descr: "stddx", X(31,739), X_MASK, E500MC, PPCNONE, {RS, RA, RB} +define pcodeop stddxOp; +# ISA-cmt: stddx - Store Doubleword with Decoration Indexed +# ISA-info: stddx - Form "X" Page 709 Category "DS" +# binutils: e500mc.d: 8c: 7e b6 bd c6 stddx r21,r22,r23 +:stddx S,A,B is OP=31 & XOP_1_10=739 & S & A & B & BIT_0=0 { *[ram]:8 B = stddxOp(S,A,B); } + +# binutils-descr: "lfdpx", X(31,791), X_MASK, POWER6, POWER7, {FRT, RA, RB} +define pcodeop lfdpxOp; +# ISA-cmt: lfdpx - Load Floating-Point Double Pair Indexed +# ISA-info: lfdpx - Form "X" Page 131 Category "FP.out" +# binutils: power6.d: 30: 7d ae 7e 2e lfdpx f13,r14,r15 +:lfdpx fT,A,B is $(NOTVLE) & OP=31 & XOP_1_10=791 & fT & A & B & BIT_0=0 { fT = lfdpxOp(fT,A,B); } + +# binutils-descr: "lfddx", X(31,803), X_MASK, E500MC, PPCNONE, {FRT, RA, RB} +define pcodeop lfddxOp; +# ISA-cmt: lfddx - Load Floating Doubleword with Decoration Indexed +# ISA-info: lfddx - Form "X" Page 708 Category "DS" +# binutils: e500mc.d: 74: 7f 5b e6 46 lfddx f26,r27,r28 +:lfddx fT,A,B is OP=31 & XOP_1_10=803 & fT & A & B & BIT_0=0 { fT = lfddxOp(fT,A,B); } + +# binutils-descr: "lhzcix", X(31,821), X_MASK, POWER6, PPCNONE, {RT, RA0, RB} +define pcodeop lhzcixOp; +# ISA-cmt: lhzcix - Load Halfword and Zero Caching Inhibited Indexed +# ISA-info: lhzcix - Form "X" Page 749 Category "S" +# binutils: mytest.d: 18: 7c 64 2e 6a lhzcix r3,r4,r5 +:lhzcix RT,A,B is $(NOTVLE) & OP=31 & XOP_1_10=821 & RT & A & B & BIT_0=0 { + A = A + B; + RT = *:2 A; +} + +# binutils-descr: "lbzcix", X(31,853), X_MASK, POWER6, PPCNONE, {RT, RA0, RB} +define pcodeop lbzcixOp; +# ISA-cmt: lbzcix - Load Byte and Zero Caching Inhibited Indexed +# ISA-info: lbzcix - Form "X" Page 749 Category "S" +# binutils: mytest.d: 1c: 7c 64 2e aa lbzcix r3,r4,r5 +:lbzcix RT,A,B is $(NOTVLE) & OP=31 & XOP_1_10=853 & RT & A & B & BIT_0=0 { + A = A + B; + RT = *:1 A; +} + +# binutils-descr: "eieio", X(31,854), 0xffffffff, PPC, BOOKE|PPCA2|PPC476, {0} +define pcodeop eieioOp; +# ISA-cmt: eieio - Enforce In-order Execution of I/O +# ISA-info: eieio - Form "X" Page 698 Category "S" +# binutils: mytest.d: 20: 7c 00 06 ac eieio +:eieio is $(NOTVLE) & OP=31 & XOP_1_10=854 & BITS_11_25=0 & BIT_0=0 { eieioOp(); } + + +# binutils-descr: "ldcix", X(31,885), X_MASK, POWER6, PPCNONE, {RT, RA0, RB} +# ISA-cmt: ldcix - Load Doubleword Caching Inhibited Indexed +# ISA-info: ldcix - Form "X" Page 749 Category "S" +# binutils: mytest.d: 24: 7c 64 2e ea ldcix r3,r4,r5 +:ldcix RT,A,B is $(NOTVLE) & OP=31 & XOP_1_10=885 & RT & A & B & BIT_0=0 { + A = A + B; + RT = *:8 A; +} + +# binutils-descr: "divdeuo", XO(31,393,1,0), XO_MASK, POWER7|PPCA2, PPCNONE, {RT, RA, RB} +# binutils: mytest.d: 28: 7c 64 2f 12 divdeuo r3,r4,r5 +:divdeuo RT,A,B is $(NOTVLE) & OP=31 & XOP_1_9=393 & OE=1 & Rc=0 & RT & A & B { + divOverflow(A,B); + RT = A/B; +} + +# binutils-descr: "divdeuo.", XO(31,393,1,1), XO_MASK, POWER7|PPCA2, PPCNONE, {RT, RA, RB} +define pcodeop divdeuoDotOp; +# binutils: mytest.d: 2c: 7c 64 2f 13 divdeuo. r3,r4,r5 +:divdeuo. 
RT,A,B is $(NOTVLE) & OP=31 & XOP_1_9=393 & OE=1 & Rc=1 & RT & A & B { + divOverflow(A,B); + RT = A/B; + cr0flags(RT); +} + +# binutils-descr: "stwcix", X(31,917), X_MASK, POWER6, PPCNONE, {RS, RA0, RB} +define pcodeop stwcixOp; +# ISA-cmt: stwcix - Store Word Caching Inhibited Indexed +# ISA-info: stwcix - Form "X" Page 750 Category "S" +# binutils: mytest.d: 30: 7c 64 2f 2a stwcix r3,r4,r5 +:stwcix S,A,B is $(NOTVLE) & OP=31 & XOP_1_10=917 & S & A & B & BIT_0=0 { + A = A + B; + *:4 A = S; +} + +# binutils-descr: "stfdpx", X(31,919), X_MASK, POWER6, PPCNONE, {FRS, RA, RB} +define pcodeop stfdpxOp; +# ISA-cmt: stfdpx - Store Floating-Point Double Pair Indexed +# ISA-info: stfdpx - Form "X" Page 131 Category "FP.out" +# binutils: mytest.d: 34: 7c 64 2f 2e stfdpx f3,r4,r5 +:stfdpx fS,RA_OR_ZERO,B is $(NOTVLE) & OP=31 & XOP_1_10=919 & fS & RA_OR_ZERO & B & BIT_0=0 { + EA:$(REGISTER_SIZE) = RA_OR_ZERO + B; + *[ram]:8 EA = stfdpxOp(fS,RA_OR_ZERO,B); +} + +# binutils-descr: "stfddx", X(31,931), X_MASK, E500MC, PPCNONE, {FRS, RA, RB} +define pcodeop stfddxOp; +# ISA-info: stfddx - Form "X" Page 709 Category "DS" +# binutils: e500mc.d: 88: 7f be ff 46 stfddx f29,r30,r31 +:stfddx fS,A,B is OP=31 & XOP_1_10=931 & fS & A & B & BIT_0=0 { *[ram]:8 B = stfddxOp(fS,A,B); } + +# binutils-descr: "divdeo", XO(31,425,1,0), XO_MASK, POWER7|PPCA2, PPCNONE, {RT, RA, RB} +define pcodeop divdeoOp; +# binutils: mytest.d: 38: 7c 64 2f 52 divdeo r3,r4,r5 +:divdeo RT,A,B is $(NOTVLE) & OP=31 & XOP_1_9=425 & OE=1 & Rc=0 & RT & A & B { + divOverflow(A,B); + RT = A s/ B; +} + +# binutils-descr: "divdeo.", XO(31,425,1,1), XO_MASK, POWER7|PPCA2, PPCNONE, {RT, RA, RB} +define pcodeop divdeoDotOp; +# binutils: mytest.d: 3c: 7c 64 2f 53 divdeo. r3,r4,r5 +:divdeo. RT,A,B is $(NOTVLE) & OP=31 & XOP_1_9=425 & OE=1 & Rc=1 & RT & A & B { + divOverflow(A,B); + RT = A s/ B; + cr0flags(RT); +} + +# binutils-descr: "sthcix", X(31,949), X_MASK, POWER6, PPCNONE, {RS, RA0, RB} +define pcodeop sthcixOp; +# ISA-info: sthcix - Form "X" Page 750 Category "S" +# binutils: mytest.d: 40: 7c 64 2f 6a sthcix r3,r4,r5 +:sthcix S,A,B is $(NOTVLE) & OP=31 & XOP_1_10=949 & S & A & B & BIT_0=0 { + A = A + B; + *:2 A = S; +} + +define pcodeop slbfeeDotOp; +# ISA-info: slbfee - Form "X" Page 794 Category "?" +:slbfee. 
RT,B is $(NOTVLE) & OP=31 & RT & BITS_16_20=0 & B & XOP_1_10=979 & BIT_0=1 { slbfeeDotOp(RT,B); } + +# binutils-descr: "stbcix", X(31,981), X_MASK, POWER6, PPCNONE, {RS, RA0, RB} +define pcodeop stbcixOp; +# ISA-info: stbcix - Form "X" Page 750 Category "S" +# binutils: mytest.d: 44: 7c 64 2f aa stbcix r3,r4,r5 +:stbcix S,A,B is $(NOTVLE) & OP=31 & XOP_1_10=981 & S & A & B & BIT_0=0 { + A = A + B; + *:1 A = S; +} + +# binutils-descr: "stdcix", X(31,1013), X_MASK, POWER6, PPCNONE, {RS, RA0, RB} +define pcodeop stdcixOp; +# ISA-info: stdcix - Form "X" Page 750 Category "S" +# binutils: mytest.d: 48: 7c 64 2f ea stdcix r3,r4,r5 +:stdcix S,A,B is $(NOTVLE) & OP=31 & XOP_1_10=1013 & S & A & B & BIT_0=0 { + A = A + B; + *:8 A = S; +} + +# binutils-descr: "lq", OP(56), OP_MASK, POWER4, PPC476, {RTQ, DQ, RAQ} +define pcodeop lqOp; +# ISA-cmt: lq - Load Quadword +# ISA-info: lq - Form "DQ" Page 751 Category "LSQ" +# binutils: power4.d: +0: e0 83 00 00 lq r4,0\(r3\) +# binutils: power4.d: +4: e0 83 00 00 lq r4,0\(r3\) +:lq RT,A,DQ is $(NOTVLE) & OP=56 & RT & A & DQ & BITS_0_3=0 { RT = lqOp(RT,A); } + +define pcodeop lvepxOp; +:lvepx RT,A,B is OP=31 & RT & A & B & XOP_1_10=295 & BIT_0=0 { RT = lvepxOp(RT,A,B); } + +define pcodeop lvepxlOp; +:lvepxl RT,A,B is OP=31 & RT & A & B & XOP_1_10=263 & BIT_0=0 { RT = lvepxlOp(RT,A,B); } + +# binutils-descr: "lfdp", OP(57), OP_MASK, POWER6, POWER7, {FRT, D, RA0} +define pcodeop lfdpOp; +# ISA-cmt: lfdp - Load Floating-Point Double Pair +# ISA-info: lfdp - Form "DS" Page 131 Category "FP.out" +# binutils: NO-EXAMPLE - lfdp +:lfdp fT,A,DS is $(NOTVLE) & OP=57 & fT & A & DS & BITS_0_1=0 { fT = lfdpOp(fT,A,DS:2); } + +# binutils-descr: "dadd", XRC(59,2,0), X_MASK, POWER6, PPCNONE, {FRT, FRA, FRB} +define pcodeop daddOp; +# ISA-cmt: dadd - DFP Add +# binutils: power6.d: 34: ee 11 90 04 dadd f16,f17,f18 +# binutils: power7.d: 98: ee 11 90 04 dadd f16,f17,f18 +:dadd fT,fA,fB is $(NOTVLE) & OP=59 & XOP_1_10=2 & Rc=0 & fT & fA & fB { daddOp(fT,fA,fB); } + +# binutils-descr: "dadd.", XRC(59,2,1), X_MASK, POWER6, PPCNONE, {FRT, FRA, FRB} +define pcodeop daddDotOp; +# ISA-cmt: dadd. - DFP Add Rc +# binutils: mytest.d: 50: ec 43 20 05 dadd. f2,f3,f4 +:dadd. fT,fA,fB is $(NOTVLE) & OP=59 & XOP_1_10=2 & Rc=1 & fT & fA & fB { daddDotOp(fT,fA,fB); } + +# binutils-descr: "dqua", ZRC(59,3,0), Z2_MASK, POWER6, PPCNONE, {FRT,FRA,FRB,RMC} +define pcodeop dquaOp; +# ISA-cmt: dqua - DFP Quantize +# binutils: mytest.d: 54: ec 22 18 06 dqua f1,f2,f3,0 +:dqua fT,fA,fB,RMC is $(NOTVLE) & OP=59 & XOP_1_8=3 & Rc=0 & fT & fA & fB & RMC { dquaOp(fT,fA,fB); } + +# binutils-descr: "dqua.", ZRC(59,3,1), Z2_MASK, POWER6, PPCNONE, {FRT,FRA,FRB,RMC} +define pcodeop dquaDotOp; +# ISA-cmt: dqua. - DFP Quantize Rc +# binutils: mytest.d: 58: ec 22 18 07 dqua. f1,f2,f3,0 +:dqua. fT,fA,fB,RMC is $(NOTVLE) & OP=59 & XOP_1_8=3 & Rc=1 & fT & fA & fB & RMC { dquaDotOp(fT,fA,fB); } + +# binutils-descr: "dmul", XRC(59,34,0), X_MASK, POWER6, PPCNONE, {FRT, FRA, FRB} +define pcodeop dmulOp; +# ISA-cmt: dmul - DFP Multiply +# binutils: mytest.d: 5c: ec 43 20 44 dmul f2,f3,f4 +:dmul fT,fA,fB is $(NOTVLE) & OP=59 & XOP_1_10=34 & Rc=0 & fT & fA & fB { dmulOp(fT,fA,fB); } + +# binutils-descr: "dmul.", XRC(59,34,1), X_MASK, POWER6, PPCNONE, {FRT, FRA, FRB} +define pcodeop dmulDotOp; +# ISA-cmt: dmul. - DFP Multiply Rc +# binutils: mytest.d: 60: ec 43 20 45 dmul. f2,f3,f4 +:dmul.
fT,fA,fB is $(NOTVLE) & OP=59 & XOP_1_10=34 & Rc=1 & fT & fA & fB { dmulDotOp(fT,fA,fB); } + +# binutils-descr: "drrnd", ZRC(59,35,0), Z2_MASK, POWER6, PPCNONE, {FRT, FRA, FRB, RMC} +define pcodeop drrndOp; +# ISA-cmt: drrnd - DFP Reround +# binutils: mytest.d: 64: ec 43 20 46 drrnd f2,f3,f4,0 +:drrnd fT,fA,fB,RMC is $(NOTVLE) & OP=59 & XOP_1_8=35 & Rc=0 & fT & fA & fB & RMC { drrndOp(fT,fA,fB); } + +# binutils-descr: "drrnd.", ZRC(59,35,1), Z2_MASK, POWER6, PPCNONE, {FRT, FRA, FRB, RMC} +define pcodeop drrndDotOp; +# ISA-cmt: drrnd. - DFP Reround Rc +# binutils: mytest.d: 68: ec 43 20 47 drrnd. f2,f3,f4,0 +:drrnd. fT,fA,fB,RMC is $(NOTVLE) & OP=59 & XOP_1_8=35 & Rc=1 & fT & fA & fB & RMC { drrndDotOp(fT,fA,fB); } + +# binutils-descr: "dscli", ZRC(59,66,0), Z_MASK, POWER6, PPCNONE, {FRT, FRA, SH16} +define pcodeop dscliOp; +# ISA-cmt: dscli - DFP Shift Significand Left Immediate +# binutils: mytest.d: 6c: ec 43 10 84 dscli f2,f3,4 +# Y {OP 0 5 {}} {fT 6 10 {}} {fA 11 15 {}} {SH16 16 21 {}} {XOP_1_9 22 30 {}} {Rc 31 31 {}} +# X 00--------------------------05 06---------------------10 11---------------------15 16--------------------------21 22-----------------------------------------30 31-31 +# X --------OP=111011(59)---------|-----------fT------------|-----------fA------------|-------------SH16-------------|-------------XOP_1_9=1000010(66)-------------|Rc=0-| +:dscli fT,fA,SH16 is $(NOTVLE) & OP=59 & XOP_1_9=66 & Rc=0 & fT & fA & SH16 { dscliOp(fT,fA); } + +# binutils-descr: "dscli.", ZRC(59,66,1), Z_MASK, POWER6, PPCNONE, {FRT, FRA, SH16} +define pcodeop dscliDotOp; +# ISA-cmt: dscli. - DFP Shift Significand Left Immediate Rc +# binutils: mytest.d: 70: ec 43 10 85 dscli. f2,f3,4 +:dscli. fT,fA,SH16 is $(NOTVLE) & OP=59 & XOP_1_9=66 & Rc=1 & fT & fA & SH16 { dscliDotOp(fT,fA); } + +# binutils-descr: "dquai", ZRC(59,67,0), Z2_MASK, POWER6, PPCNONE, {TE, FRT,FRB,RMC} +define pcodeop dquaiOp; +# ISA-cmt: dquai - DFP Quantize Immediate +# binutils: mytest.d: 74: ec 62 20 86 dquai 2,f3,f4,0 +:dquai fT,BITS_16_20,fB,RMC is $(NOTVLE) & OP=59 & fT & BITS_16_20 & fB & RMC & XOP_1_8=67 & Rc=0 { dquaiOp(fT,fB); } + +# binutils-descr: "dquai.", ZRC(59,67,1), Z2_MASK, POWER6, PPCNONE, {TE, FRT,FRB,RMC} +define pcodeop dquaiDotOp; +# ISA-cmt: dquai. - DFP Quantize Immediate Rc +# binutils: mytest.d: 78: ec 62 20 87 dquai. 2,f3,f4,0 +:dquai. fT,BITS_16_20,fB,RMC is $(NOTVLE) & OP=59 & fT & BITS_16_20 & fB & RMC & XOP_1_8=67 & Rc=1 { dquaiDotOp(fT,fB); } + +# binutils-descr: "dscri", ZRC(59,98,0), Z_MASK, POWER6, PPCNONE, {FRT, FRA, SH16} +define pcodeop dscriOp; +# ISA-cmt: dscri - DFP Shift Significand Right Immediate +# binutils: mytest.d: 7c: ec 43 10 c4 dscri f2,f3,4 +:dscri fT,fA,SH16 is $(NOTVLE) & OP=59 & XOP_1_9=98 & Rc=0 & fT & fA & SH16 { dscriOp(fT,fA); } + +# binutils-descr: "dscri.", ZRC(59,98,1), Z_MASK, POWER6, PPCNONE, {FRT, FRA, SH16} +define pcodeop dscriDotOp; +# ISA-cmt: dscri. - DFP Shift Significand Right Immediate Rc +# binutils: mytest.d: 80: ec 43 10 c5 dscri. f2,f3,4 +:dscri. 
fT,fA,SH16 is $(NOTVLE) & OP=59 & XOP_1_9=98 & Rc=1 & fT & fA & SH16 { dscriDotOp(fT,fA); } + +# binutils-descr: "drintx", ZRC(59,99,0), Z2_MASK, POWER6, PPCNONE, {R, FRT, FRB, RMC} +define pcodeop drintxOp; +# ISA-cmt: drintx - DFP Round To FP Integer With Inexact +# binutils: mytest.d: 84: ec 61 20 c6 drintx 1,f3,f4,0 +:drintx fT,fB,RMC is $(NOTVLE) & OP=59 & fT & BITS_17_20=0 & BIT_16 & fB & RMC & XOP_1_8=99 & Rc=0 { drintxOp(fT,fB); } + +# binutils-descr: "drintx.", ZRC(59,99,1), Z2_MASK, POWER6, PPCNONE, {R, FRT, FRB, RMC} +define pcodeop drintxDotOp; +# ISA-cmt: drintx. - DFP Round To FP Integer With Inexact Rc +# binutils: mytest.d: 84: ec 61 20 c6 drintx 1,f3,f4,0 +:drintx. fT,fB,RMC is $(NOTVLE) & OP=59 & fT & BITS_17_20=0 & BIT_16 & fB & RMC & XOP_1_8=99 & Rc=1 { drintxDotOp(fT,fB); } + +# binutils-descr: "dcmpo", X(59,130), X_MASK, POWER6, PPCNONE, {BF, FRA, FRB} +define pcodeop dcmpoOp; +# ISA-cmt: dcmpo - DFP Compare Ordered +# ISA-info: dcmpo - Form "X" Page 179 Category "DFP" +# binutils: mytest.d: 8c: ed 03 21 04 dcmpo cr2,f3,f4 +:dcmpo CRFD,fA,fB is $(NOTVLE) & OP=59 & XOP_1_10=130 & CRFD & fA & fB & BITS_21_22=0 & BIT_0=0 { dcmpoOp(CRFD,fA,fB); } + +# binutils-descr: "dtstex", X(59,162), X_MASK, POWER6, PPCNONE, {BF, FRA, FRB} +define pcodeop dtstexOp; +# ISA-cmt: dtstex - DFP Test Exponent +# ISA-info: dtstex - Form "X" Page 181 Category "DFP" +# binutils: mytest.d: 90: ed 03 21 44 dtstex cr2,f3,f4 +:dtstex CRFD,fA,fB is $(NOTVLE) & OP=59 & XOP_1_10=162 & CRFD & fA & fB & BITS_21_22=0 & BIT_0=0 { dtstexOp(CRFD,fA,fB); } + +# binutils-descr: "dtstdc", Z(59,194), Z_MASK, POWER6, PPCNONE, {BF, FRA, DCM} +define pcodeop dtstdcOp; +# ISA-cmt: dtstdc - DFP Test Data Class +# ISA-info: dtstdc - Form "Z23" Page 180 Category "DFP" +# binutils: mytest.d: 94: ed 03 11 84 dtstdc cr2,f3,4 +:dtstdc CRFD,fA,DCM is $(NOTVLE) & OP=59 & XOP_1_9=194 & CRFD & fA & DCM & BITS_21_22=0 & BIT_0=0 { dtstdcOp(CRFD,fA); } + +# binutils-descr: "dtstdg", Z(59,226), Z_MASK, POWER6, PPCNONE, {BF, FRA, DGM} +define pcodeop dtstdgOp; +# ISA-cmt: dtstdg - DFP Test Data Group +# ISA-info: dtstdg - Form "Z23" Page 180 Category "DFP" +# binutils: mytest.d: 98: ed 03 11 c4 dtstdg cr2,f3,4 +:dtstdg CRFD,fA,DGM is $(NOTVLE) & OP=59 & XOP_1_9=226 & CRFD & fA & DGM & BITS_21_22=0 & BIT_0=0 { dtstdgOp(CRFD,fA); } + +# binutils-descr: "drintn", ZRC(59,227,0), Z2_MASK, POWER6, PPCNONE, {R, FRT, FRB, RMC} +define pcodeop drintnOp; +# ISA-cmt: drintn - DFP Round To FP Integer Without Inexact +# binutils: mytest.d: 9c: ec 61 21 c6 drintn 1,f3,f4,0 +:drintn fT,fB,RMC is $(NOTVLE) & OP=59 & XOP_1_8=227 & Rc=0 & BIT_16 & fT & fB & RMC & BITS_17_20=0 { drintnOp(fT,fB); } + +# binutils-descr: "drintn.", ZRC(59,227,1), Z2_MASK, POWER6, PPCNONE, {R, FRT, FRB, RMC} +define pcodeop drintnDotOp; +# ISA-cmt: drintn. - DFP Round To FP Integer Without Inexact Rc +# binutils: mytest.d: a0: ec 61 21 c7 drintn. 1,f3,f4,0 +:drintn. fT,fB,RMC is $(NOTVLE) & OP=59 & XOP_1_8=227 & Rc=1 & BIT_16 & fT & fB & RMC & BITS_17_20=0 { drintnDotOp(fT,fB); } + +# binutils-descr: "dctdp", XRC(59,258,0), X_MASK, POWER6, PPCNONE, {FRT, FRB} +define pcodeop dctdpOp; +# ISA-cmt: dctdp - DFP Convert To DFP Long +# binutils: mytest.d: a4: ec 40 1a 04 dctdp f2,f3 +:dctdp fT,fB is $(NOTVLE) & OP=59 & XOP_1_10=258 & Rc=0 & fT & fB & BITS_16_20=0 { dctdpOp(fT,fB); } + +# binutils-descr: "dctdp.", XRC(59,258,1), X_MASK, POWER6, PPCNONE, {FRT, FRB} +define pcodeop dctdpDotOp; +# ISA-cmt: dctdp.
- DFP Convert To DFP Long Rc +# binutils: mytest.d: a8: ec 40 1a 05 dctdp. f2,f3 +:dctdp. fT,fB is $(NOTVLE) & OP=59 & XOP_1_10=258 & Rc=1 & fT & fB & BITS_16_20=0 { dctdpDotOp(fT,fB); } + +# binutils-descr: "dctfix", XRC(59,290,0), X_MASK, POWER6, PPCNONE, {FRT, FRB} +define pcodeop dctfixOp; +# ISA-cmt: dctfix - DFP Convert To Fixed +# binutils: mytest.d: ac: ec 40 1a 44 dctfix f2,f3 +:dctfix fT,fB is $(NOTVLE) & OP=59 & XOP_1_10=290 & Rc=0 & fT & fB & BITS_16_20=0 { dctfixOp(fT,fB); } + +# binutils-descr: "dctfix.", XRC(59,290,1), X_MASK, POWER6, PPCNONE, {FRT, FRB} +define pcodeop dctfixDotOp; +# ISA-cmt: dctfix. - DFP Convert To Fixed Rc +# binutils: mytest.d: b0: ec 40 1a 45 dctfix. f2,f3 +:dctfix. fT,fB is $(NOTVLE) & OP=59 & XOP_1_10=290 & Rc=1 & fT & fB & BITS_16_20=0 { dctfixDotOp(fT,fB); } + +# binutils-descr: "ddedpd", XRC(59,322,0), X_MASK, POWER6, PPCNONE, {SP, FRT, FRB} +define pcodeop ddedpdOp; +# ISA-cmt: ddedpd - DFP Decode DPD To BCD +# binutils: mytest.d: b4: ec 70 22 84 ddedpd 2,f3,f4 +:ddedpd fT,SP,fB is $(NOTVLE) & OP=59 & fT & SP & BITS_16_18=0 & fB & XOP_1_10=322 & Rc=0 { ddedpdOp(fT,fB); } # & BITS_16_18=0 + +# binutils-descr: "ddedpd.", XRC(59,322,1), X_MASK, POWER6, PPCNONE, {SP, FRT, FRB} +define pcodeop ddedpdDotOp; +# ISA-cmt: ddedpd. - DFP Decode DPD To BCD Rc +# binutils: mytest.d: b8: ec 70 22 85 ddedpd. 2,f3,f4 +:ddedpd. fT,SP,fB is $(NOTVLE) & OP=59 & fT & SP & BITS_16_18=0 & fB & XOP_1_10=322 & Rc=1 { ddedpdDotOp(fT,fB); } # & BITS_16_18=0 + +# binutils-descr: "dxex", XRC(59,354,0), X_MASK, POWER6, PPCNONE, {FRT, FRB} +define pcodeop dxexOp; +# ISA-cmt: dxex - DFP Extract Biased Exponent +# binutils: mytest.d: bc: ec 40 1a c4 dxex f2,f3 +:dxex fT,fB is $(NOTVLE) & OP=59 & XOP_1_10=354 & Rc=0 & fT & fB & BITS_16_20=0 { dxexOp(fT,fB); } + +# binutils-descr: "dxex.", XRC(59,354,1), X_MASK, POWER6, PPCNONE, {FRT, FRB} +define pcodeop dxexDotOp; +# ISA-cmt: dxex. - DFP Extract Biased Exponent Rc +# binutils: mytest.d: c0: ec 40 1a c5 dxex. f2,f3 +:dxex. fT,fB is $(NOTVLE) & OP=59 & XOP_1_10=354 & Rc=1 & fT & fB & BITS_16_20=0 { dxexDotOp(fT,fB); } + +# binutils-descr: "dsub", XRC(59,514,0), X_MASK, POWER6, PPCNONE, {FRT, FRA, FRB} +define pcodeop dsubOp; +# ISA-cmt: dsub - DFP Subtract +# binutils: mytest.d: c4: ec 43 24 04 dsub f2,f3,f4 +:dsub fT,fA,fB is $(NOTVLE) & OP=59 & XOP_1_10=514 & Rc=0 & fT & fA & fB { dsubOp(fT,fA,fB); } + +# binutils-descr: "dsub.", XRC(59,514,1), X_MASK, POWER6, PPCNONE, {FRT, FRA, FRB} +define pcodeop dsubDotOp; +# ISA-cmt: dsub. - DFP Subtract Rc +# binutils: mytest.d: c8: ec 43 24 05 dsub. f2,f3,f4 +:dsub. fT,fA,fB is $(NOTVLE) & OP=59 & XOP_1_10=514 & Rc=1 & fT & fA & fB { dsubDotOp(fT,fA,fB); } + +# binutils-descr: "ddiv", XRC(59,546,0), X_MASK, POWER6, PPCNONE, {FRT, FRA, FRB} +define pcodeop ddivOp; +# ISA-cmt: ddiv - DFP Divide +# binutils: mytest.d: cc: ec 43 24 44 ddiv f2,f3,f4 +:ddiv fT,fA,fB is $(NOTVLE) & OP=59 & XOP_1_10=546 & Rc=0 & fT & fA & fB { ddivOp(fT,fA,fB); } + +# binutils-descr: "ddiv.", XRC(59,546,1), X_MASK, POWER6, PPCNONE, {FRT, FRA, FRB} +define pcodeop ddivDotOp; +# ISA-cmt: ddiv. - DFP Divide Rc +# binutils: mytest.d: d0: ec 43 24 45 ddiv. f2,f3,f4 +:ddiv. 
fT,fA,fB is $(NOTVLE) & OP=59 & XOP_1_10=546 & Rc=1 & fT & fA & fB { ddivDotOp(fT,fA,fB); } + +# binutils-descr: "dcmpu", X(59,642), X_MASK, POWER6, PPCNONE, {BF, FRA, FRB} +define pcodeop dcmpuOp; +# ISA-cmt: dcmpu - DFP Compare Unordered +# ISA-info: dcmpu - Form "X" Page 178 Category "DFP" +# binutils: mytest.d: d4: ed 03 25 04 dcmpu cr2,f3,f4 +:dcmpu CRFD,fA,fB is $(NOTVLE) & OP=59 & XOP_1_10=642 & CRFD & fA & fB & BITS_21_22=0 & BIT_0=0 { dcmpuOp(CRFD,fA,fB); } + +# binutils-descr: "dtstsf", X(59,674), X_MASK, POWER6, PPCNONE, {BF, FRA, FRB} +define pcodeop dtstsfOp; +# ISA-cmt: dtstsf - DFP Test Significance +# ISA-info: dtstsf - Form "X" Page 182 Category "DFP" +# binutils: mytest.d: d8: ed 03 25 44 dtstsf cr2,f3,f4 +:dtstsf CRFD,fA,fB is $(NOTVLE) & OP=59 & XOP_1_10=674 & CRFD & fA & fB & BITS_21_22=0 & BIT_0=0 { dtstsfOp(CRFD,fA,fB); } + +# binutils-descr: "drsp", XRC(59,770,0), X_MASK, POWER6, PPCNONE, {FRT, FRB} +define pcodeop drspOp; +# ISA-cmt: drsp - DFP Round To DFP Short +# binutils: mytest.d: dc: ec 40 1e 04 drsp f2,f3 +:drsp fT,fB is $(NOTVLE) & OP=59 & XOP_1_10=770 & Rc=0 & fT & fB & BITS_16_20=0 { drspOp(fT,fB); } + +# binutils-descr: "drsp.", XRC(59,770,1), X_MASK, POWER6, PPCNONE, {FRT, FRB} +define pcodeop drspDotOp; +# ISA-cmt: drsp. - DFP Round To DFP Short Rc +# binutils: mytest.d: e0: ec 40 1e 05 drsp. f2,f3 +:drsp. fT,fB is $(NOTVLE) & OP=59 & XOP_1_10=770 & Rc=1 & fT & fB & BITS_16_20=0 { drspDotOp(fT,fB); } + +# binutils-descr: "dcffix", XRC(59,802,0), X_MASK|FRA_MASK, POWER7, PPCNONE, {FRT, FRB} +define pcodeop dcffixOp; +# ISA-cmt: dcffix - DFP Convert From Fixed +# binutils: power7.d: 144: ed 40 66 44 dcffix f10,f12 +:dcffix fT,fB is $(NOTVLE) & OP=59 & XOP_1_10=802 & Rc=0 & fT & fB & BITS_16_20=0 { dcffixOp(fT,fB); } + +# binutils-descr: "dcffix.", XRC(59,802,1), X_MASK|FRA_MASK, POWER7, PPCNONE, {FRT, FRB} +define pcodeop dcffixDotOp; +# ISA-cmt: dcffix. - DFP Convert From Fixed Rc +# binutils: mytest.d: e4: ec 40 1e 45 dcffix. f2,f3 +:dcffix. fT,fB is $(NOTVLE) & OP=59 & XOP_1_10=802 & Rc=1 & fT & fB & BITS_16_20=0 { dcffixDotOp(fT,fB); } + +# binutils-descr: "denbcd", XRC(59,834,0), X_MASK, POWER6, PPCNONE, {S, FRT, FRB} +define pcodeop denbcdOp; +# ISA-cmt: denbcd - DFP Encode BCD To DPD +# binutils: mytest.d: e8: ec 70 26 84 denbcd 1,f3,f4 +:denbcd fT,fB is $(NOTVLE) & OP=59 & fT & BIT_20 & BITS_16_19=0 & fB & XOP_1_10=834 & Rc=0 { denbcdOp(fT,fB); } # & BITS_16_19=0 + +# binutils-descr: "denbcd.", XRC(59,834,1), X_MASK, POWER6, PPCNONE, {S, FRT, FRB} +define pcodeop denbcdDotOp; +# ISA-cmt: denbcd. - DFP Encode BCD To DPD Rc +# binutils: mytest.d: ec: ec 70 26 85 denbcd. 1,f3,f4 +:denbcd. fT,fB is $(NOTVLE) & OP=59 & fT & BIT_20 & BITS_16_19=0 & fB & XOP_1_10=834 & Rc=1 { denbcdDotOp(fT,fB); } # & BITS_16_19=0 + +# binutils-descr: "diex", XRC(59,866,0), X_MASK, POWER6, PPCNONE, {FRT, FRA, FRB} +define pcodeop diexOp; +# ISA-cmt: diex - DFP Insert Biased Exponent +# binutils: mytest.d: f4: ec 43 26 c4 diex f2,f3,f4 +:diex fT,fA,fB is $(NOTVLE) & OP=59 & XOP_1_10=866 & Rc=0 & fT & fA & fB { diexOp(fT,fA,fB); } + +# binutils-descr: "diex.", XRC(59,866,1), X_MASK, POWER6, PPCNONE, {FRT, FRA, FRB} +define pcodeop diexDotOp; +# ISA-cmt: diex. - DFP Insert Biased Exponent Rc +# binutils: mytest.d: f8: ec 43 26 c5 diex. f2,f3,f4 +:diex. 
fT,fA,fB is $(NOTVLE) & OP=59 & XOP_1_10=866 & Rc=1 & fT & fA & fB { diexDotOp(fT,fA,fB); } + +# binutils-descr: "stfdp", OP(61), OP_MASK, POWER6, PPCNONE, {FRT, D, RA0} +define pcodeop stfdpOp; +# ISA-cmt: stfdp - Store Floating-Point Double Pair +# ISA-info: stfdp - Form "DS" Page 131 Category "FP.out" +# binutils: NO-EXAMPLE - stfdp +:stfdp fS,RA_OR_ZERO,DS is $(NOTVLE) & OP=61 & fS & RA_OR_ZERO & DS & BITS_0_1=0 { + EA:$(REGISTER_SIZE) = RA_OR_ZERO + sext(DS:2 << 2); + *[ram]:8 EA = stfdpOp(fS,RA_OR_ZERO,DS:2); +} + +# binutils-descr: "stq", DSO(62,2), DS_MASK, POWER4, PPC476, {RSQ, DS, RA0} +define pcodeop stqOp; +# ISA-cmt: stq - Store Quadword +# ISA-info: stq - Form "DS" Page 751 Category "LSQ" +# binutils: power4.d: +50: f8 c7 00 02 stq r6,0\(r7\) +# binutils: power4.d: +54: f8 c7 00 12 stq r6,16\(r7\) +# binutils: power4.d: +58: f8 c7 ff f2 stq r6,-16\(r7\) +# binutils: power4.d: +5c: f8 c7 80 02 stq r6,-32768\(r7\) +# binutils: power4.d: +60: f8 c7 7f f2 stq r6,32752\(r7\) +:stq S,RA_OR_ZERO,DS is $(NOTVLE) & OP=62 & S & RA_OR_ZERO & DS & BITS_0_1=2 { + EA:$(REGISTER_SIZE) = RA_OR_ZERO + sext(DS:2 << 2); + *:16 EA = stqOp(S,RA_OR_ZERO,DS:2); +} + +define pcodeop stvepxOp; +:stvepx S,RA_OR_ZERO,B is OP=31 & S & RA_OR_ZERO & B & XOP_1_10=807 & BIT_0=0 { + EA:$(REGISTER_SIZE) = RA_OR_ZERO + B; + *[ram]:16 EA = stvepxOp(S, RA_OR_ZERO, B); +} + +define pcodeop stvepxlOp; +:stvepxl S,RA_OR_ZERO,B is OP=31 & S & RA_OR_ZERO & B & XOP_1_10=775 & BIT_0=0 { + EA:$(REGISTER_SIZE) = RA_OR_ZERO + B; + *[ram]:16 EA = stvepxlOp(S, RA_OR_ZERO, B); +} + +# binutils-descr: "dquaq", ZRC(63,3,0), Z2_MASK, POWER6, PPCNONE, {FRT, FRA, FRB, RMC} +define pcodeop dquaqOp; +# ISA-cmt: dquaq - DFP Quantize Quad +# binutils: mytest.d: 100: fc 43 24 06 dquaq f2,f3,f4,2 +:dquaq fT,fA,fB,RMC is $(NOTVLE) & OP=63 & fT & fA & fB & RMC & XOP_1_8=3 & Rc=0 { dquaqOp(); } + +# binutils-descr: "dquaq.", ZRC(63,3,1), Z2_MASK, POWER6, PPCNONE, {FRT, FRA, FRB, RMC} +define pcodeop dquaqDotOp; +# ISA-cmt: dquaq. - DFP Quantize Quad Rc +# binutils: mytest.d: 104: fc 43 24 07 dquaq. f2,f3,f4,2 +:dquaq. fT,fA,fB,RMC is $(NOTVLE) & OP=63 & fT & fA & fB & RMC & XOP_1_8=3 & Rc=1 { dquaqDotOp(); } + +# binutils-descr: "dmulq", XRC(63,34,0), X_MASK, POWER6, PPCNONE, {FRT, FRA, FRB} +define pcodeop dmulqOp; +# ISA-cmt: dmulq - DFP Multiply Quad +# binutils: mytest.d: 108: fc 43 20 44 dmulq f2,f3,f4 +:dmulq fT,fA,fB is $(NOTVLE) & OP=63 & XOP_1_10=34 & Rc=0 & fT & fA & fB { dmulqOp(fT,fA,fB); } + +# binutils-descr: "dmulq.", XRC(63,34,1), X_MASK, POWER6, PPCNONE, {FRT, FRA, FRB} +define pcodeop dmulqDotOp; +# ISA-cmt: dmulq. - DFP Multiply Quad Rc +# binutils: mytest.d: 10c: fc 43 20 45 dmulq. f2,f3,f4 +:dmulq. fT,fA,fB is $(NOTVLE) & OP=63 & XOP_1_10=34 & Rc=1 & fT & fA & fB { dmulqDotOp(fT,fA,fB); } + +# binutils-descr: "drrndq", ZRC(63,35,0), Z2_MASK, POWER6, PPCNONE, {FRT, FRA, FRB, RMC} +define pcodeop drrndqOp; +# ISA-cmt: drrndq - DFP Reround Quad +# binutils: mytest.d: 110: fc 43 22 46 drrndq f2,f3,f4,1 +:drrndq fT,fA,fB,RMC is $(NOTVLE) & OP=63 & fT & fA & fB & RMC & XOP_1_8=35 & Rc=0 { drrndqOp(fT,fA,fB); } + +# binutils-descr: "drrndq.", ZRC(63,35,1), Z2_MASK, POWER6, PPCNONE, {FRT, FRA, FRB, RMC} +define pcodeop drrndqDotOp; +# ISA-cmt: drrndq. - DFP Reround Quad Rc +# binutils: mytest.d: 110: fc 43 22 46 drrndq f2,f3,f4,1 +:drrndq.
fT,fA,fB,RMC is $(NOTVLE) & OP=63 & fT & fA & fB & RMC & XOP_1_8=35 & Rc=1 { drrndqDotOp(fT,fA,fB); } + + +# binutils-descr: "dscliq", ZRC(63,66,0), Z_MASK, POWER6, PPCNONE, {FRT, FRA, SH16} +define pcodeop dscliqOp; +# ISA-cmt: dscliq - DFP Shift Significand Left Immediate Quad +# binutils: mytest.d: 118: fc 43 10 84 dscliq f2,f3,4 +:dscliq fT,fA,SH16 is $(NOTVLE) & OP=63 & fT & fA & SH16 & XOP_1_9=66 & Rc=0 { dscliqOp(fT,fA); } + +# binutils-descr: "dscliq.", ZRC(63,66,1), Z_MASK, POWER6, PPCNONE, {FRT, FRA, SH16} +define pcodeop dscliqDotOp; +# ISA-cmt: dscliq. - DFP Shift Significand Left Immediate Quad Rc +# binutils: mytest.d: 11c: fc 43 10 85 dscliq. f2,f3,4 +:dscliq. fT,fA,SH16 is $(NOTVLE) & OP=63 & fT & fA & SH16 & XOP_1_9=66 & Rc=1 { dscliqDotOp(fT,fA); } + +# binutils-descr: "dquaiq", ZRC(63,67,0), Z2_MASK, POWER6, PPCNONE, {TE, FRT, FRB, RMC} +define pcodeop dquaiqOp; +# ISA-cmt: dquaiq - DFP Quantize Immediate Quad +# binutils: mytest.d: 120: fc 62 24 86 dquaiq 2,f3,f4,2 +:dquaiq fT,A_BITS,fB,RMC is $(NOTVLE) & OP=63 & fT & A_BITS & fB & RMC & XOP_1_8=67 & Rc=0 { dquaiqOp(fT,fB); } + +# binutils-descr: "dquaiq.", ZRC(63,67,1), Z2_MASK, POWER6, PPCNONE, {TE, FRT, FRB, RMC} +define pcodeop dquaiqDotOp; +# ISA-cmt: dquaiq. - DFP Quantize Immediate Quad Rc +# binutils: mytest.d: 124: fc 62 24 87 dquaiq. 2,f3,f4,2 +:dquaiq. fT,A_BITS,fB,RMC is $(NOTVLE) & OP=63 & fT & A_BITS & fB & RMC & XOP_1_8=67 & Rc=1 { dquaiqDotOp(fT,fB); } + +# binutils-descr: "dscriq", ZRC(63,98,0), Z_MASK, POWER6, PPCNONE, {FRT, FRA, SH16} +define pcodeop dscriqOp; +# ISA-cmt: dscriq - DFP Shift Significand Right Immediate Quad +# binutils: mytest.d: 128: fc 43 10 c4 dscriq f2,f3,4 +:dscriq fT,fA,SH16 is $(NOTVLE) & OP=63 & fT & fA & SH16 & XOP_1_9=98 & Rc=0 { dscriqOp(); } + +# binutils-descr: "dscriq.", ZRC(63,98,1), Z_MASK, POWER6, PPCNONE, {FRT, FRA, SH16} +define pcodeop dscriqDotOp; +# ISA-cmt: dscriq. - DFP Shift Significand Right Immediate Quad Rc +# binutils: mytest.d: 12c: fc 43 10 c5 dscriq. f2,f3,4 +:dscriq. fT,fA,SH16 is $(NOTVLE) & OP=63 & fT & fA & SH16 & XOP_1_9=98 & Rc=1 { dscriqDotOp(); } + +# binutils-descr: "drintxq", ZRC(63,99,0), Z2_MASK, POWER6, PPCNONE, {R, FRT, FRB, RMC} +define pcodeop drintxqOp; +# ISA-cmt: drintxq - DFP Round To FP Integer With Inexact Quad +# binutils: mytest.d: 130: fc 61 22 c6 drintxq 1,f3,f4,1 +:drintxq fT,fB,RMC is $(NOTVLE) & OP=63 & fT & BITS_17_20=0 & BIT_16 & fB & RMC & XOP_1_8=99 & Rc=0 { drintxqOp(); } + +# binutils-descr: "drintxq.", ZRC(63,99,1), Z2_MASK, POWER6, PPCNONE, {R, FRT, FRB, RMC} +define pcodeop drintxqDotOp; +# ISA-cmt: drintxq. - DFP Round To FP Integer With Inexact Quad Rc +# binutils: mytest.d: 134: fc 61 22 c7 drintxq. 1,f3,f4,1 +:drintxq. 
fT,fB,RMC is $(NOTVLE) & OP=63 & fT & BITS_17_20=0 & BIT_16 & fB & RMC & XOP_1_8=99 & Rc=1 { drintxqDotOp(); } + +# binutils-descr: "dcmpoq", X(63,130), X_MASK, POWER6, PPCNONE, {BF, FRA, FRB} +define pcodeop dcmpoqOp; +# ISA-cmt: dcmpoq - DFP Compare Ordered Quad +# ISA-info: dcmpoq - Form "X" Page 179 Category "DFP" +# binutils: mytest.d: 138: fd 03 21 04 dcmpoq cr2,f3,f4 +:dcmpoq CRFD,fA,fB is $(NOTVLE) & OP=63 & XOP_1_10=130 & CRFD & fA & fB & BITS_21_22=0 & BIT_0=0 { dcmpoqOp(CRFD,fA,fB); } + +# binutils-descr: "dtstexq", X(63,162), X_MASK, POWER6, PPCNONE, {BF, FRA, FRB} +define pcodeop dtstexqOp; +# ISA-cmt: dtstexq - DFP Test Exponent Quad +# ISA-info: dtstexq - Form "X" Page 181 Category "DFP" +# binutils: mytest.d: 144: fd 03 21 44 dtstexq cr2,f3,f4 +:dtstexq CRFD,fA,fB is $(NOTVLE) & OP=63 & XOP_1_10=162 & CRFD & fA & fB & BITS_21_22=0 & BIT_0=0 { dtstexqOp(CRFD,fA,fB); } + +# binutils-descr: "dtstdcq", Z(63,194), Z_MASK, POWER6, PPCNONE, {BF, FRA, DCM} +define pcodeop dtstdcqOp; +# ISA-cmt: dtstdcq - DFP Test Data Class Quad +# ISA-info: dtstdcq - Form "Z22" Page 180 Category "DFP" +# binutils: mytest.d: 26c: fc 82 0d 84 dtstdcq cr1,f2,3 +:dtstdcq BF2,fA,DCM is $(NOTVLE) & OP=63 & BF2 & BITS_21_22=0 & fA & DCM & XOP_1_9=194 & BIT_0=0 { dtstdcqOp(fA); } + + +# binutils-descr: "dtstdgq", Z(63,226), Z_MASK, POWER6, PPCNONE, {BF, FRA, DGM} +define pcodeop dtstdgqOp; +# ISA-cmt: dtstdgq - DFP Test Data Group Quad +# ISA-info: dtstdgq - Form "Z22" Page 180 Category "DFP" +# binutils: mytest.d: 148: fd 03 11 c4 dtstdgq cr2,f3,4 +:dtstdgq BF2,fA,DGM is $(NOTVLE) & OP=63 & BF2 & BITS_21_22=0 & fA & DGM & XOP_1_9=226 & BIT_0=0 { dtstdgqOp(); } + +# binutils-descr: "drintnq", ZRC(63,227,0), Z2_MASK, POWER6, PPCNONE, {R, FRT, FRB, RMC} +define pcodeop drintnqOp; +# ISA-cmt: drintnq - DFP Round To FP Integer Without Inexact Quad +# binutils: mytest.d: 14c: fc 61 23 c6 drintnq 1,f3,f4,1 +:drintnq fT,fB,RMC is $(NOTVLE) & OP=63 & fT & BITS_17_20=0 & MSR_L & fB & RMC & XOP_1_8=227 & Rc=0 { drintnqOp(); } + +# binutils-descr: "drintnq.", ZRC(63,227,1), Z2_MASK, POWER6, PPCNONE, {R, FRT, FRB, RMC} +define pcodeop drintnqDotOp; +# ISA-cmt: drintnq. - DFP Round To FP Integer Without Inexact Quad Rc +# binutils: mytest.d: 150: fc 61 23 c7 drintnq. 1,f3,f4,1 +:drintnq. fT,fB,RMC is $(NOTVLE) & OP=63 & fT & BITS_17_20=0 & MSR_L & fB & RMC & XOP_1_8=227 & Rc=1 { drintnqDotOp(); } + +# binutils-descr: "dctqpq", XRC(63,258,0), X_MASK, POWER6, PPCNONE, {FRT, FRB} +define pcodeop dctqpqOp; +# ISA-cmt: dctqpq - DFP Convert To DFP Extended +# binutils: mytest.d: 154: fc 40 1a 04 dctqpq f2,f3 +:dctqpq fT,fB is $(NOTVLE) & OP=63 & XOP_1_10=258 & Rc=0 & fT & fB & BITS_16_20=0 { dctqpqOp(fT,fB); } + +# binutils-descr: "dctqpq.", XRC(63,258,1), X_MASK, POWER6, PPCNONE, {FRT, FRB} +define pcodeop dctqpqDotOp; +# ISA-cmt: dctqpq. - DFP Convert To DFP Extended Rc +# binutils: mytest.d: 158: fc 40 1a 05 dctqpq. f2,f3 +:dctqpq. fT,fB is $(NOTVLE) & OP=63 & XOP_1_10=258 & Rc=1 & fT & fB & BITS_16_20=0 { dctqpqDotOp(fT,fB); } + +# binutils-descr: "dctfixq", XRC(63,290,0), X_MASK, POWER6, PPCNONE, {FRT, FRB} +define pcodeop dctfixqOp; +# ISA-cmt: dctfixq - DFP Convert To Fixed Quad +# binutils: mytest.d: 15c: fc 40 1a 44 dctfixq f2,f3 +:dctfixq fT,fB is $(NOTVLE) & OP=63 & XOP_1_10=290 & Rc=0 & fT & fB & BITS_16_20=0 { dctfixqOp(fT,fB); } + +# binutils-descr: "dctfixq.", XRC(63,290,1), X_MASK, POWER6, PPCNONE, {FRT, FRB} +define pcodeop dctfixqDotOp; +# ISA-cmt: dctfixq. 
- DFP Convert To Fixed Quad Rc +# binutils: mytest.d: 160: fc 40 1a 45 dctfixq. f2,f3 +:dctfixq. fT,fB is $(NOTVLE) & OP=63 & XOP_1_10=290 & Rc=1 & fT & fB & BITS_16_20=0 { dctfixqDotOp(fT,fB); } + +# binutils-descr: "ddedpdq", XRC(63,322,0), X_MASK, POWER6, PPCNONE, {SP, FRT, FRB} +define pcodeop ddedpdqOp; +# ISA-cmt: ddedpdq - DFP Decode DPD To BCD Quad +# binutils: mytest.d: 164: fc 70 22 84 ddedpdq 2,f3,f4 +:ddedpdq fT,SP,fB is $(NOTVLE) & OP=63 & XOP_1_10=322 & Rc=0 & fT & fB & SP & BITS_16_18=0 { ddedpdqOp(fT,fB); } + +# binutils-descr: "ddedpdq.", XRC(63,322,1), X_MASK, POWER6, PPCNONE, {SP, FRT, FRB} +define pcodeop ddedpdqDotOp; +# ISA-cmt: ddedpdq. - DFP Decode DPD To BCD Quad Rc +# binutils: mytest.d: 168: fc 70 22 85 ddedpdq. 2,f3,f4 +:ddedpdq. fT,SP,fB is $(NOTVLE) & OP=63 & XOP_1_10=322 & Rc=1 & fT & fB & SP & BITS_16_18=0 { ddedpdqDotOp(fT,fB); } + +# binutils-descr: "dxexq", XRC(63,354,0), X_MASK, POWER6, PPCNONE, {FRT, FRB} +define pcodeop dxexqOp; +# ISA-cmt: dxexq - DFP Extract Biased Exponent Quad +# binutils: mytest.d: 16c: fc 40 1a c4 dxexq f2,f3 +:dxexq fT,fB is $(NOTVLE) & OP=63 & XOP_1_10=354 & Rc=0 & fT & fB & BITS_16_20=0 { dxexqOp(fT,fB); } + +# binutils-descr: "dxexq.", XRC(63,354,1), X_MASK, POWER6, PPCNONE, {FRT, FRB} +define pcodeop dxexqDotOp; +# ISA-cmt: dxexq. - DFP Extract Biased Exponent Quad Rc +# binutils: mytest.d: 170: fc 40 1a c5 dxexq. f2,f3 +:dxexq. fT,fB is $(NOTVLE) & OP=63 & XOP_1_10=354 & Rc=1 & fT & fB & BITS_16_20=0 { dxexqDotOp(fT,fB); } + +# binutils-descr: "dsubq", XRC(63,514,0), X_MASK, POWER6, PPCNONE, {FRT, FRA, FRB} +define pcodeop dsubqOp; +# ISA-cmt: dsubq - DFP Subtract Quad +# binutils: mytest.d: 174: fc 43 24 04 dsubq f2,f3,f4 +:dsubq fT,fA,fB is $(NOTVLE) & OP=63 & XOP_1_10=514 & Rc=0 & fT & fA & fB { dsubqOp(fT,fA,fB); } + +# binutils-descr: "dsubq.", XRC(63,514,1), X_MASK, POWER6, PPCNONE, {FRT, FRA, FRB} +define pcodeop dsubqDotOp; +# ISA-cmt: dsubq. - DFP Subtract Quad Rc +# binutils: mytest.d: 178: fc 43 24 05 dsubq. f2,f3,f4 +:dsubq. fT,fA,fB is $(NOTVLE) & OP=63 & XOP_1_10=514 & Rc=1 & fT & fA & fB { dsubqDotOp(fT,fA,fB); } + +# binutils-descr: "ddivq", XRC(63,546,0), X_MASK, POWER6, PPCNONE, {FRT, FRA, FRB} +define pcodeop ddivqOp; +# ISA-cmt: ddivq - DFP Divide Quad +# binutils: mytest.d: 17c: fc 43 24 44 ddivq f2,f3,f4 +:ddivq fT,fA,fB is $(NOTVLE) & OP=63 & XOP_1_10=546 & Rc=0 & fT & fA & fB { ddivqOp(fT,fA,fB); } + +# binutils-descr: "ddivq.", XRC(63,546,1), X_MASK, POWER6, PPCNONE, {FRT, FRA, FRB} +define pcodeop ddivqDotOp; +# ISA-cmt: ddivq. - DFP Divide Quad Rc +# binutils: mytest.d: 180: fc 43 24 45 ddivq. f2,f3,f4 +:ddivq. 
fT,fA,fB is $(NOTVLE) & OP=63 & XOP_1_10=546 & Rc=1 & fT & fA & fB { ddivqDotOp(fT,fA,fB); } + +# binutils-descr: "dcmpuq", X(63,642), X_MASK, POWER6, PPCNONE, {BF, FRA, FRB} +define pcodeop dcmpuqOp; +# ISA-cmt: dcmpuq - DFP Compare Unordered Quad +# ISA-info: dcmpuq - Form "X" Page 179 Category "DFP" +# binutils: mytest.d: 184: fd 03 25 04 dcmpuq cr2,f3,f4 +:dcmpuq CRFD,fA,fB is $(NOTVLE) & OP=63 & XOP_1_10=642 & CRFD & fA & fB & BITS_21_22=0 & BIT_0=0 { dcmpuqOp(CRFD,fA,fB); } + +# binutils-descr: "dtstsfq", X(63,674), X_MASK, POWER6, PPCNONE, {BF, FRA, FRB} +define pcodeop dtstsfqOp; +# ISA-cmt: dtstsfq - DFP Test Significance Quad +# ISA-info: dtstsfq - Form "X" Page 182 Category "DFP" +# binutils: mytest.d: 188: fd 03 25 44 dtstsfq cr2,f3,f4 +:dtstsfq CRFD,fA,fB is $(NOTVLE) & OP=63 & XOP_1_10=674 & CRFD & fA & fB & BITS_21_22=0 & BIT_0=0 { dtstsfqOp(CRFD,fA,fB); } + +# binutils-descr: "drdpq", XRC(63,770,0), X_MASK, POWER6, PPCNONE, {FRT, FRB} +define pcodeop drdpqOp; +# ISA-cmt: drdpq - DFP Round To DFP Long +# binutils: mytest.d: 18c: fc 40 1e 04 drdpq f2,f3 +:drdpq fT,fB is $(NOTVLE) & OP=63 & XOP_1_10=770 & Rc=0 & fT & fB & BITS_16_20=0 { drdpqOp(fT,fB); } + +# binutils-descr: "drdpq.", XRC(63,770,1), X_MASK, POWER6, PPCNONE, {FRT, FRB} +define pcodeop drdpqDotOp; +# ISA-cmt: drdpq. - DFP Round To DFP Long Rc +# binutils: mytest.d: 190: fc 40 1e 05 drdpq. f2,f3 +:drdpq. fT,fB is $(NOTVLE) & OP=63 & XOP_1_10=770 & Rc=1 & fT & fB & BITS_16_20=0 { drdpqDotOp(fT,fB); } + +# binutils-descr: "dcffixq", XRC(63,802,0), X_MASK, POWER6, PPCNONE, {FRT, FRB} +define pcodeop dcffixqOp; +# ISA-cmt: dcffixq - DFP Convert From Fixed Quad +# binutils: mytest.d: 194: fc 40 1e 44 dcffixq f2,f3 +:dcffixq fT,fB is $(NOTVLE) & OP=63 & XOP_1_10=802 & Rc=0 & fT & fB & BITS_16_20=0 { dcffixqOp(fT,fB); } + +# binutils-descr: "dcffixq.", XRC(63,802,1), X_MASK, POWER6, PPCNONE, {FRT, FRB} +define pcodeop dcffixqDotOp; +# ISA-cmt: dcffixq. - DFP Convert From Fixed Quad Rc +# binutils: mytest.d: 198: fc 40 1e 45 dcffixq. f2,f3 +:dcffixq. fT,fB is $(NOTVLE) & OP=63 & XOP_1_10=802 & Rc=1 & fT & fB & BITS_16_20=0 { dcffixqDotOp(fT,fB); } + +# binutils-descr: "denbcdq", XRC(63,834,0), X_MASK, POWER6, PPCNONE, {S, FRT, FRB} +define pcodeop denbcdqOp; +# ISA-cmt: denbcdq - DFP Encode BCD To DPD Quad +# binutils: mytest.d: 19c: fc 70 26 84 denbcdq 1,f3,f4 +:denbcdq fT,fB is $(NOTVLE) & OP=63 & XOP_1_10=834 & Rc=0 & BIT_20 & fT & fB & SR=0 { denbcdqOp(fT,fB); } + +# binutils-descr: "denbcdq.", XRC(63,834,1), X_MASK, POWER6, PPCNONE, {S, FRT, FRB} +define pcodeop denbcdqDotOp; +# ISA-cmt: denbcdq. - DFP Encode BCD To DPD Quad Rc +# binutils: mytest.d: 1a0: fc 70 26 85 denbcdq. 1,f3,f4 +:denbcdq. fT,fB is $(NOTVLE) & OP=63 & XOP_1_10=834 & Rc=1 & BIT_20 & fT & fB & SR=0 { denbcdqDotOp(fT,fB); } + +# binutils-descr: "diexq", XRC(63,866,0), X_MASK, POWER6, PPCNONE, {FRT, FRA, FRB} +define pcodeop diexqOp; +# ISA-cmt: diexq - DFP Insert Biased Exponent Quad +# binutils: mytest.d: 1a4: fc 43 26 c4 diexq f2,f3,f4 +:diexq fT,fA,fB is $(NOTVLE) & OP=63 & XOP_1_10=866 & Rc=0 & fT & fA & fB { diexqOp(fT,fA,fB); } + +# binutils-descr: "diexq.", XRC(63,866,1), X_MASK, POWER6, PPCNONE, {FRT, FRA, FRB} +define pcodeop diexqDotOp; +# ISA-cmt: diexq. - DFP Insert Biased Exponent Quad Rc +# binutils: mytest.d: 1a8: fc 43 26 c5 diexq. f2,f3,f4 +:diexq. 
fT,fA,fB is $(NOTVLE) & OP=63 & XOP_1_10=866 & Rc=1 & fT & fA & fB { diexqDotOp(fT,fA,fB); } + +# icbtls ct,ra,rb +# 31 / CT RA RB 486 / +# 0 6 7 11 16 21 31 +# 31 25 24 20 15 10 0 +#define pcodeop icbtlsOp; +#:icbtls CT2,A,B is $(NOTVLE) & OP=31 & BIT_25=0 & CT2 & A & B & XOP_1_10=486 & BIT_0=0 { icbtlsOp(A,B); } +define pcodeop InstructionCacheBlockLockSetX; +:icbtls CT,RA_OR_ZERO,B is $(NOTVLE) & OP=31 & BIT_25=0 & CT & RA_OR_ZERO & B & XOP_1_10=486 & BIT_0=0 +{ + ea:$(REGISTER_SIZE) = RA_OR_ZERO + B; + InstructionCacheBlockLockSetX(ea); +} + +###################################### +# v2.07 non vsx additions. + +#=========================================================== +# Branch Conditional TAR(op=19, xop=560) +#=========================================================== + + +:bctar is $(NOTVLE) & OP=19 & BO_0=1 & BO_2=1 & LK=0 & BITS_13_15=0 & BH=0 & XOP_1_10=560 +{ + goto [TAR]; +} + +:bctar is linkreg=1 & OP=19 & BO_0=1 & BO_2=1 & LK=0 & BITS_13_15=0 & BH=0 & XOP_1_10=560 + [ linkreg=0; globalset(inst_start,linkreg); ] +{ + # don't do this anymore, detect another way + # call [CTR]; + # return [LR]; + goto [TAR]; +} + +:bctar BH is $(NOTVLE) & OP=19 & BO_0=1 & BO_2=1 & LK=0 & BITS_13_15=0 & BH & XOP_1_10=560 +{ + goto [TAR]; +} + +:bctarl is $(NOTVLE) & OP=19 & BO_0=1 & BO_2=1 & LK=1 & BITS_13_15=0 & BH=0 & XOP_1_10=560 + [ linkreg=0; globalset(inst_start,linkreg); ] +{ + LR = inst_next; + call [TAR]; +} +:bctarl BH is $(NOTVLE) & OP=19 & BO_0=1 & BO_2=1 & LK=1 & BITS_13_15=0 & BH & XOP_1_10=560 + [ linkreg=0; globalset(inst_start,linkreg); ] +{ + LR = inst_next; + call [TAR]; +} + +:b^CC^"ctar" is $(NOTVLE) & OP=19 & CC & BO_0=0 & BO_2=1 & BI_CR= 0 & BH=0 & LK=0 & BITS_13_15=0 & XOP_1_10=560 +{ + if (!CC) goto inst_next; + goto [TAR]; +} +:b^CC^"ctar" BH is $(NOTVLE) & OP=19 & CC & BO_0=0 & BO_2=1 & BI_CR= 0 & BH & BH_BITS!=0 & LK=0 & BITS_13_15=0 & XOP_1_10=560 +{ + if (!CC) goto inst_next; + goto [TAR]; +} + +:b^CC^"ctarl" is $(NOTVLE) & OP=19 & CC & BO_0=0 & BO_2=1 & BI_CR= 0 & BH=0 & LK=1 & BITS_13_15=0 & XOP_1_10=560 + [ linkreg=0; globalset(inst_start,linkreg); ] +{ + if (!CC) goto inst_next; + LR = inst_next; + call [TAR]; +} +:b^CC^"ctarl" BH is $(NOTVLE) & OP=19 & CC & BO_0=0 & BO_2=1 & BI_CR= 0 & BH & BH_BITS!=0 & LK=1 & BITS_13_15=0 & XOP_1_10=560 + [ linkreg=0; globalset(inst_start,linkreg); ] +{ + if (!CC) goto inst_next; + LR = inst_next; + call [TAR]; +} + +:b^CC^"ctar" BI_CR is $(NOTVLE) & OP=19 & CC & BI_CR & BO_0=0 & BO_2=1 & BH=0 & LK=0 & BITS_13_15=0 & XOP_1_10=560 +{ + if (!CC) goto inst_next; + goto [TAR]; +} + +:b^CC^"ctar" BI_CR,BH is $(NOTVLE) & OP=19 & CC & BI_CR & BO_0=0 & BO_2=1 & BH & LK=0 & BITS_13_15=0 & XOP_1_10=560 +{ + if (!CC) goto inst_next; + goto [TAR]; +} + +:b^CC^"ctarl" BI_CR is $(NOTVLE) & OP=19 & CC & BI_CR & BO_0=0 & BO_2=1 & BH=0 & LK=1 & BITS_13_15=0 & XOP_1_10=560 + [ linkreg=0; globalset(inst_start,linkreg); ] +{ + if (!CC) goto inst_next; + LR = inst_next; + call [TAR]; +} + +:b^CC^"ctarl" BI_CR,BH is $(NOTVLE) & OP=19 & CC & BI_CR & BO_0=0 & BO_2=1 & BH & LK=1 & BITS_13_15=0 & XOP_1_10=560 + [ linkreg=0; globalset(inst_start,linkreg); ] +{ + if (!CC) goto inst_next; + LR = inst_next; + call [TAR]; +} + +:clrbhrb is $(NOTVLE) & OP=31 & XOP_1_10=430 & BITS_11_25=0 & BIT_0=0 { + clearHistory(); +} + +:fmrgew fT,fA,fB is $(NOTVLE) & OP=63 & fT & fA & fB & XOP_1_10=966 & Rc=0 { + fT[0,32] = fA:4; + fT[32,32] = fB:4; +} + +:fmrgow fT,fA,fB is $(NOTVLE) & OP=63 & fT & fA & fB & XOP_1_10=838 & Rc=0 { + fT[0,32] = fA(4); + fT[32,32] = 
fB(4); +} + +:lqarx D,RA_OR_ZERO,B,EX is OP=31 & D & RA_OR_ZERO & B & XOP_1_10=276 & EX & Dp & regp [regpset = Dp+1;] { + ea:$(REGISTER_SIZE) = RA_OR_ZERO + B; +@if ENDIAN == "big" + D = *:$(REGISTER_SIZE) ea; + regp = *:$(REGISTER_SIZE) (ea + $(REGISTER_SIZE)); +@else + D = *:$(REGISTER_SIZE) (ea + $(REGISTER_SIZE)); + regp = *:$(REGISTER_SIZE) ea; +@endif +} + +:mfbhrbe D,BH_RBE is $(NOTVLE) & OP=31 & XOP_1_10=302 & BIT_0=0 & D & BH_RBE { + D = movebuffer(BH_RBE:2); +} + +:msgclrp B is OP=31 & XOP_1_10=174 & BITS_16_25=0 & BIT_0=0 & B { + message(B); +} + +:msgsndp B is OP=31 & XOP_1_10=142 & BITS_16_25=0 & BIT_0=0 & B { + message(B); +} + +:rfebb SBE is $(NOTVLE) & OP=19 & XOP_1_10=146 & BITS_12_25=0 & BIT_0=0 & SBE { + eventInterrupt(SBE:1); +} + +:stqcx. S,RA_OR_ZERO,B is OP=31 & S & RA_OR_ZERO & B & XOP_1_10=182 & BIT_0=1 & Dp & regp [regpset = Dp+1;] { + ea:$(REGISTER_SIZE) = RA_OR_ZERO + B; +@if ENDIAN == "big" + *:$(REGISTER_SIZE) ea = S; + *:$(REGISTER_SIZE) (ea + $(REGISTER_SIZE)) = regp; +@else + *:$(REGISTER_SIZE) (ea + $(REGISTER_SIZE)) = S; + *:$(REGISTER_SIZE) ea = regp; +@endif + setCrBit(cr0, 2, 1); +} + +:tabort. A is $(NOTVLE) & OP=31 & XOP_1_10=910 & BIT_0=1 & BITS_11_15=0 & BITS_21_25=0 & A { + transaction(A); +} + +:tabortdc. TOA,A,B is $(NOTVLE) & OP=31 & XOP_1_10=814 & BIT_0=1 & A & B & TOA { + transaction(TOA:1,A,B); +} + +:tabortdci. TOA,A,S5IMM is $(NOTVLE) & OP=31 & XOP_1_10=878 & BIT_0=1 & A & S5IMM & TOA { + transaction(TOA:1,A,S5IMM:1); +} + +:tabortwc. TOA,A,B is $(NOTVLE) & OP=31 & XOP_1_10=782 & BIT_0=1 & A & B & TOA { + transaction(TOA:1,A,B); +} + +:tabortwci. TOA,A,S5IMM is $(NOTVLE) & OP=31 & XOP_1_10=846 & BIT_0=1 & A & S5IMM & TOA { + transaction(TOA:1,A,S5IMM:1); +} + +:tbegin. BIT_R is $(NOTVLE) & OP=31 & XOP_1_10=654 & BIT_0=1 & BITS_11_20=0 & BITS_22_24=0 & BIT_R { + transaction(BIT_R:1); +} + +:tcheck BF2 is $(NOTVLE) & OP=31 & XOP_1_10=718 & BIT_0=0 & BITS_11_22=0 & BF2 { + transaction(BF2:1); +} + +:tend. BIT_A is $(NOTVLE) & OP=31 & XOP_1_10=686 & BIT_0=1 & BITS_11_24=0 & BIT_A { + transaction(BIT_A:1); +} + +:trechkpt. is $(NOTVLE) & OP=31 & XOP_1_10=1006 & BIT_0=1 & BITS_11_25=0 { + transaction(); +} + +:treclaim. A is $(NOTVLE) & OP=31 & XOP_1_10=942 & BIT_0=1 & BITS_11_15=0 & BITS_21_25=0 & A { + transaction(A); +} + +:tsr. BIT_L is $(NOTVLE) & OP=31 & XOP_1_10=750 & BIT_0=1 & BITS_11_20=0 & BITS_22_25=0 & BIT_L { + transaction(BIT_L:1); +} + +####################### +# v3.0 + +:addpcis D,OFF16SH is $(NOTVLE) & OP=19 & XOP_1_5=2 & D & OFF16SH { + D = inst_next + sext(OFF16SH); +} + +:cmpeqb CRFD,A,B is $(NOTVLE) & OP=31 & BITS_21_22=0 & BIT_0=0 & XOP_1_10=224 & A & B & CRFD { + tmpa:1 = A:1; + match:1 = (tmpa == B[0,8]) | (tmpa == B[8,8]) | (tmpa == B[16,8]) | (tmpa == B[24,8]); +@if REGISTER_SIZE == "8" + match = match | (tmpa == B[32,8]) | (tmpa == B[40,8]) | (tmpa == B[48,8]) | (tmpa == B[56,8]); +@endif + # 0b0 | match | 0b0 | 0b0 + CRFD = (match & 1) << 2; +} + +:cmprb CRFD,L2,A,B is $(NOTVLE) & OP=31 & BIT_22=0 & BIT_0=0 & XOP_1_10=192 & A & B & CRFD & L2 { + tmpin:1 = A:1; + tmp1lo:1 = B[16,8]; + tmp1hi:1 = B[24,8]; + tmp2lo:1 = B[0,8]; + tmp2hi:1 = B[8,8]; + in_range:1 = ((tmpin >= tmp2lo) & (tmpin <= tmp2hi)) | (((tmpin >= tmp1lo) & (tmpin <= tmp1hi)) * L2:1); + # 0b0 | in_range | 0b0 | 0b0 + CRFD = (in_range & 1) << 2; +} + +:cnttzw A,S is OP=31 & S & A & BITS_11_15=0 & XOP_1_10=538 & Rc=0 { + A = countTrailingZeros(S); +} + +:cnttzw. 
A,S is OP=31 & S & A & BITS_11_15=0 & XOP_1_10=538 & Rc=1 { + A = countTrailingZeros(S); + cr0flags(A); +} + +:cnttzd A,S is OP=31 & S & A & BITS_11_15=0 & XOP_1_10=570 & Rc=0 { + A = countTrailingZeros(S); +} + +:cnttzd. A,S is OP=31 & S & A & BITS_11_15=0 & XOP_1_10=570 & Rc=1 { + A = countTrailingZeros(S); + cr0flags(A); +} + +:copy RA_OR_ZERO,B,L2 is $(NOTVLE) & OP=31 & BITS_22_25=0 & BIT_0=0 & XOP_1_10=774 & RA_OR_ZERO & B & L2 { + ea:$(REGISTER_SIZE) = RA_OR_ZERO + B; + copytrans(ea,L2:1); +} + +:cp_abort is $(NOTVLE) & OP=31 & BITS_11_25=0 & BIT_0=0 & XOP_1_10=838{ + copytrans(); +} + +:darn D,L16 is $(NOTVLE) & OP=31 & BITS_18_20=0 & BITS_11_15=0 & BIT_0=0 & XOP_1_10=755 & D & L16 { + D = random(L16:1); +} + +:dtstsfi CRFD,UIMT,fB is $(NOTVLE) & OP=59 & XOP_1_10=675 & CRFD & UIMT & fB & BIT_22=0 & BIT_0=0 { + dtstsfOp(CRFD,UIMT:1,fB); +} + +:dtstsfiq CRFD,UIMT,fB is $(NOTVLE) & OP=63 & XOP_1_10=675 & CRFD & UIMT & fB & BIT_22=0 & BIT_0=0 { + dtstsfOp(CRFD,UIMT:1,fB); +} + +:extswsli A,S,SH is OP=31 & A & S & SH & XOP_2_10=445 & Rc=0 { + tmp:8 = sext(S:4); + A = tmp << SH; +} + +:extswsli. A,S,SH is OP=31 & A & S & SH & XOP_2_10=445 & Rc=1 { + tmp:8 = sext(S:4); + A = tmp << SH; + cr0flags(A); +} + +:ldat D,RA_OR_ZERO,FNC is $(NOTVLE) & OP=31 & D & RA_OR_ZERO & FNC & XOP_1_10=614 & BIT_0=0 & Dp & regp [regpset = Dp+1;] { + ea:$(REGISTER_SIZE) = RA_OR_ZERO; + tmp:$(REGISTER_SIZE) = *:8 ea; + mematom(ea,tmp,D,regp,FNC:1); + D = tmp; +} + +:ldmx D,RA_OR_ZERO,B is $(NOTVLE) & OP=31 & D & RA_OR_ZERO & B & XOP_1_10=309 & BIT_0=0 { + ea:$(REGISTER_SIZE) = RA_OR_ZERO + B; + D = *:8 ea; +} + +:lwat D,RA_OR_ZERO,FNC is $(NOTVLE) & OP=31 & D & RA_OR_ZERO & FNC & XOP_1_10=582 & BIT_0=0 & Dp & regp [regpset = Dp+1;] { + ea:$(REGISTER_SIZE) = RA_OR_ZERO; + tmp:$(REGISTER_SIZE) = zext(*:4 ea); + mematom(ea,tmp,D,regp,FNC:1); + D = tmp; +} + +:maddhd D,A,B,C is $(NOTVLE) & OP=4 & D & A & B & C & XOP_0_5=48 { + tmpa:16 = sext(A); + tmpb:16 = sext(B); + tmpc:16 = sext(C); + tmpp:16 = (tmpa * tmpb) + tmpc; + D = tmpp(8); +} + +:maddhdu D,A,B,C is $(NOTVLE) & OP=4 & D & A & B & C & XOP_0_5=49 { + tmpa:16 = zext(A); + tmpb:16 = zext(B); + tmpc:16 = zext(C); + tmpp:16 = (tmpa * tmpb) + tmpc; + D = tmpp(8); +} + +:maddld D,A,B,C is $(NOTVLE) & OP=4 & D & A & B & C & XOP_0_5=51 { + tmpa:16 = sext(A); + tmpb:16 = sext(B); + tmpc:16 = sext(C); + tmpp:16 = (tmpa * tmpb) + tmpc; + D = tmpp:8; +} + +:mcrxrx CRFD is $(NOTVLE) & OP=31 & BITS_11_22=0 & BIT_0=0 & XOP_1_10=576 & CRFD { + CRFD = (xer_ov << 3) | (xer_ov32 << 2) | (xer_ca << 1) | (xer_ca32); +} + +:modsd D,A,B is $(NOTVLE) & OP=31 & D & A & B & XOP_1_10=777 & BIT_0=0 { + tmpa:16 = sext(A); + tmpb:16 = sext(B); + tmpd:16 = tmpa s% tmpb; + D = tmpd:$(REGISTER_SIZE); +} + +:modsw D,A,B is $(NOTVLE) & OP=31 & D & A & B & XOP_1_10=779 & BIT_0=0 { + tmpa:16 = sext(A:4); + tmpb:16 = sext(B:4); + tmpd:16 = tmpa s% tmpb; + D = tmpd:$(REGISTER_SIZE); +} + +:modud D,A,B is $(NOTVLE) & OP=31 & D & A & B & XOP_1_10=265 & BIT_0=0 { + tmpa:16 = zext(A); + tmpb:16 = zext(B); + tmpd:16 = tmpa % tmpb; + D = tmpd:$(REGISTER_SIZE); +} + +:moduw D,A,B is $(NOTVLE) & OP=31 & D & A & B & XOP_1_10=267 & BIT_0=0 { + tmpa:4 = zext(A:4); + tmpb:4 = zext(B:4); + tmpd:4 = tmpa % tmpb; + D = zext(tmpd); +} + +:msgsync is $(NOTVLE) & OP=31 & BITS_11_25=0 & BIT_0=0 & XOP_1_10=886 { + message(); +} + +:paste RA_OR_ZERO,B,0 is $(NOTVLE) & OP=31 & BITS_22_25=0 & XOP_1_10=902 & RA_OR_ZERO & B & L2=0 & Rc=0 { + ea:$(REGISTER_SIZE) = RA_OR_ZERO + B; + pastetrans(ea); +} + +:paste. 
RA_OR_ZERO,B,1 is $(NOTVLE) & OP=31 & BITS_22_25=0 & XOP_1_10=902 & RA_OR_ZERO & B & L2=1 & Rc=1 { + ea:$(REGISTER_SIZE) = RA_OR_ZERO + B; + pastetrans(ea); + setCrBit(cr0, 2, 1); +} + +:setb D,BFA is $(NOTVLE) & OP=31 & BITS_11_17=0 & BIT_0=0 & XOP_1_10=128 & D & BFA { + tmpcr:8 = 1 << (8 * BFA:1); + tmpr0:1 = (BFA & 0x8) != 0; + tmpr1:1 = (BFA & 0x4) != 0; + D = (-1 * zext(tmpr0)) + (1 * zext(tmpr0 == 0) * zext(tmpr1)); +} + +:slbieg S,B is $(NOTVLE) & OP=31 & BITS_16_20=0 & BIT_0=0 & XOP_1_10=466 & S & B { + slbInvalidateEntry(S,B); +} + +:slbsync is $(NOTVLE) & OP=31 & BITS_11_25=0 & BIT_0=0 & XOP_1_10=338 { + sync(); +} + +:stdat S,RA_OR_ZERO,FNC is $(NOTVLE) & OP=31 & S & RA_OR_ZERO & FNC & XOP_1_10=742 & BIT_0=0 & Dp & regp [regpset = Dp+1;] { + ea:$(REGISTER_SIZE) = RA_OR_ZERO; + tmp:$(REGISTER_SIZE) = *:8 ea; + mematom(ea,tmp,S,regp,FNC:1); +} + +:stop is $(NOTVLE) & OP=19 & BITS_11_25=0 & BIT_0=0 & XOP_1_10=370 { + stopT(); +} + +:stwat S,RA_OR_ZERO,FNC is $(NOTVLE) & OP=31 & S & RA_OR_ZERO & FNC & XOP_1_10=710 & BIT_0=0 & Dp & regp [regpset = Dp+1;] { + ea:$(REGISTER_SIZE) = RA_OR_ZERO; + tmp:$(REGISTER_SIZE) = zext(*:4 ea); + mematom(ea,tmp,S,regp,FNC:1); +} + +:wait WC is $(NOTVLE) & OP=31 & BITS_23_25=0 & BITS_11_20=0 & BIT_0=0 & XOP_1_10=30 & WC { + waitT(WC:1); +} diff --git a/src/third-party/sleigh/processors/PowerPC/data/languages/ppc_vle.sinc b/src/third-party/sleigh/processors/PowerPC/data/languages/ppc_vle.sinc new file mode 100644 index 00000000..3b628347 --- /dev/null +++ b/src/third-party/sleigh/processors/PowerPC/data/languages/ppc_vle.sinc @@ -0,0 +1,938 @@ + +CC16: "lt" is BI16_VLE=0 & BO16_VLE=1 & BI16_VLE { tmp:1 = 0; getCrBit(cr0, BI16_VLE, tmp); export tmp; } +CC16: "le" is BI16_VLE=1 & BO16_VLE=0 & BI16_VLE { tmp:1 = 0; getCrBit(cr0, BI16_VLE, tmp); tmp = !tmp; export tmp; } +CC16: "eq" is BI16_VLE=2 & BO16_VLE=1 & BI16_VLE { tmp:1 = 0; getCrBit(cr0, BI16_VLE, tmp); export tmp; } +CC16: "ge" is BI16_VLE=0 & BO16_VLE=0 & BI16_VLE { tmp:1 = 0; getCrBit(cr0, BI16_VLE, tmp); tmp = !tmp; export tmp; } +CC16: "gt" is BI16_VLE=1 & BO16_VLE=1 & BI16_VLE { tmp:1 = 0; getCrBit(cr0, BI16_VLE, tmp); export tmp; } +CC16: "ne" is BI16_VLE=2 & BO16_VLE=0 & BI16_VLE { tmp:1 = 0; getCrBit(cr0, BI16_VLE, tmp); tmp = !tmp; export tmp; } +CC16: "so" is BI16_VLE=3 & BO16_VLE=1 & BI16_VLE { tmp:1 = 0; getCrBit(cr0, BI16_VLE, tmp); export tmp; } +CC16: "ns" is BI16_VLE=3 & BO16_VLE=0 & BI16_VLE { tmp:1 = 0; getCrBit(cr0, BI16_VLE, tmp); tmp = !tmp; export tmp; } + + +CC32: "lt" is BI_CC_VLE=0 & BO_VLE=1 & BI_CR_VLE & BI_CC_VLE { tmp:1 = 0; getCrBit(BI_CR_VLE, BI_CC_VLE, tmp); export tmp; } +CC32: "le" is BI_CC_VLE=1 & BO_VLE=0 & BI_CR_VLE & BI_CC_VLE { tmp:1 = 0; getCrBit(BI_CR_VLE, BI_CC_VLE, tmp); tmp = !tmp; export tmp; } +CC32: "eq" is BI_CC_VLE=2 & BO_VLE=1 & BI_CR_VLE & BI_CC_VLE { tmp:1 = 0; getCrBit(BI_CR_VLE, BI_CC_VLE, tmp); export tmp; } +CC32: "ge" is BI_CC_VLE=0 & BO_VLE=0 & BI_CR_VLE & BI_CC_VLE { tmp:1 = 0; getCrBit(BI_CR_VLE, BI_CC_VLE, tmp); tmp = !tmp; export tmp; } +CC32: "gt" is BI_CC_VLE=1 & BO_VLE=1 & BI_CR_VLE & BI_CC_VLE { tmp:1 = 0; getCrBit(BI_CR_VLE, BI_CC_VLE, tmp); export tmp; } +CC32: "ne" is BI_CC_VLE=2 & BO_VLE=0 & BI_CR_VLE & BI_CC_VLE { tmp:1 = 0; getCrBit(BI_CR_VLE, BI_CC_VLE, tmp); tmp = !tmp; export tmp; } +CC32: "so" is BI_CC_VLE=3 & BO_VLE=1 & BI_CR_VLE & BI_CC_VLE { tmp:1 = 0; getCrBit(BI_CR_VLE, BI_CC_VLE, tmp); export tmp; } +CC32: "ns" is BI_CC_VLE=3 & BO_VLE=0 & BI_CR_VLE & BI_CC_VLE { tmp:1 = 0; getCrBit(BI_CR_VLE, BI_CC_VLE, tmp); tmp 
= !tmp; export tmp; } +CC32: "dnz" is BO_VLE=2 {CTR = CTR-1; tmp:1 = (CTR != 0); export tmp; } +CC32: "dz" is BO_VLE=3 {CTR = CTR-1; tmp:1 = (CTR == 0); export tmp; } + +addrBD8: reloc is BD8_VLE [ reloc = inst_start + (BD8_VLE << 1);] { export *[ram]:4 reloc; } +addrBD15: reloc is BD15_VLE [ reloc = inst_start + (BD15_VLE << 1);] { export *[ram]:4 reloc; } +addrBD24: reloc is BD24_VLE [ reloc = inst_start + (BD24_VLE << 1);] { export *[ram]:4 reloc; } + +d8PlusRaAddress: S8IMM(A) is S8IMM & A {tmp:$(REGISTER_SIZE) = A+S8IMM; export tmp; } +d8PlusRaOrZeroAddress: S8IMM(RA_OR_ZERO) is S8IMM & RA_OR_ZERO {tmp:$(REGISTER_SIZE) = RA_OR_ZERO+S8IMM; export tmp; } + +sd4PlusRxAddr: SD4_VLE(RX_VLE) is SD4_VLE & RX_VLE {tmp:$(REGISTER_SIZE) = RX_VLE+SD4_VLE; export tmp; } +sd4HPlusRxAddr: SD4_VLE(RX_VLE) is SD4_VLE & RX_VLE {tmp:$(REGISTER_SIZE) = RX_VLE+(SD4_VLE << 1); export tmp; } +sd4WPlusRxAddr: SD4_VLE(RX_VLE) is SD4_VLE & RX_VLE {tmp:$(REGISTER_SIZE) = RX_VLE+(SD4_VLE << 2); export tmp; } + +OIMM: val is UI5_VLE [ val = UI5_VLE+1; ] { export *[const]:$(REGISTER_SIZE) val; } + +@if REGISTER_SIZE == "4" +SCALE: val is BIT_10 & SCL_VLE & IMM8 [ val = (((0xFFFFFFFF << ((SCL_VLE*8) + 8)) | (0xFFFFFFFF >> (32 - (SCL_VLE*8)))) * BIT_10) | (IMM8 << (SCL_VLE*8)); ] { export *[const]:4 val;} +@else +# Due to the way this big >> would work in java (arithmetic), we have to modify the orig way this was done. +# (0xFFFFFFFFFFFFFFFF >> (64 - (SCL_VLE*8)) <--- Original +# (0x7FFFFFFFFFFFFFFF >> (63 - (SCL_VLE*8)) <--- New +# We 'pre-shift' by 1 and take 1 off the total we'd shift by. SCL_VLE*8 is at most 24 so we don't have to +# worry about a negative shift value. +SCALE: val is BIT_10 & SCL_VLE & IMM8 [ val = (((0xFFFFFFFFFFFFFFFF << ((SCL_VLE*8) + 8)) | (0x7FFFFFFFFFFFFFFF >> (63 - (SCL_VLE*8)))) * BIT_10) | (IMM8 << (SCL_VLE*8)); ] { export *[const]:8 val;} +@endif + +SIMM16: val is IMM_0_10_VLE & SIMM_21_25_VLE [ val = (SIMM_21_25_VLE << 11) | IMM_0_10_VLE ;] { export *[const]:2 val; } +SIMM20: val is IMM_0_10_VLE & IMM_16_20_VLE & SIMM_11_14_VLE [ val = (SIMM_11_14_VLE << 16 ) | (IMM_16_20_VLE << 11) | IMM_0_10_VLE ;] { export *[const]:4 val; } +IMM16: val is IMM_0_10_VLE & IMM_21_25_VLE [ val = (IMM_21_25_VLE << 11) | IMM_0_10_VLE ;] { export *[const]:2 val; } +IMM16B: val is IMM_0_10_VLE & IMM_16_20_VLE [ val = (IMM_16_20_VLE << 11) | IMM_0_10_VLE ;] { export *[const]:2 val; } + +:e_b addrBD24 is $(ISVLE) & OP=30 & BIT_25=0 & LK=0 & addrBD24 { + goto addrBD24; +} + +:e_bl addrBD24 is $(ISVLE) & OP=30 & BIT_25=0 & LK=1 & addrBD24 { + LR = inst_next; + call addrBD24; +} + +:se_b addrBD8 is $(ISVLE) & OP6_VLE=58 & BIT9_VLE=0 & LK8_VLE=0 & addrBD8 { + goto addrBD8; +} + +:se_bl addrBD8 is $(ISVLE) & OP6_VLE=58 & BIT9_VLE=0 & LK8_VLE=1 & addrBD8 { + LR = inst_next; + call addrBD8; +} + +# NOTE: For the conditional branches, the "official" mnemonics have just bc and bcl. +# We use extended mnemonics so the display is understandable without having to cross- +# reference multiple tables. 
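+# Worked example for the 64-bit SCALE form above (values chosen purely for illustration): with BIT_10=1, SCL_VLE=1, IMM8=0x80 the action computes +#   0xFFFFFFFFFFFFFFFF << ((1*8) + 8)  = 0xFFFFFFFFFFFF0000   (fill above the immediate byte) +#   0x7FFFFFFFFFFFFFFF >> (63 - (1*8)) = 0x00000000000000FF   (fill below it) +#   val = (0xFFFFFFFFFFFF00FF * BIT_10) | (0x80 << (1*8))     = 0xFFFFFFFFFFFF80FF +# so IMM8 lands in byte SCL_VLE and every other byte is filled with copies of BIT_10. +# The naive 0xFFFFFFFFFFFFFFFF >> (64 - (SCL_VLE*8)) breaks here because, as noted +# above, the disassembly-action shift is arithmetic on a signed 64-bit value: -1 >> n +# stays all ones, so the low fill would also clobber the IMM8 byte whenever BIT_10=1.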
+:e_b^CC32 addrBD15 is $(ISVLE) & OP=30 & XOP_VLE=8 & LK=0 & addrBD15 & CC32 { + if (CC32 == 0) goto inst_next; + goto addrBD15; +} + +:e_b^CC32^"l" addrBD15 is $(ISVLE) & OP=30 & XOP_VLE=8 & LK=1 & addrBD15 & CC32 { + if (CC32 == 0) goto inst_next; + LR= inst_next; + call [addrBD15]; +} + +:se_b^CC16 addrBD8 is $(ISVLE) & OP5_VLE=28 & addrBD8 & CC16 { + if (CC16 == 0) goto inst_next; + goto addrBD8; +} +####### + +:se_bctr is $(ISVLE) & OP15_VLE=3 & LK0_VLE=0 { + tmp:$(REGISTER_SIZE) = CTR & ~1; + goto [tmp]; +} + +:se_bctrl is $(ISVLE) & OP15_VLE=3 & LK0_VLE=1 { + LR = inst_next; + tmp:$(REGISTER_SIZE) = CTR & ~1; + call [tmp]; +} + +:se_blr is $(ISVLE) & OP15_VLE=2 & LK0_VLE=0 { + tmp:$(REGISTER_SIZE) = LR & ~1; + return [tmp]; +} + +:se_blrl is $(ISVLE) & OP15_VLE=2 & LK0_VLE=1 { + tmp:$(REGISTER_SIZE) = LR & ~1; + LR = inst_next; + return [tmp]; +} + +:se_sc is $(ISVLE) & OP16_VLE=2 { + tmp:1 = 0; + syscall(tmp); +} + +:e_sc LEV_VLE is $(ISVLE) & OP=31 & XOP_1_10=36 & BIT_0=0 & BITS_16_20=0 & BITS_21_25=0 & LEV_VLE { + tmp:1 = LEV_VLE; + syscall(tmp); +} + +:e_sc is $(ISVLE) & OP=31 & XOP_1_10=36 & BIT_0=0 & BITS_16_20=0 & BITS_21_25=0 & LEV_VLE=0 { + tmp:1 = 0; + syscall(tmp); +} + +:se_illegal is $(ISVLE) & OP16_VLE=0 { + illegal(); +} + +:se_rfmci is $(ISVLE) & OP16_VLE=11 { + MSR = returnFromMachineCheckInterrupt(MSR, spr23b); #MCSRR1 + local ra = spr23a; #MCSRR0 + return[ra]; +} + +:se_rfci is $(ISVLE) & OP16_VLE=9 { + MSR = returnFromCriticalInterrupt(MSR, CSRR1); + local ra = CSRR0; + return[ra]; +} + +:se_rfi is $(ISVLE) & OP16_VLE=8 { + MSR = returnFromInterrupt(MSR, SRR1); + local ra = SRR0; + return[ra]; +} + +:se_rfdi is $(ISVLE) & OP16_VLE=10 { + MSR = returnFromDebugInterrupt(MSR, spr23f); #DSRR1 + local ra = spr23e; #DSRR0 + return[ra]; +} + +:se_rfgi is $(ISVLE) & OP16_VLE=12 { + MSR = returnFromGuestInterrupt(MSR, spr17b); #GSRR1 + local ra = spr17a; #GSRR0 + return[ra]; +} + +:e_crand CC_D_OP,CC_OP,CC_B_OP is $(ISVLE) & OP=31 & CC_D_OP & CC_OP & CC_B_OP & CR_D & CR_D_CC & XOP_1_10=257 & BIT_0=0 +{ + setCrBit(CR_D,CR_D_CC,CC_OP & CC_B_OP); +} + +:e_crandc CC_D_OP,CC_OP,CC_B_OP is $(ISVLE) & OP=31 & CC_D_OP & CC_OP & CC_B_OP & CR_D & CR_D_CC & XOP_1_10=129 & BIT_0=0 +{ + tmp1:1 = !CC_B_OP; + setCrBit(CR_D,CR_D_CC,CC_OP & tmp1); +} + +:e_creqv CC_D_OP,CC_OP,CC_B_OP is $(ISVLE) & OP=31 & CC_D_OP & CC_OP & CC_B_OP & CR_D & CR_D_CC & XOP_1_10=289 & BIT_0=0 +{ + setCrBit(CR_D,CR_D_CC,CC_B_OP == CC_OP); +} + +:e_crnand CC_D_OP,CC_OP,CC_B_OP is $(ISVLE) & OP=31 & CC_D_OP & CC_OP & CC_B_OP & CR_D & CR_D_CC & XOP_1_10=225 & BIT_0=0 +{ + setCrBit(CR_D,CR_D_CC,!(CC_B_OP & CC_OP)); +} + +:e_crnor CC_D_OP,CC_OP,CC_B_OP is $(ISVLE) & OP=31 & CC_D_OP & CC_OP & CC_B_OP & CR_D & CR_D_CC & XOP_1_10=33 & BIT_0=0 +{ + setCrBit(CR_D,CR_D_CC,!(CC_B_OP | CC_OP)); +} + +:e_cror CC_D_OP,CC_OP,CC_B_OP is $(ISVLE) & OP=31 & CC_D_OP & CC_OP & CC_B_OP & CR_D & CR_D_CC & XOP_1_10=449 & BIT_0=0 +{ + setCrBit(CR_D,CR_D_CC,(CC_B_OP | CC_OP)); +} + +:e_crorc CC_D_OP,CC_OP,CC_B_OP is $(ISVLE) & OP=31 & CC_D_OP & CC_OP & CC_B_OP & CR_D & CR_D_CC & XOP_1_10=417 & BIT_0=0 +{ + setCrBit(CR_D,CR_D_CC,(CC_B_OP | (!CC_OP))); +} + +:e_crxor CC_D_OP,CC_OP,CC_B_OP is $(ISVLE) & OP=31 & CC_D_OP & CC_OP & CC_B_OP & CR_D & CR_D_CC & XOP_1_10=193 & BIT_0=0 +{ + setCrBit(CR_D,CR_D_CC,(CC_B_OP ^ CC_OP)); +} + +:e_mcrf CRFD,CRFS is $(ISVLE) & OP=31 & CRFD & BITS_21_22=0 & CRFS & BITS_11_17=0 & XOP_1_10=16 & BIT_0=0 +{ + CRFD = CRFS; +} + +:e_lbz D,dPlusRaOrZeroAddress is $(ISVLE) & OP=12 & D & dPlusRaOrZeroAddress +{ + 
D = zext(*:1(dPlusRaOrZeroAddress)); +} + +:se_lbz RZ_VLE,sd4PlusRxAddr is $(ISVLE) & OP4_VLE=8 & RZ_VLE & sd4PlusRxAddr { + RZ_VLE = zext(*:1(sd4PlusRxAddr)); +} + +:e_lbzu D,d8PlusRaAddress is $(ISVLE) & OP=6 & D & A & XOP_8_VLE=0 & d8PlusRaAddress +{ + ea:$(REGISTER_SIZE) = d8PlusRaAddress; + D = zext(*:1(ea)); + A = ea; +} + +# e_ldmvcsrrw 6 (0b0001_10) 0b00101 RA 0b0001_0000 D8 +:e_ldmvcsrrw d8PlusRaOrZeroAddress is $(ISVLE) & OP=6 & d8PlusRaOrZeroAddress & XOP_8_VLE=0x10 & BITS_21_25=5 +{ + tea = d8PlusRaOrZeroAddress; + loadReg(CSRR0); + loadReg(CSRR1); +} + +# e_ldmvdsrrw 6 (0b0001_10) 0b00110 RA 0b0001_0000 D8 +:e_ldmvdsrrw d8PlusRaOrZeroAddress is $(ISVLE) & OP=6 & d8PlusRaOrZeroAddress & XOP_8_VLE=0x10 & BITS_21_25=6 +{ + tea = d8PlusRaOrZeroAddress; + loadReg(spr23e); #DSRR0 + loadReg(spr23f); #DSRR1 +} + +# e_ldmvgprw 6 (0b0001_10) 0b00000 RA 0b0001_0000 D8 +:e_ldmvgprw d8PlusRaOrZeroAddress is $(ISVLE) & OP=6 & d8PlusRaOrZeroAddress & XOP_8_VLE=0x10 & BITS_21_25=0 +{ + tea = d8PlusRaOrZeroAddress; + loadReg(r0); + loadReg(r3); + loadReg(r4); + loadReg(r5); + loadReg(r6); + loadReg(r7); + loadReg(r8); + loadReg(r9); + loadReg(r10); + loadReg(r11); + loadReg(r12); +} + +# e_ldmvsprw 6 (0b0001_10) 0b00001 RA 0b0001_0000 D8 +:e_ldmvsprw d8PlusRaOrZeroAddress is $(ISVLE) & OP=6 & d8PlusRaOrZeroAddress & XOP_8_VLE=0x10 & BITS_21_25=1 +{ + tea = d8PlusRaOrZeroAddress; + #TODO is there a better way to handle this, CR are 4 bit + # so crall can't be used. And not much code accesses + # CR in this way, also CRM_CR seems backwards? + # loadReg(CR); + local tmpCR:4 = *:4 tea; + cr0 = zext(tmpCR[0,4]); + cr1 = zext(tmpCR[4,4]); + cr2 = zext(tmpCR[8,4]); + cr3 = zext(tmpCR[12,4]); + cr4 = zext(tmpCR[16,4]); + cr5 = zext(tmpCR[20,4]); + cr6 = zext(tmpCR[24,4]); + cr7 = zext(tmpCR[28,4]); + tea = tea + 4; + loadReg(LR); + loadReg(CTR); + loadReg(XER); +} + +# e_ldmvsrrw 6 (0b0001_10) 0b00100 RA 0b0001_0000 D8 +:e_ldmvsrrw d8PlusRaOrZeroAddress is $(ISVLE) & OP=6 & d8PlusRaOrZeroAddress & XOP_8_VLE=0x10 & BITS_21_25=4 +{ + tea = d8PlusRaOrZeroAddress; + loadReg(SRR0); + loadReg(SRR1); +} + +:e_lha D,dPlusRaOrZeroAddress is $(ISVLE) & OP=14 & D & dPlusRaOrZeroAddress +{ + D = sext(*:2(dPlusRaOrZeroAddress)); +} + +:e_lhz D,dPlusRaOrZeroAddress is $(ISVLE) & OP=22 & D & dPlusRaOrZeroAddress +{ + D = zext(*:2(dPlusRaOrZeroAddress)); +} + +:se_lhz RZ_VLE,sd4HPlusRxAddr is $(ISVLE) & OP4_VLE=10 & RZ_VLE & sd4HPlusRxAddr { + RZ_VLE = zext(*:2(sd4HPlusRxAddr)); +} + +:e_lhau D,d8PlusRaAddress is $(ISVLE) & OP=6 & D & A & XOP_8_VLE=3 & d8PlusRaAddress +{ + ea:$(REGISTER_SIZE) = d8PlusRaAddress; + D = sext(*:2(ea)); + A = ea; +} + +:e_lhzu D,d8PlusRaAddress is $(ISVLE) & OP=6 & D & A & XOP_8_VLE=1 & d8PlusRaAddress +{ + ea:$(REGISTER_SIZE) = d8PlusRaAddress; + D = zext(*:2(ea)); + A = ea; +} + +:e_lwz D,dPlusRaOrZeroAddress is $(ISVLE) & OP=20 & D & dPlusRaOrZeroAddress +{ + D = zext(*:4(dPlusRaOrZeroAddress)); +} + +:se_lwz RZ_VLE,sd4WPlusRxAddr is $(ISVLE) & OP4_VLE=12 & RZ_VLE & sd4WPlusRxAddr { + RZ_VLE = zext(*:4(sd4WPlusRxAddr)); +} + +:e_lwzu D,d8PlusRaAddress is $(ISVLE) & OP=6 & D & A & XOP_8_VLE=2 & d8PlusRaAddress +{ + ea:$(REGISTER_SIZE) = d8PlusRaAddress; + D = zext(*:4(ea)); + A = ea; +} + +:e_stb S,dPlusRaOrZeroAddress is $(ISVLE) & OP=13 & S & dPlusRaOrZeroAddress +{ + *:1(dPlusRaOrZeroAddress) = S:1; +} + +:se_stb RZ_VLE,sd4PlusRxAddr is $(ISVLE) & OP4_VLE=9 & RZ_VLE & sd4PlusRxAddr { + *:1(sd4PlusRxAddr) = RZ_VLE:1; +} + +:e_stbu S,d8PlusRaAddress is $(ISVLE) & OP=6 & XOP_8_VLE=4 & S 
& A & d8PlusRaAddress
+{
+ ea:$(REGISTER_SIZE) = d8PlusRaAddress;
+ *:1(ea) = S:1;
+ A = ea;
+}
+
+:e_sth S,dPlusRaOrZeroAddress is $(ISVLE) & OP=23 & S & dPlusRaOrZeroAddress
+{
+ *:2(dPlusRaOrZeroAddress) = S:2;
+}
+
+:se_sth RZ_VLE,sd4HPlusRxAddr is $(ISVLE) & OP4_VLE=11 & RZ_VLE & sd4HPlusRxAddr {
+ *:2(sd4HPlusRxAddr) = RZ_VLE:2;
+}
+
+:e_sthu S,d8PlusRaAddress is $(ISVLE) & OP=6 & XOP_8_VLE=5 & S & A & d8PlusRaAddress
+{
+ ea:$(REGISTER_SIZE) = d8PlusRaAddress;
+ *:2(ea) = S:2;
+ A = ea;
+}
+
+# e_stmvcsrrw 6 (0b0001_10) 0b00101 RA 0b0001_0001 D8
+:e_stmvcsrrw d8PlusRaOrZeroAddress is $(ISVLE) & OP=6 & d8PlusRaOrZeroAddress & XOP_8_VLE=0x11 & BITS_21_25=5
+{
+ tea = d8PlusRaOrZeroAddress;
+ storeReg(CSRR0);
+ storeReg(CSRR1);
+}
+
+# e_stmvdsrrw 6 (0b0001_10) 0b00110 RA 0b0001_0001 D8
+:e_stmvdsrrw d8PlusRaOrZeroAddress is $(ISVLE) & OP=6 & d8PlusRaOrZeroAddress & XOP_8_VLE=0x11 & BITS_21_25=6
+{
+ tea = d8PlusRaOrZeroAddress;
+ storeReg(spr23e); #DSRR0
+ storeReg(spr23f); #DSRR1
+}
+
+# e_stmvgprw 6 (0b0001_10) 0b00000 RA 0b0001_0001 D8
+:e_stmvgprw d8PlusRaOrZeroAddress is $(ISVLE) & OP=6 & d8PlusRaOrZeroAddress & XOP_8_VLE=0x11 & BITS_21_25=0
+{
+ tea = d8PlusRaOrZeroAddress;
+ storeReg(r0);
+ storeReg(r3);
+ storeReg(r4);
+ storeReg(r5);
+ storeReg(r6);
+ storeReg(r7);
+ storeReg(r8);
+ storeReg(r9);
+ storeReg(r10);
+ storeReg(r11);
+ storeReg(r12);
+}
+
+# e_stmvsprw 6 (0b0001_10) 0b00001 RA 0b0001_0001 D8
+:e_stmvsprw d8PlusRaOrZeroAddress is $(ISVLE) & OP=6 & d8PlusRaOrZeroAddress & XOP_8_VLE=0x11 & BITS_21_25=1
+{
+ tea = d8PlusRaOrZeroAddress;
+ #TODO SEE TODO in e_ldmvsprw
+ # storeReg(CR);
+ # NOTE: each crN is a 1-byte register, so it must be zero-extended to 4
+ # bytes *before* shifting; shifting the byte itself by 8 or more would
+ # always yield 0 and drop cr2..cr7 from the packed word.
+ local tmpCR:4 = 0;
+ tmpCR = tmpCR | (zext(cr0 & 0xf) << 0);
+ tmpCR = tmpCR | (zext(cr1 & 0xf) << 4);
+ tmpCR = tmpCR | (zext(cr2 & 0xf) << 8);
+ tmpCR = tmpCR | (zext(cr3 & 0xf) << 12);
+ tmpCR = tmpCR | (zext(cr4 & 0xf) << 16);
+ tmpCR = tmpCR | (zext(cr5 & 0xf) << 20);
+ tmpCR = tmpCR | (zext(cr6 & 0xf) << 24);
+ tmpCR = tmpCR | (zext(cr7 & 0xf) << 28);
+ *:4 tea = tmpCR;
+ tea = tea + 4;
+ storeReg(LR);
+ storeReg(CTR);
+ storeReg(XER);
+}
+
+# e_stmvsrrw 6 (0b0001_10) 0b00100 RA 0b0001_0001 D8
+:e_stmvsrrw d8PlusRaOrZeroAddress is $(ISVLE) & OP=6 & d8PlusRaOrZeroAddress & XOP_8_VLE=0x11 & BITS_21_25=4
+{
+ tea = d8PlusRaOrZeroAddress;
+ storeReg(SRR0);
+ storeReg(SRR1);
+}
+
+:e_stw S,dPlusRaOrZeroAddress is $(ISVLE) & OP=21 & S & dPlusRaOrZeroAddress
+{
+@ifdef BIT_64
+ *:4(dPlusRaOrZeroAddress) = S:4;
+@else
+ *:4(dPlusRaOrZeroAddress) = S;
+@endif
+}
+
+:se_stw RZ_VLE,sd4WPlusRxAddr is $(ISVLE) & OP4_VLE=13 & RZ_VLE & sd4WPlusRxAddr {
+@ifdef BIT_64
+ *:4(sd4WPlusRxAddr) = RZ_VLE:4;
+@else
+ *:4(sd4WPlusRxAddr) = RZ_VLE;
+@endif
+}
+
+:e_stwu S,d8PlusRaAddress is $(ISVLE) & OP=6 & XOP_8_VLE=6 & S & A & d8PlusRaAddress
+{
+ ea:$(REGISTER_SIZE) = d8PlusRaAddress;
+@ifdef BIT_64
+ *:4(ea) = S:4;
+@else
+ *:4(ea) = S;
+@endif
+ A = ea;
+}
+
+:e_lmw D,d8PlusRaOrZeroAddress is $(ISVLE) & OP=6 & XOP_8_VLE=8 & D & BITS_21_25 & d8PlusRaOrZeroAddress & LDMR31 [ lsmul = BITS_21_25; ]
+{
+ tea = d8PlusRaOrZeroAddress;
+ build LDMR31;
+}
+
+:e_stmw S,d8PlusRaOrZeroAddress is $(ISVLE) & OP=6 & XOP_8_VLE=9 & S & BITS_21_25 & d8PlusRaOrZeroAddress & STMR31 [ lsmul = BITS_21_25; ]
+{
+ tea = d8PlusRaOrZeroAddress;
+ build STMR31;
+}
+
+:se_add RX_VLE,RY_VLE is $(ISVLE) & OP6_VLE=1 & BITS_8_9=0 & RX_VLE & RY_VLE {
+ RX_VLE = RX_VLE + RY_VLE;
+}
+
+:e_add16i D,A,SIMM is $(ISVLE) & OP=7 & A & D & SIMM {
+ tmp:2 = SIMM;
+ D = A + sext(tmp);
+}
+
+:e_add2i.
A,SIMM16 is $(ISVLE) & OP=28 & XOP_11_VLE=17 & A & SIMM16 { + A = A + sext(SIMM16); + cr0flags(A); +} + +:e_add2is A,SIMM16 is $(ISVLE) & OP=28 & XOP_11_VLE=18 & A & SIMM16 { + tmp:$(REGISTER_SIZE) = sext(SIMM16); + tmp = tmp << 16; + A = A + tmp; +} + +:e_addi D,A,SCALE is $(ISVLE) & OP=6 & XOP_12_VLE=8 & BIT_11=0 & D & A & SCALE { + D = A + SCALE; +} + +:e_addi. D,A,SCALE is $(ISVLE) & OP=6 & XOP_12_VLE=8 & BIT_11=1 & D & A & SCALE { + D = A + SCALE; + cr0flags(D); +} + +:se_addi RX_VLE,OIMM is $(ISVLE) & OP6_VLE=8 & BIT9_VLE=0 & RX_VLE & OIMM { + RX_VLE = RX_VLE + OIMM; +} + +:e_addic D,A,SCALE is $(ISVLE) & OP=6 & XOP_12_VLE=9 & BIT_11=0 & D & A & SCALE { + xer_ca = carry(A,SCALE); + D = A + SCALE; +} + +:e_addic. D,A,SCALE is $(ISVLE) & OP=6 & XOP_12_VLE=9 & BIT_11=1 & D & A & SCALE { + xer_ca = carry(A,SCALE); + D = A + SCALE; + cr0flags(D); +} + +:se_sub RX_VLE,RY_VLE is $(ISVLE) & OP6_VLE=1 & BITS_8_9=2 & RX_VLE & RY_VLE { + RX_VLE = RX_VLE - RY_VLE; +} + +:se_subf RX_VLE,RY_VLE is $(ISVLE) & OP6_VLE=1 & BITS_8_9=3 & RX_VLE & RY_VLE { + RX_VLE = RY_VLE - RX_VLE; +} + +:e_subfic D,A,SCALE is $(ISVLE) & OP=6 & XOP_12_VLE=11 & BIT_11=0 & D & A & SCALE { + xer_ca = (A <= SCALE); + D = SCALE - A; +} + +:e_subfic. D,A,SCALE is $(ISVLE) & OP=6 & XOP_12_VLE=11 & BIT_11=1 & D & A & SCALE { + xer_ca = (A <= SCALE); + D = SCALE - A; + cr0flags(D); +} + +:se_subi RX_VLE,OIMM is $(ISVLE) & OP6_VLE=9 & BIT9_VLE=0 & RX_VLE & OIMM { + RX_VLE = RX_VLE - OIMM; +} + +:se_subi. RX_VLE,OIMM is $(ISVLE) & OP6_VLE=9 & BIT9_VLE=1 & RX_VLE & OIMM { + RX_VLE = RX_VLE - OIMM; + cr0flags(RX_VLE); +} + +:e_mulli D,A,SCALE is $(ISVLE) & OP=6 & XOP_11_VLE=20 & D & A & SCALE { + tmp1:16 = sext(A); + tmp2:16 = sext(SCALE); + tmpP:16 = tmp1 * tmp2; + D = tmpP:$(REGISTER_SIZE); +} + +:e_mull2i. A,SIMM16 is $(ISVLE) & OP=28 & XOP_11_VLE=20 & A & SIMM16 { + tmp1:16 = sext(A); + tmp2:16 = sext(SIMM16); + tmpP:16 = tmp1 * tmp2; + A = tmpP:$(REGISTER_SIZE); +} + +:se_mullw RX_VLE,RY_VLE is $(ISVLE) & OP6_VLE=1 & BITS_8_9=1 & RX_VLE & RY_VLE { + RX_VLE = RX_VLE * RY_VLE; +} + +:se_neg RX_VLE is $(ISVLE) & OP6_VLE=0 & XOR_VLE=3 & RX_VLE { + RX_VLE = ~RX_VLE + 1; +} + +:se_btsti RX_VLE,OIM5_VLE is $(ISVLE) & OP6_VLE=25 & BIT9_VLE=1 & RX_VLE & OIM5_VLE { + tmp:$(REGISTER_SIZE) = (RX_VLE >> (0x1F - OIM5_VLE)) & 0x1; + cr0flags(tmp); +} + +:e_cmp16i. A,SIMM16 is $(ISVLE) & OP=28 & XOP_11_VLE=19 & A & SIMM16 { + tmpA:4 = A:4; + tmpB:4 = sext(SIMM16); + cr0 = ((tmpA s< tmpB) << 3) | ((tmpA s> tmpB) << 2) | ((tmpA == tmpB) << 1) | (xer_so & 1); +} + + +:e_cmpi BF_VLE,A,SCALE is $(ISVLE) & OP=6 & XOP_11_VLE=21 & BITS_23_25=0 & A & BF_VLE & SCALE { + tmpA:4 = A:4; + tmpB:4 = SCALE:4; + BF_VLE = ((tmpA s< tmpB) << 3) | ((tmpA s> tmpB) << 2) | ((tmpA == tmpB) << 1) | (xer_so & 1); +} + +:se_cmp RX_VLE,RY_VLE is $(ISVLE) & OP6_VLE=3 & BITS_8_9=0 & RX_VLE & RY_VLE { + tmpA:4 = RX_VLE:4; + tmpB:4 = RY_VLE:4; + cr0 = ((tmpA s< tmpB) << 3) | ((tmpA s> tmpB) << 2) | ((tmpA == tmpB) << 1) | (xer_so & 1); +} + +:se_cmpi RX_VLE,OIM5_VLE is $(ISVLE) & OP6_VLE=10 & BIT9_VLE=1 & RX_VLE & OIM5_VLE { + tmpA:4 = RX_VLE:4; + tmpB:4 = OIM5_VLE; + cr0 = ((tmpA s< tmpB) << 3) | ((tmpA s> tmpB) << 2) | ((tmpA == tmpB) << 1) | (xer_so & 1); +} + +:e_cmpl16i. 
A,IMM16 is $(ISVLE) & OP=28 & XOP_11_VLE=21 & A & IMM16 { + tmpA:4 = A:4; + tmpB:4 = zext(IMM16); + cr0 = ((tmpA < tmpB) << 3) | ((tmpA > tmpB) << 2) | ((tmpA == tmpB) << 1) | (xer_so & 1); +} + +:e_cmpli BF_VLE,A,SCALE is $(ISVLE) & OP=6 & XOP_11_VLE=21 & BITS_23_25=1 & A & BF_VLE & SCALE { + tmpA:4 = A:4; + tmpB:4 = SCALE:4; + BF_VLE = ((tmpA < tmpB) << 3) | ((tmpA > tmpB) << 2) | ((tmpA == tmpB) << 1) | (xer_so & 1); +} + +:se_cmpl RX_VLE,RY_VLE is $(ISVLE) & OP6_VLE=3 & BITS_8_9=1 & RX_VLE & RY_VLE { + tmpA:4 = RX_VLE:4; + tmpB:4 = RY_VLE:4; + cr0 = ((tmpA < tmpB) << 3) | ((tmpA > tmpB) << 2) | ((tmpA == tmpB) << 1) | (xer_so & 1); +} + +:se_cmpli RX_VLE,OIMM is $(ISVLE) & OP6_VLE=8 & BIT9_VLE=1 & RX_VLE & OIMM { + tmpA:4 = RX_VLE:4; + tmpB:4 = OIMM:4; + cr0 = ((tmpA < tmpB) << 3) | ((tmpA > tmpB) << 2) | ((tmpA == tmpB) << 1) | (xer_so & 1); +} + +:e_cmph CRFD,A,B is $(ISVLE) & OP=31 & BITS_21_22=0 & BIT_0=0 & XOP_1_10=14 & A & B & CRFD { + tmpA:2 = A:2; + tmpB:2 = B:2; + CRFD = ((tmpA s< tmpB) << 3) | ((tmpA s> tmpB) << 2) | ((tmpA == tmpB) << 1) | (xer_so & 1); +} + +:se_cmph RX_VLE,RY_VLE is $(ISVLE) & OP6_VLE=3 & BITS_8_9=2 & RX_VLE & RY_VLE { + tmpA:2 = RX_VLE:2; + tmpB:2 = RY_VLE:2; + cr0 = ((tmpA s< tmpB) << 3) | ((tmpA s> tmpB) << 2) | ((tmpA == tmpB) << 1) | (xer_so & 1); +} + +:e_cmph16i. A,SIMM16 is $(ISVLE) & OP=28 & XOP_11_VLE=22 & A & SIMM16 { + tmpA:2 = A:2; + tmpB:2 = SIMM16; + cr0 = ((tmpA s< tmpB) << 3) | ((tmpA s> tmpB) << 2) | ((tmpA == tmpB) << 1) | (xer_so & 1); +} + +:e_cmphl CRFD,A,B is $(ISVLE) & OP=31 & BITS_21_22=0 & BIT_0=0 & XOP_1_10=46 & A & B & CRFD { + tmpA:2 = A:2; + tmpB:2 = B:2; + tmpC:1 = ((tmpA < tmpB) << 3) | ((tmpA > tmpB) << 2) | ((tmpA == tmpB) << 1) | (xer_so & 1); + CRFD = tmpC; +} + +:se_cmphl RX_VLE,RY_VLE is $(ISVLE) & OP6_VLE=3 & BITS_8_9=3 & RX_VLE & RY_VLE { + tmpA:2 = RX_VLE:2; + tmpB:2 = RY_VLE:2; + cr0 = ((tmpA < tmpB) << 3) | ((tmpA > tmpB) << 2) | ((tmpA == tmpB) << 1) | (xer_so & 1); +} + +:e_cmphl16i. A,IMM16 is $(ISVLE) & OP=28 & XOP_11_VLE=23 & A & IMM16 { + tmpA:2 = A:2; + tmpB:2 = IMM16; + cr0 = ((tmpA < tmpB) << 3) | ((tmpA > tmpB) << 2) | ((tmpA == tmpB) << 1) | (xer_so & 1); +} + +:e_and2i. D,IMM16B is $(ISVLE) & OP=28 & XOP_11_VLE=25 & D & IMM16B { + D = D & zext(IMM16B); + cr0flags(D); +} + +:e_and2is. D,IMM16B is $(ISVLE) & OP=28 & XOP_11_VLE=29 & D & IMM16B { + tmp:$(REGISTER_SIZE) = zext(IMM16B); + tmp = tmp << 16; + D = D & tmp; + cr0flags(D); +} + +:e_andi A,S,SCALE is $(ISVLE) & OP=6 & XOP_12_VLE=12 & BIT_11=0 & S & A & SCALE { + A = S & SCALE; +} + +:e_andi. A,S,SCALE is $(ISVLE) & OP=6 & XOP_12_VLE=12 & BIT_11=1 & S & A & SCALE { + A = S & SCALE; + cr0flags(A); +} + +:se_andi RX_VLE,OIM5_VLE is $(ISVLE) & OP6_VLE=11 & BIT9_VLE=1 & RX_VLE & OIM5_VLE { + tmp:1 = OIM5_VLE; + RX_VLE = RX_VLE & zext(tmp); +} + +:e_or2i D,IMM16B is $(ISVLE) & OP=28 & XOP_11_VLE=24 & D & IMM16B { + D = D | zext(IMM16B); +} + +:e_or2is D,IMM16B is $(ISVLE) & OP=28 & XOP_11_VLE=26 & D & IMM16B { + tmp:$(REGISTER_SIZE) = zext(IMM16B); + tmp = tmp << 16; + D = D | tmp; +} + +:e_ori A,S,SCALE is $(ISVLE) & OP=6 & XOP_12_VLE=13 & BIT_11=0 & S & A & SCALE { + A = S | SCALE; +} + +:e_ori. A,S,SCALE is $(ISVLE) & OP=6 & XOP_12_VLE=13 & BIT_11=1 & S & A & SCALE { + A = S | SCALE; + cr0flags(A); +} + +:e_xori A,S,SCALE is $(ISVLE) & OP=6 & XOP_12_VLE=14 & BIT_11=0 & S & A & SCALE { + A = S ^ SCALE; +} + +:e_xori. 
A,S,SCALE is $(ISVLE) & OP=6 & XOP_12_VLE=14 & BIT_11=1 & S & A & SCALE { + A = S ^ SCALE; + cr0flags(A); +} + +:se_and RX_VLE,RY_VLE is $(ISVLE) & OP6_VLE=17 & BIT9_VLE=1 & BIT8_VLE=0 & RX_VLE & RY_VLE { + RX_VLE = RX_VLE & RY_VLE; +} + +:se_and. RX_VLE,RY_VLE is $(ISVLE) & OP6_VLE=17 & BIT9_VLE=1 & BIT8_VLE=1 & RX_VLE & RY_VLE { + RX_VLE = RX_VLE & RY_VLE; + cr0flags(RX_VLE); +} + +:se_andc RX_VLE,RY_VLE is $(ISVLE) & OP6_VLE=17 & BITS_8_9=1 & RX_VLE & RY_VLE { + RX_VLE = RX_VLE & ~RY_VLE; +} + +:se_or RX_VLE,RY_VLE is $(ISVLE) & OP6_VLE=17 & BITS_8_9=0 & RX_VLE & RY_VLE { + RX_VLE = RX_VLE | RY_VLE; +} + +:se_not RX_VLE is $(ISVLE) & OP6_VLE=0 & XOR_VLE=2 & RX_VLE { + RX_VLE = ~RX_VLE; +} + +:se_bclri RX_VLE,OIM5_VLE is $(ISVLE) & OP6_VLE=24 & BIT9_VLE=0 & RX_VLE & OIM5_VLE { + tmp:$(REGISTER_SIZE) = 0x80000000 >> OIM5_VLE; + tmp = ~tmp; + RX_VLE = RX_VLE & tmp; +} + +:se_bgeni RX_VLE,OIM5_VLE is $(ISVLE) & OP6_VLE=24 & BIT9_VLE=1 & RX_VLE & OIM5_VLE { + RX_VLE = 0x80000000 >> OIM5_VLE; +} + +:se_bmaski RX_VLE,OIM5_VLE is $(ISVLE) & OP6_VLE=11 & BIT9_VLE=0 & RX_VLE & OIM5_VLE { + RX_VLE = ~0; + sa:4 = (8 * $(REGISTER_SIZE) - OIM5_VLE) * zext( OIM5_VLE != 0:1 ); + RX_VLE = RX_VLE >> sa; +} + +:se_bseti RX_VLE,OIM5_VLE is $(ISVLE) & OP6_VLE=25 & BIT9_VLE=0 & RX_VLE & OIM5_VLE { + tmp:$(REGISTER_SIZE) = 0x80000000 >> OIM5_VLE; + RX_VLE = RX_VLE | tmp; +} + +:se_extsb RX_VLE is $(ISVLE) & OP6_VLE=0 & XOR_VLE=13 & RX_VLE { + RX_VLE = sext(RX_VLE:1); +} + +:se_extsh RX_VLE is $(ISVLE) & OP6_VLE=0 & XOR_VLE=15 & RX_VLE { + RX_VLE = sext(RX_VLE:2); +} + +:se_extzb RX_VLE is $(ISVLE) & OP6_VLE=0 & XOR_VLE=12 & RX_VLE { + RX_VLE = zext(RX_VLE:1); +} + +:se_extzh RX_VLE is $(ISVLE) & OP6_VLE=0 & XOR_VLE=14 & RX_VLE { + RX_VLE = zext(RX_VLE:2); +} + +:e_li D,SIMM20 is $(ISVLE) & OP=28 & BIT_15=0 & D & SIMM20 { + D = sext(SIMM20); +} + +:se_li RX_VLE,OIM7_VLE is $(ISVLE) & OP5_VLE=9 & RX_VLE & OIM7_VLE { + RX_VLE = OIM7_VLE; +} + +:e_lis D,IMM16B is $(ISVLE) & OP=28 & XOP_11_VLE=28 & D & IMM16B { + tmp:$(REGISTER_SIZE) = zext(IMM16B); + D = tmp << 16; +} + +:se_mfar RX_VLE,ARY_VLE is $(ISVLE) & OP6_VLE=0 & BITS_8_9=3 & RX_VLE & ARY_VLE { + RX_VLE = ARY_VLE; +} + +:se_mr RX_VLE,RY_VLE is $(ISVLE) & OP6_VLE=0 & BITS_8_9=1 & RX_VLE & RY_VLE { + RX_VLE = RY_VLE; +} + +:se_mtar ARX_VLE,RY_VLE is $(ISVLE) & OP6_VLE=0 & BITS_8_9=2 & ARX_VLE & RY_VLE { + ARX_VLE = RY_VLE; +} + +:e_rlw A,S,B is $(ISVLE) & OP=31 & BIT_0=0 & XOP_1_10=280 & A & B & S { + tmpB:1 = B[0,5]; + tmpS:4 = S:4; + tmpA:4 = (tmpS << tmpB) | (tmpS >> (32 - tmpB)); + A = zext(tmpA); +} + +:e_rlw. A,S,B is $(ISVLE) & OP=31 & BIT_0=1 & XOP_1_10=280 & A & B & S { + tmpB:1 = B[0,5]; + tmpS:4 = S:4; + tmpA:4 = (tmpS << tmpB) | (tmpS >> (32 - tmpB)); + A = zext(tmpA); + cr0flags(A); +} + +:e_rlwi A,S,SHL is $(ISVLE) & OP=31 & BIT_0=0 & XOP_1_10=312 & A & SHL & S { + tmpS:4 = S:4; + tmpA:4 = (tmpS << SHL) | (tmpS >> (32 - SHL)); + A = zext(tmpA); +} + +:e_rlwi. 
A,S,SHL is $(ISVLE) & OP=31 & BIT_0=1 & XOP_1_10=312 & A & SHL & S {
+ tmpS:4 = S:4;
+ tmpA:4 = (tmpS << SHL) | (tmpS >> (32 - SHL));
+ A = zext(tmpA);
+ cr0flags(A);
+}
+
+# The manual calls this field MB, but because the "MB" symbol is already taken, MBL it is
+:e_rlwimi A,S,SHL,MBL,ME is $(ISVLE) & OP=29 & BIT_0=0 & MBL & ME & A & SHL & S {
+ tmpS:4 = S:4;
+ tmpA:4 = (tmpS << SHL) | (tmpS >> (32 - SHL));
+ tmpM:4 = ~0:4;
+ if (ME:1 < MBL:1) goto <invert>;
+ tmpM = tmpM << MBL;
+ tmpM = tmpM >> ((31-ME) + MBL);
+ tmpM = tmpM << (31-ME);
+ goto <mask_done>;
+ <invert>
+ # ME < MBL: build the complementary span ME+1..MBL-1, then invert it so
+ # the final mask wraps around and includes both MBL and ME.
+ tmpM = tmpM << (ME+1);
+ tmpM = tmpM >> ((32-MBL) + (ME+1));
+ tmpM = tmpM << (32-MBL);
+ tmpM = ~tmpM;
+ <mask_done>
+ A = zext(tmpA & tmpM) | (A & zext(~tmpM));
+}
+
+:e_rlwinm A,S,SHL,MBL,ME is $(ISVLE) & OP=29 & BIT_0=1 & MBL & ME & A & SHL & S {
+ tmpS:4 = S:4;
+ tmpA:4 = (tmpS << SHL) | (tmpS >> (32 - SHL));
+ tmpM:4 = ~0:4;
+ if (ME:1 < MBL:1) goto <invert>;
+ tmpM = tmpM << MBL;
+ tmpM = tmpM >> ((31-ME) + MBL);
+ tmpM = tmpM << (31-ME);
+ goto <mask_done>;
+ <invert>
+ tmpM = tmpM << (ME+1);
+ tmpM = tmpM >> ((32-MBL) + (ME+1));
+ tmpM = tmpM << (32-MBL);
+ tmpM = ~tmpM;
+ <mask_done>
+ A = zext(tmpA & tmpM);
+}
+
+:e_slwi A,S,SHL is $(ISVLE) & OP=31 & BIT_0=0 & XOP_1_10=56 & A & SHL & S {
+ tmpS:4 = S:4;
+ tmpS = tmpS << SHL;
+ A = zext(tmpS);
+}
+
+:e_slwi. A,S,SHL is $(ISVLE) & OP=31 & BIT_0=1 & XOP_1_10=56 & A & SHL & S {
+ tmpS:4 = S:4;
+ tmpS = tmpS << SHL;
+ A = zext(tmpS);
+ cr0flags(A);
+}
+
+:se_slwi RX_VLE,OIM5_VLE is $(ISVLE) & OP6_VLE=27 & BIT9_VLE=0 & RX_VLE & OIM5_VLE {
+ tmpX:4 = RX_VLE:4;
+ tmpX = tmpX << OIM5_VLE;
+ RX_VLE = zext(tmpX);
+}
+
+:se_slw RX_VLE,RY_VLE is $(ISVLE) & OP6_VLE=16 & BITS_8_9=2 & RX_VLE & RY_VLE {
+ tmpX:4 = RX_VLE:4;
+ tmpS:1 = RY_VLE[0,6];
+ tmpX = tmpX << tmpS;
+ RX_VLE = zext(tmpX);
+}
+
+:se_srawi RX_VLE,OIM5_VLE is $(ISVLE) & OP6_VLE=26 & BIT9_VLE=1 & RX_VLE & OIM5_VLE {
+ tmpX:4 = RX_VLE:4;
+ tmpX = tmpX s>> OIM5_VLE;
+ RX_VLE = sext(tmpX);
+ xer_ca = (RX_VLE s< 0) & (OIM5_VLE:1 != 0);
+}
+
+:se_sraw RX_VLE,RY_VLE is $(ISVLE) & OP6_VLE=16 & BITS_8_9=1 & RX_VLE & RY_VLE {
+ tmpX:4 = RX_VLE:4;
+ tmpS:1 = RY_VLE[0,5];
+ tmpX = tmpX s>> tmpS;
+ RX_VLE = sext(tmpX);
+ xer_ca = (RX_VLE s< 0) & (tmpS != 0);
+}
+
+:e_srwi A,S,SHL is $(ISVLE) & OP=31 & BIT_0=0 & XOP_1_10=568 & A & SHL & S {
+ tmpS:4 = S:4;
+ tmpS = tmpS >> SHL;
+ A = zext(tmpS);
+}
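+
+# Editor's worked sketch of the rotate-and-mask logic in e_rlwimi/e_rlwinm
+# above (illustrative values only, not part of the spec): for MBL=8, ME=23
+# the first branch builds
+#   tmpM = ((~0 << 8) >> ((31-23)+8)) << (31-23) = 0x00FFFF00,
+# i.e. ones in big-endian bits 8..23. For a wrapping mask such as MBL=24,
+# ME=7, the <invert> branch builds the complementary span ME+1..MBL-1
+# (again bits 8..23) and then inverts it, giving 0xFF0000FF.
+
+:e_srwi.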
A,S,SHL is $(ISVLE) & OP=31 & BIT_0=1 & XOP_1_10=568 & A & SHL & S { + tmpS:4 = S:4; + tmpS = tmpS >> SHL; + A = zext(tmpS); + cr0flags(A); +} + +:se_srwi RX_VLE,OIM5_VLE is $(ISVLE) & OP6_VLE=26 & BIT9_VLE=0 & RX_VLE & OIM5_VLE { + tmpX:4 = RX_VLE:4; + tmpX = tmpX >> OIM5_VLE; + RX_VLE = zext(tmpX); +} + +:se_srw RX_VLE,RY_VLE is $(ISVLE) & OP6_VLE=16 & BITS_8_9=0 & RX_VLE & RY_VLE { + tmpX:4 = RX_VLE:4; + tmpS:1 = RY_VLE[0,5]; + tmpX = tmpX >> tmpS; + RX_VLE = zext(tmpX); +} + +:se_mfctr RX_VLE is $(ISVLE) & OP6_VLE=0 & XOR_VLE=10 & RX_VLE { + RX_VLE = CTR; +} + +:se_mtctr RX_VLE is $(ISVLE) & OP6_VLE=0 & XOR_VLE=11 & RX_VLE { + CTR = RX_VLE; +} + +:se_mflr RX_VLE is $(ISVLE) & OP6_VLE=0 & XOR_VLE=8 & RX_VLE { + RX_VLE = LR; +} + +:se_mtlr RX_VLE is $(ISVLE) & OP6_VLE=0 & XOR_VLE=9 & RX_VLE { + LR = RX_VLE; +} + +:se_isync is $(ISVLE) & OP16_VLE=1 { + instructionSynchronize(); +} diff --git a/src/third-party/sleigh/processors/PowerPC/data/languages/quicciii.sinc b/src/third-party/sleigh/processors/PowerPC/data/languages/quicciii.sinc new file mode 100644 index 00000000..8b7977bc --- /dev/null +++ b/src/third-party/sleigh/processors/PowerPC/data/languages/quicciii.sinc @@ -0,0 +1,131 @@ +# These instructions show up in the Freescale PowerQUICC III instruction manual +# (not present elsewhere) + +define pcodeop dataCacheBlockClearLock; +define pcodeop prefetchDataCacheBlockLockSet; +define pcodeop prefetchDataCacheBlockLockSetX; +define pcodeop debuggerNotifyHalt; +define pcodeop instructionCacheBlockClearLock; +define pcodeop queryInstructionCacheBlockLock; +define pcodeop prefetchInstructionCacheBlockLockSetX; +define pcodeop memoryBarrier; +define pcodeop moveFromAPIDIndirect; +define pcodeop moveFromPerformanceMonitorRegister; +define pcodeop moveToPerformanceMonitorRegister; +define pcodeop invalidateTLB; + +#dcblc 0,0,r0 #FIXME +:dcblc CT,RA_OR_ZERO,B is OP=31 & CT & B & XOP_1_10=390 & BIT_0=0 & RA_OR_ZERO +{ + ea:$(REGISTER_SIZE) = RA_OR_ZERO + B; + dataCacheBlockClearLock(ea); +} + +#dcbtls 0,0,r0 #FIXME +:dcbtls CT,RA_OR_ZERO,B is OP=31 & CT & B & XOP_1_10=166 & BIT_0=0 & RA_OR_ZERO +{ + ea:$(REGISTER_SIZE) = RA_OR_ZERO + B; + prefetchDataCacheBlockLockSet(ea); +} + +#dcbtstls 0,0,r0 #FIXME +:dcbtstls CT,RA_OR_ZERO,B is OP=31 & CT & B & XOP_1_10=134 & BIT_0=0 & RA_OR_ZERO +{ + ea:$(REGISTER_SIZE) = RA_OR_ZERO + B; + prefetchDataCacheBlockLockSetX(ea); +} + +#dnh 0,0 #FIXME +:dnh DUI,DUIS is $(NOTVLE) & OP=19 & DUI & DUIS & XOP_1_10=198 & BIT_0=0 +{ + debuggerNotifyHalt(DUI:1,DUIS:2); +} + +#icblc 0,0,r0 #FIXME +:icblc CT,RA_OR_ZERO,B is OP=31 & CT & B & XOP_1_10=230 & BIT_0=0 & RA_OR_ZERO +{ + ea:$(REGISTER_SIZE) = RA_OR_ZERO + B; + instructionCacheBlockClearLock(CT:1,ea); +} + +:icblq CT,RA_OR_ZERO,B is OP=31 & CT & B & XOP_1_10=198 & BIT_0=1 & RA_OR_ZERO +{ + ea:$(REGISTER_SIZE) = RA_OR_ZERO + B; + cr0 = queryInstructionCacheBlockLock(CT:1,ea); +} + +#icbtls 0,0,r0 #FIXME +:icbtls CT,RA_OR_ZERO,B is OP=31 & CT & B & XOP_1_10=486 & BIT_0=0 & RA_OR_ZERO +{ + ea:$(REGISTER_SIZE) = RA_OR_ZERO + B; + prefetchInstructionCacheBlockLockSetX(ea); +} + +:isel^CC_X_OPm D,RA_OR_ZERO,B,CC_X_OP is OP=31 & D & RA_OR_ZERO & B & CC_X_OP & CC_X_OPm & XOP_1_5=15 +{ + local tmp:$(REGISTER_SIZE) = RA_OR_ZERO; + D = B; + if (!CC_X_OP) goto inst_next; + D = tmp; +# D = (zext(CC_X_OP) * RA_OR_ZERO) + (zext(!CC_X_OP) * B); +} + +@ifndef IS_ISA +#mbar 0 #FIXME +:mbar MO is OP=31 & MO & XOP_1_10=854 +{ + memoryBarrier(MO:1); +} +@endif + +#mfapidi r0,r1 #FIXME +:mfapidi D,A is $(NOTVLE) & OP=31 & D & A & 
XOP_1_10=275 +{ + D = moveFromAPIDIndirect(A); +} + +pmrn: pmr is BITS_16_20 & BITS_11_15 [ pmr = BITS_11_15 << 5 | BITS_16_20; ] { tmp:2 = pmr; export tmp; } + +#mfpmr r0,? #FIXME +:mfpmr D,pmrn is OP=31 & D & pmrn & XOP_1_10=334 & BIT_0=0 +{ + D = moveFromPerformanceMonitorRegister(pmrn); +} + +#mtpmr r0,? #FIXME +:mtpmr pmrn,S is OP=31 & S & pmrn & XOP_1_10=462 & BIT_0=0 +{ + moveToPerformanceMonitorRegister(pmrn, S); +} + +#rfdi #FIXME +:rfdi is $(NOTVLE) & OP=19 & XOP_1_10=39 +{ + MSR = returnFromDebugInterrupt(MSR, spr23f); #DSRR1 + local ra = spr23e; #DSRR0 + return[ra]; +} + +#rfmci #FIXME +:rfmci is $(NOTVLE) & OP=19 & XOP_0_10=76 +{ + MSR = returnFromMachineCheckInterrupt(MSR, spr23b); #MCSRR1 + local ra = spr23a; #MCSRR0 + return[ra]; +} + + +# PowerISA II: 6.11.4.9 TLB Management Instructions +# CMT: TLB Invalidate Local Indexed [Category: Embedded.Phased In]] +# FORM: X-form +define pcodeop TLBInvalidateLocalIndexed; # Outputs/affect TBD +:tlbilx BITS_21_22,RA_OR_ZERO,RB_OR_ZERO is $(NOTVLE) & OP=31 & CRFD=0 & BITS_21_22 & RA_OR_ZERO & RB_OR_ZERO & XOP_1_10=18 & BIT_0=0 { + TLBInvalidateLocalIndexed(BITS_21_22:1,RA_OR_ZERO,RB_OR_ZERO); +} + +#tlbivax 0,r0 #FIXME +:tlbivax RA_OR_ZERO,B is OP=31 & RA_OR_ZERO & B & XOP_1_10=786 +{ + ea:$(REGISTER_SIZE) = RA_OR_ZERO + B; + invalidateTLB(ea); +} diff --git a/src/third-party/sleigh/processors/PowerPC/data/languages/stmwInstructions.sinc b/src/third-party/sleigh/processors/PowerPC/data/languages/stmwInstructions.sinc new file mode 100644 index 00000000..f39f2758 --- /dev/null +++ b/src/third-party/sleigh/processors/PowerPC/data/languages/stmwInstructions.sinc @@ -0,0 +1,101 @@ +STMR0: is lsmul=1 {} +STMR0: is epsilon { storeReg(r0); } + +STMR1: is lsmul=2 {} +STMR1: is STMR0 { build STMR0; storeReg(r1); } + +STMR2: is lsmul=3 {} +STMR2: is STMR1 { build STMR1; storeReg(r2); } + +STMR3: is lsmul=4 {} +STMR3: is STMR2 { build STMR2; storeReg(r3); } + +STMR4: is lsmul=5 {} +STMR4: is STMR3 { build STMR3; storeReg(r4); } + +STMR5: is lsmul=6 {} +STMR5: is STMR4 { build STMR4; storeReg(r5); } + +STMR6: is lsmul=7 {} +STMR6: is STMR5 { build STMR5; storeReg(r6); } + +STMR7: is lsmul=8 {} +STMR7: is STMR6 { build STMR6; storeReg(r7); } + +STMR8: is lsmul=9 {} +STMR8: is STMR7 { build STMR7; storeReg(r8); } + +STMR9: is lsmul=10 {} +STMR9: is STMR8 { build STMR8; storeReg(r9); } + +STMR10: is lsmul=11 {} +STMR10: is STMR9 { build STMR9; storeReg(r10); } + +STMR11: is lsmul=12 {} +STMR11: is STMR10 { build STMR10; storeReg(r11); } + +STMR12: is lsmul=13 {} +STMR12: is STMR11 { build STMR11; storeReg(r12); } + +STMR13: is lsmul=14 {} +STMR13: is STMR12 { build STMR12; storeReg(r13); } + +STMR14: is lsmul=15 {} +STMR14: is STMR13 { build STMR13; storeReg(r14); } + +STMR15: is lsmul=16 {} +STMR15: is STMR14 { build STMR14; storeReg(r15); } + +STMR16: is lsmul=17 {} +STMR16: is STMR15 { build STMR15; storeReg(r16); } + +STMR17: is lsmul=18 {} +STMR17: is STMR16 { build STMR16; storeReg(r17); } + +STMR18: is lsmul=19 {} +STMR18: is STMR17 { build STMR17; storeReg(r18); } + +STMR19: is lsmul=20 {} +STMR19: is STMR18 { build STMR18; storeReg(r19); } + +STMR20: is lsmul=21 {} +STMR20: is STMR19 { build STMR19; storeReg(r20); } + +STMR21: is lsmul=22 {} +STMR21: is STMR20 { build STMR20; storeReg(r21); } + +STMR22: is lsmul=23 {} +STMR22: is STMR21 { build STMR21; storeReg(r22); } + +STMR23: is lsmul=24 {} +STMR23: is STMR22 { build STMR22; storeReg(r23); } + +STMR24: is lsmul=25 {} +STMR24: is STMR23 { build STMR23; storeReg(r24); } + +STMR25: is 
lsmul=26 {} +STMR25: is STMR24 { build STMR24; storeReg(r25); } + +STMR26: is lsmul=27 {} +STMR26: is STMR25 { build STMR25; storeReg(r26); } + +STMR27: is lsmul=28 {} +STMR27: is STMR26 { build STMR26; storeReg(r27); } + +STMR28: is lsmul=29 {} +STMR28: is STMR27 { build STMR27; storeReg(r28); } + +STMR29: is lsmul=30 {} +STMR29: is STMR28 { build STMR28; storeReg(r29); } + +STMR30: is lsmul=31 {} +STMR30: is STMR29 { build STMR29; storeReg(r30); } + +STMR31: is STMR30 { build STMR30; storeReg(r31); } + +:stmw S,dPlusRaOrZeroAddress is $(NOTVLE) & OP=47 & S & BITS_21_25 & dPlusRaOrZeroAddress & STMR31 [ lsmul = BITS_21_25; ] +{ + tea = dPlusRaOrZeroAddress; + build STMR31; +} + diff --git a/src/third-party/sleigh/processors/PowerPC/data/languages/stswiInstructions.sinc b/src/third-party/sleigh/processors/PowerPC/data/languages/stswiInstructions.sinc new file mode 100644 index 00000000..cf7c8098 --- /dev/null +++ b/src/third-party/sleigh/processors/PowerPC/data/languages/stswiInstructions.sinc @@ -0,0 +1,186 @@ +#stswi r5,r3,0x02 7c a4 14 aa +#stswi r5,r4,0x08 7c a4 44 aa + + +DYN_S1: regaddr is BITS_21_25 [ regaddr = ((BITS_21_25 + 1)&0x1f) * $(REGISTER_SIZE); ] { export *[register]:$(REGISTER_SIZE) regaddr; } +DYN_S2: regaddr is BITS_21_25 [ regaddr = ((BITS_21_25 + 2)&0x1f) * $(REGISTER_SIZE); ] { export *[register]:$(REGISTER_SIZE) regaddr; } +DYN_S3: regaddr is BITS_21_25 [ regaddr = ((BITS_21_25 + 3)&0x1f) * $(REGISTER_SIZE); ] { export *[register]:$(REGISTER_SIZE) regaddr; } +DYN_S4: regaddr is BITS_21_25 [ regaddr = ((BITS_21_25 + 4)&0x1f) * $(REGISTER_SIZE); ] { export *[register]:$(REGISTER_SIZE) regaddr; } +DYN_S5: regaddr is BITS_21_25 [ regaddr = ((BITS_21_25 + 5)&0x1f) * $(REGISTER_SIZE); ] { export *[register]:$(REGISTER_SIZE) regaddr; } +DYN_S6: regaddr is BITS_21_25 [ regaddr = ((BITS_21_25 + 6)&0x1f) * $(REGISTER_SIZE); ] { export *[register]:$(REGISTER_SIZE) regaddr; } +DYN_S7: regaddr is BITS_21_25 [ regaddr = ((BITS_21_25 + 7)&0x1f) * $(REGISTER_SIZE); ] { export *[register]:$(REGISTER_SIZE) regaddr; } + +:stswi S,RA_OR_ZERO,NB is OP=31 & S & RA_OR_ZERO & NB & BITS_13_15=0 & BH=0 & XOP_1_10=725 & BIT_0=0 + & DYN_S1 & DYN_S2 & DYN_S3 & DYN_S4 & DYN_S5 & DYN_S6 & DYN_S7 +{ + ea:$(REGISTER_SIZE) = RA_OR_ZERO; + storeRegister(S,ea); + storeRegister(DYN_S1,ea); + storeRegister(DYN_S2,ea); + storeRegister(DYN_S3,ea); + storeRegister(DYN_S4,ea); + storeRegister(DYN_S5,ea); + storeRegister(DYN_S6,ea); + storeRegister(DYN_S7,ea); +} + +:stswi S,RA_OR_ZERO,NB is OP=31 & S & RA_OR_ZERO & NB & BITS_13_15=0 & BH & XOP_1_10=725 & BIT_0=0 +{ + ea:$(REGISTER_SIZE) = RA_OR_ZERO; + sa:1 = BH; + storeRegisterPartial(S,ea,sa); +} + +:stswi S,RA_OR_ZERO,NB is OP=31 & S & RA_OR_ZERO & NB & BITS_13_15=1 & BH=0 & XOP_1_10=725 & BIT_0=0 +{ + ea:$(REGISTER_SIZE) = RA_OR_ZERO; + storeRegister(S,ea); +} + +:stswi S,RA_OR_ZERO,NB is OP=31 & S & RA_OR_ZERO & NB & BITS_13_15=1 & BH & XOP_1_10=725 & BIT_0=0 + & DYN_S1 +{ + ea:$(REGISTER_SIZE) = RA_OR_ZERO; + storeRegister(S,ea); + sa:1 = BH; + storeRegisterPartial(DYN_S1,ea,sa); +} + + +:stswi S,RA_OR_ZERO,NB is OP=31 & S & RA_OR_ZERO & NB & BITS_13_15=2 & BH=0 & XOP_1_10=725 & BIT_0=0 + & DYN_S1 +{ + ea:$(REGISTER_SIZE) = RA_OR_ZERO; + storeRegister(S,ea); + storeRegister(DYN_S1,ea); +} + +:stswi S,RA_OR_ZERO,NB is OP=31 & S & RA_OR_ZERO & NB & BITS_13_15=2 & BH & XOP_1_10=725 & BIT_0=0 + & DYN_S1 & DYN_S2 +{ + ea:$(REGISTER_SIZE) = RA_OR_ZERO; + storeRegister(S,ea); + storeRegister(DYN_S1,ea); + sa:1 = BH; + storeRegisterPartial(DYN_S2,ea,sa); +} + 
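+# Editor's note on the stswi variants above and below (worked decode, example
+# values only): the 5-bit NB byte count splits as NB = BITS_13_15*4 + BH, so
+# BITS_13_15 registers are stored in full and BH leftover bytes go through
+# storeRegisterPartial. For "stswi r5,r3,0x02" (NB=2): BITS_13_15=0, BH=2,
+# only a 2-byte partial store of r5. For "stswi r5,r4,0x08" (NB=8):
+# BITS_13_15=2, BH=0, r5 and r6 are stored in full.
+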
+:stswi S,RA_OR_ZERO,NB is OP=31 & S & RA_OR_ZERO & NB & BITS_13_15=3 & BH=0 & XOP_1_10=725 & BIT_0=0 + & DYN_S1 & DYN_S2 +{ + ea:$(REGISTER_SIZE) = RA_OR_ZERO; + storeRegister(S,ea); + storeRegister(DYN_S1,ea); + storeRegister(DYN_S2,ea); +} + +:stswi S,RA_OR_ZERO,NB is OP=31 & S & RA_OR_ZERO & NB & BITS_13_15=3 & BH & XOP_1_10=725 & BIT_0=0 + & DYN_S1 & DYN_S2 & DYN_S3 +{ + ea:$(REGISTER_SIZE) = RA_OR_ZERO; + storeRegister(S,ea); + storeRegister(DYN_S1,ea); + storeRegister(DYN_S2,ea); + sa:1 = BH; + storeRegisterPartial(DYN_S3,ea,sa); +} + +:stswi S,RA_OR_ZERO,NB is OP=31 & S & RA_OR_ZERO & NB & BITS_13_15=4 & BH=0 & XOP_1_10=725 & BIT_0=0 + & DYN_S1 & DYN_S2 & DYN_S3 +{ + ea:$(REGISTER_SIZE) = RA_OR_ZERO; + storeRegister(S,ea); + storeRegister(DYN_S1,ea); + storeRegister(DYN_S2,ea); + storeRegister(DYN_S3,ea); +} + +:stswi S,RA_OR_ZERO,NB is OP=31 & S & RA_OR_ZERO & NB & BITS_13_15=4 & BH & XOP_1_10=725 & BIT_0=0 + & DYN_S1 & DYN_S2 & DYN_S3 & DYN_S4 +{ + ea:$(REGISTER_SIZE) = RA_OR_ZERO; + storeRegister(S,ea); + storeRegister(DYN_S1,ea); + storeRegister(DYN_S2,ea); + storeRegister(DYN_S3,ea); + sa:1 = BH; + storeRegisterPartial(DYN_S4,ea,sa); +} + +:stswi S,RA_OR_ZERO,NB is OP=31 & S & RA_OR_ZERO & NB & BITS_13_15=5 & BH=0 & XOP_1_10=725 & BIT_0=0 + & DYN_S1 & DYN_S2 & DYN_S3 & DYN_S4 +{ + ea:$(REGISTER_SIZE) = RA_OR_ZERO; + storeRegister(S,ea); + storeRegister(DYN_S1,ea); + storeRegister(DYN_S2,ea); + storeRegister(DYN_S3,ea); + storeRegister(DYN_S4,ea); +} + +:stswi S,RA_OR_ZERO,NB is OP=31 & S & RA_OR_ZERO & NB & BITS_13_15=5 & BH & XOP_1_10=725 & BIT_0=0 + & DYN_S1 & DYN_S2 & DYN_S3 & DYN_S4 & DYN_S5 +{ + ea:$(REGISTER_SIZE) = RA_OR_ZERO; + storeRegister(S,ea); + storeRegister(DYN_S1,ea); + storeRegister(DYN_S2,ea); + storeRegister(DYN_S3,ea); + storeRegister(DYN_S4,ea); + sa:1 = BH; + storeRegisterPartial(DYN_S5,ea,sa); +} + +:stswi S,RA_OR_ZERO,NB is OP=31 & S & RA_OR_ZERO & NB & BITS_13_15=6 & BH=0 & XOP_1_10=725 & BIT_0=0 + & DYN_S1 & DYN_S2 & DYN_S3 & DYN_S4 & DYN_S5 +{ + ea:$(REGISTER_SIZE) = RA_OR_ZERO; + storeRegister(S,ea); + storeRegister(DYN_S1,ea); + storeRegister(DYN_S2,ea); + storeRegister(DYN_S3,ea); + storeRegister(DYN_S4,ea); + storeRegister(DYN_S5,ea); +} + +:stswi S,RA_OR_ZERO,NB is OP=31 & S & RA_OR_ZERO & NB & BITS_13_15=6 & BH & XOP_1_10=725 & BIT_0=0 + & DYN_S1 & DYN_S2 & DYN_S3 & DYN_S4 & DYN_S5 & DYN_S6 +{ + ea:$(REGISTER_SIZE) = RA_OR_ZERO; + storeRegister(S,ea); + storeRegister(DYN_S1,ea); + storeRegister(DYN_S2,ea); + storeRegister(DYN_S3,ea); + storeRegister(DYN_S4,ea); + storeRegister(DYN_S5,ea); + sa:1 = BH; + storeRegisterPartial(DYN_S6,ea,sa); +} + +:stswi S,RA_OR_ZERO,NB is OP=31 & S & RA_OR_ZERO & NB & BITS_13_15=7 & BH=0 & XOP_1_10=725 & BIT_0=0 + & DYN_S1 & DYN_S2 & DYN_S3 & DYN_S4 & DYN_S5 & DYN_S6 +{ + ea:$(REGISTER_SIZE) = RA_OR_ZERO; + storeRegister(S,ea); + storeRegister(DYN_S1,ea); + storeRegister(DYN_S2,ea); + storeRegister(DYN_S3,ea); + storeRegister(DYN_S4,ea); + storeRegister(DYN_S5,ea); + storeRegister(DYN_S6,ea); +} + +:stswi S,RA_OR_ZERO,NB is OP=31 & S & RA_OR_ZERO & NB & BITS_13_15=7 & BH & XOP_1_10=725 & BIT_0=0 + & DYN_S1 & DYN_S2 & DYN_S3 & DYN_S4 & DYN_S5 & DYN_S6 & DYN_S7 +{ + ea:$(REGISTER_SIZE) = RA_OR_ZERO; + storeRegister(S,ea); + storeRegister(DYN_S1,ea); + storeRegister(DYN_S2,ea); + storeRegister(DYN_S3,ea); + storeRegister(DYN_S4,ea); + storeRegister(DYN_S5,ea); + storeRegister(DYN_S6,ea); + sa:1 = BH; + storeRegisterPartial(DYN_S7,ea,sa); +} diff --git 
a/src/third-party/sleigh/processors/PowerPC/data/languages/vsx.sinc b/src/third-party/sleigh/processors/PowerPC/data/languages/vsx.sinc
new file mode 100644
index 00000000..b922a31b
--- /dev/null
+++ b/src/third-party/sleigh/processors/PowerPC/data/languages/vsx.sinc
@@ -0,0 +1,1665 @@
+# Source for information on instructions:
+# PowerISA_V2.06B_PUBLIC.pdf (dated: July 23, 2010)
+# and binutils-2.21.1
+
+# version 1.0
+
+# ==========================================================================================================
+# VSX use of XA,XB,XC,XT
+# ==========================================================================================================
+# PowerPC VSX allows VSX register values to come from a combination of 2 different fields.
+# XA is the value of A and AX concatenated. (A has 5 bits and AX 1, so allows for 6 bits or 64 registers).
+# XB is the value of B and BX concatenated. (B has 5 bits and BX 1, so allows for 6 bits or 64 registers).
+# XC is the value of C and CX concatenated. (C has 5 bits and CX 1, so allows for 6 bits or 64 registers).
+# XT is the value of T and TX concatenated. (T has 5 bits and TX 1, so allows for 6 bits or 64 registers).
+#
+# NOTE: A,B,C,T are all 5 bits long and AX,BX,CX,TX are all 1 bit long.
+#
+# In order to print the registers defined in XA,XB,XC,XT we need to play some tricks.
+# Normally you would use an "attach variables [ field ...] [ name1 ... ];" to attach names to a field,
+# but here names must be attached to the concatenation of 2 fields, and that is not directly supported
+# in sleigh.
+#
+# We attach the low registers (0 to 31) to fields that overlap the normal A,B,C,T, named Avsa, Bvsa, Cvsa, Tvsa.
+# We attach the high registers (32 to 63) to fields that overlap the normal A,B,C,T, named Avsb, Bvsb, Cvsb, Tvsb.
+#
+# Then we make constructors dependent on the AX,BX,CX,TX values to switch between them as needed.
+#define token instr(32)
+#...
+# support VSX args
+# Avsa=(16,20)
+# Avsb=(16,20)
+# Bvsa=(11,15)
+# Bvsb=(11,15)
+# Cvsa=(6,10)
+# Cvsb=(6,10)
+# Tvsa=(21,25)
+# Tvsb=(21,25)
+#...
+#;
+# Attach low VSX registers
+attach variables [ Avsa Bvsa Cvsa Svsa Tvsa ]
+ [ vs0 vs1 vs2 vs3 vs4 vs5 vs6 vs7 vs8 vs9 vs10 vs11 vs12 vs13 vs14 vs15
+ vs16 vs17 vs18 vs19 vs20 vs21 vs22 vs23 vs24 vs25 vs26 vs27 vs28 vs29 vs30 vs31
+ ];
+# Attach high VSX registers
+attach variables [ Avsb Bvsb Cvsb Svsb Tvsb ]
+ [ vs32 vs33 vs34 vs35 vs36 vs37 vs38 vs39 vs40 vs41 vs42 vs43 vs44 vs45 vs46 vs47
+ vs48 vs49 vs50 vs51 vs52 vs53 vs54 vs55 vs56 vs57 vs58 vs59 vs60 vs61 vs62 vs63
+ ];
+
+attach variables [ Svsbx Tvsbx ]
+ [ vr0_64_0 vr1_64_0 vr2_64_0 vr3_64_0 vr4_64_0 vr5_64_0 vr6_64_0 vr7_64_0
+ vr8_64_0 vr9_64_0 vr10_64_0 vr11_64_0 vr12_64_0 vr13_64_0 vr14_64_0 vr15_64_0
+ vr16_64_0 vr17_64_0 vr18_64_0 vr19_64_0 vr20_64_0 vr21_64_0 vr22_64_0 vr23_64_0
+ vr24_64_0 vr25_64_0 vr26_64_0 vr27_64_0 vr28_64_0 vr29_64_0 vr30_64_0 vr31_64_0
+ ];
+XA: Avsa is Avsa & AX=0 { export Avsa; } # Low register version of XA (i.e. A and AX fields)
+XA: Avsb is Avsb & AX=1 { export Avsb; } # High register version of XA (i.e. A and AX fields)
+XB: Bvsa is Bvsa & BX=0 { export Bvsa; } # Low register version of XB (i.e. B and BX fields)
+XB: Bvsb is Bvsb & BX=1 { export Bvsb; } # High register version of XB (i.e. B and BX fields)
+XC: Cvsa is Cvsa & CX=0 { export Cvsa; } # Low register version of XC (i.e. C and CX fields)
+XC: Cvsb is Cvsb & CX=1 { export Cvsb; } # High register version of XC (i.e. C and CX fields)
+XS: Svsa is Svsa & SX=0 { export Svsa; }
+XS: Svsb is Svsb & SX=1 { export Svsb; }
+XS3: Svsa is Svsa & SX3=0 { export Svsa; }
+XS3: Svsb is Svsb & SX3=1 { export Svsb; }
+XT: Tvsa is Tvsa & TX=0 { export Tvsa; } # Low register version of XT (i.e. T and TX fields)
+XT: Tvsb is Tvsb & TX=1 { export Tvsb; } # High register version of XT (i.e. T and TX fields)
+XT3: Tvsa is Tvsa & TX3=0 { export Tvsa; } # Low register version of XT (i.e. T and TX3 fields)
+XT3: Tvsb is Tvsb & TX3=1 { export Tvsb; } # High register version of XT (i.e. T and TX3 fields)
+
+XSF: fS is fS & SX=0 { export fS; }
+XSF: Svsbx is Svsbx & SX=1 { export Svsbx; }
+XTF: fT is fT & TX=0 { export fT; }
+XTF: Tvsbx is Tvsbx & TX=1 { export Tvsbx; }
+
+
+DBUILD: val is DX & DM2 & DC6 [ val = (DC6 << 6) | (DM2 << 5) | DX; ] { export *[const]:1 val; }
+# ==========================================================================================================
+# ==========================================================================================================
+
+define pcodeop lxvdsxOp;
+# ISA-info: lxvdsx - Form "XX1" Page 339 Category "VSX"
+# binutils: vsx.d: 8: 7d 0a a2 99 lxvdsx vs40,r10,r20
+:lxvdsx XT,A,B is $(NOTVLE) & OP=31 & XT & A & B & XOP_1_10=332 & TX { XT = lxvdsxOp(A,B); }
+
+# lxsdx XT,RA,RB
+# ISA-info: lxsdx - Form "XX1" Page 338 Category "VSX"
+# binutils: vsx.d: 0: 7d 0a a4 99 lxsdx vs40,r10,r20
+:lxsdx XT,RA_OR_ZERO,B is $(NOTVLE) & OP=31 & XT & RA_OR_ZERO & B & XOP_1_10=588 & TX {
+ ea:$(REGISTER_SIZE) = RA_OR_ZERO + B;
+ XT[0,64] = *:8 ea;
+}
+
+# name lxvd2x code 7c000698 mask fe0700fc00000000 flags @VSX operands 69 31 38 0 0 0 0 0
+:lxvd2x XT,RA_OR_ZERO,B is $(NOTVLE) & OP=31 & XT & RA_OR_ZERO & B & XOP_1_10=844 {
+ ea:$(REGISTER_SIZE) = RA_OR_ZERO + B;
+ XT[64,64] = *:8 ea;
+ XT[0,64] = *:8 (ea+8);
+}
+
+define pcodeop stxsdxOp;
+# ISA-info: stxsdx - Form "XX1" Page 340 Category "VSX"
+# binutils: vsx.d: 10: 7d 0a a5 99 stxsdx vs40,r10,r20
+:stxsdx XT,RA_OR_ZERO,B is $(NOTVLE) & OP=31 & XT & RA_OR_ZERO & B & XOP_1_10=716 & TX {
+ EA:$(REGISTER_SIZE) = RA_OR_ZERO + B;
+ *[ram]:8 EA = stxsdxOp(RA_OR_ZERO,B);
+}
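+
+# Editor's worked example of the two-field trick above (register numbers
+# taken from the binutils listings in this file): in "lxvdsx vs40,r10,r20"
+# the 6-bit VSX register number is 40 = (TX << 5) | T with TX=1 and T=8, so
+# the Tvsb attach list (vs32..vs63) resolves field value 8 to vs40; for a
+# target below 32, TX=0 and the Tvsa list (vs0..vs31) is used instead.
+
+# name stxvd2x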
code 7c000798 mask fe0700fc00000000 flags @VSX operands 69 31 38 0 0 0 0 0 +:stxvd2x XS,RA_OR_ZERO,B is $(NOTVLE) & OP=31 & XS & RA_OR_ZERO & B & XOP_1_10=972 { + ea:$(REGISTER_SIZE) = RA_OR_ZERO + B; + *:8 ea = XS(8); + *:8 (ea+8) = XS:8; +} + +# ISA-cmt: lxvw4x - Load VSR Vector Word*4 Indexed +# ISA-info: lxvw4x - Form "XX1" Page 339 Category "VSX" +# binutils: vsx.d: c: 7d 0a a6 19 lxvw4x vs40,r10,r20 +:lxvw4x XT,RA_OR_ZERO,B is $(NOTVLE) & OP=31 & XT & RA_OR_ZERO & B & XOP_1_10=780 { + ea:$(REGISTER_SIZE) = RA_OR_ZERO + B; + XT[96,32] = *:4 ea; + XT[64,32] = *:4 (ea + 4); + XT[32,32] = *:4 (ea + 8); + XT[0,32] = *:4 (ea + 12); +} + +# ISA-cmt: stxvw4x - Store VSR Vector Word*4 Indexed +# ISA-info: stxvw4x - Form "XX1" Page 341 Category "VSX" +# binutils: vsx.d: 18: 7d 0a a7 19 stxvw4x vs40,r10,r20 +:stxvw4x XS,RA_OR_ZERO,B is $(NOTVLE) & OP=31 & XS & RA_OR_ZERO & B & XOP_1_10=908 { + ea:$(REGISTER_SIZE) = RA_OR_ZERO + B; + *:16 ea = XS; +} + +# ISA-cmt: xxsldwi - VSX Shift Left Double by Word Immediate +# ISA-info: xxsldwi - Form "XX3" Page 501 Category "VSX" +# binutils: vsx.d: 270: f1 12 e2 17 xxsldwi vs40,vs50,vs60,2 +:xxsldwi XT,XA,XB,SHW is $(NOTVLE) & OP=60 & BIT_10 & SHW & BITS_3_7=2 & XA & XB & XT { + tmp:32 = (zext(XA) << 128) | zext(XB); + tmp = tmp >> ((7 - (SHW+3)) * 32); + XT = tmp:16; +} + +define pcodeop xxselOp; +# ISA-cmt: xxsel - VSX Select +# ISA-info: xxsel - Form "XX4" Page 500 Category "VSX" +# binutils: vsx.d: 26c: f1 12 e7 bf xxsel vs40,vs50,vs60,vs62 +:xxsel XT,XA,XB,XC is $(NOTVLE) & OP=60 & XT & XA & XB & XC & BITS_4_5=3 { xxselOp(XA,XB,XC); } + +define pcodeop xxpermdiOp; +# :xxpermdi BITS_21_25,TX,A,AX,B,BX,DM is $(NOTVLE) & OP=60 & XOP_3_10=10 & BITS_21_25 & TX & A & AX & B & BX & DM { xxpermdiOp(A,B); } +# ISA-cmt: xxpermdi - VSX Permute Doubleword Immediate +# ISA-info: xxpermdi - Form "XX3" Page 500 Category "VSX" +# binutils: power7.d: 30: f0 64 29 50 xxpermdi vs3,vs4,vs5,1 +# binutils: power7.d: 34: f1 6c 69 57 xxpermdi vs43,vs44,vs45,1 +# binutils: power7.d: 38: f0 64 2a 50 xxpermdi vs3,vs4,vs5,2 +# binutils: power7.d: 3c: f1 6c 6a 57 xxpermdi vs43,vs44,vs45,2 +# binutils: vsx.d: 23c: f1 12 e1 57 xxpermdi vs40,vs50,vs60,1 +# binutils: vsx.d: 240: f1 12 e2 57 xxpermdi vs40,vs50,vs60,2 +:xxpermdi XT,XA,XB,DM is $(NOTVLE) & OP=60 & OE & DM & BITS_3_7=10 & XA & XB & XT { xxpermdiOp(XA,XB,XT); } + +define pcodeop xxmrghwOp; +# ISA-cmt: xxmrghw - VSX Merge High Word +# ISA-info: xxmrghw - Form "XX3" Page 499 Category "VSX" +# binutils: vsx.d: 230: f1 12 e0 97 xxmrghw vs40,vs50,vs60 +:xxmrghw XT,XA,XB is $(NOTVLE) & OP=60 & XOP_3_10=18 & XA & XB & XT { xxmrghwOp(XA,XB,XT); } + +define pcodeop xsadddpOp; +# ISA-cmt: xsadddp - VSX Scalar Add Double-Precision +# ISA-info: xsadddp - Form "XX3" Page 342 Category "VSX" +# binutils: vsx.d: 20: f1 12 e1 07 xsadddp vs40,vs50,vs60 +:xsadddp XT,XA,XB is $(NOTVLE) & OP=60 & XOP_3_10=32 & XA & XB & XT +{ + src1:8 = XA:8; + src2:8 = XB:8; + local src = src1 f+ src2; + XT[0,64] = src; +} + +define pcodeop xsmaddadpOp; +# ISA-cmt: xsmaddadp - VSX Scalar Multiply-Add Type-A Double-Precision +# ISA-info: xsmaddadp - Form "XX3" Page 365 Category "VSX" +# binutils: vsx.d: 54: f1 12 e1 0f xsmaddadp vs40,vs50,vs60 +:xsmaddadp XT,XA,XB is $(NOTVLE) & OP=60 & XOP_3_10=33 & XA & XB & XT { xsmaddadpOp(XA,XB,XT); } + +define pcodeop xscmpudpOp; +# ISA-cmt: xscmpudp - VSX Scalar Compare Unordered Double-Precision +# ISA-info: xscmpudp - Form "XX3" Page 349 Category "VSX" +# binutils: vsx.d: 28: f0 92 e1 1e xscmpudp cr1,vs50,vs60 
+:xscmpudp CRFD,XA,XB is $(NOTVLE) & OP=60 & XOP_3_10=35 & CRFD & BITS_21_22=0 & BIT_0=0 & XA & XB { xscmpudpOp(CRFD,XA,XB); } + +define pcodeop xssubdpOp; +# ISA-cmt: xssubdp - VSX Scalar Subtract Double-Precision +# ISA-info: xssubdp - Form "XX3" Page 393 Category "VSX" +# binutils: vsx.d: a8: f1 12 e1 47 xssubdp vs40,vs50,vs60 +:xssubdp XT,XA,XB is $(NOTVLE) & OP=60 & XOP_3_10=40 & XA & XB & XT { xssubdpOp(XA,XB,XT); } + +define pcodeop xsmaddmdpOp; +# ISA-cmt: xsmaddmdp - VSX Scalar Multiply-Add Type-M Double-Precision +# ISA-info: xsmaddmdp - Form "XX3" Page 365 Category "VSX" +# binutils: vsx.d: 58: f1 12 e1 4f xsmaddmdp vs40,vs50,vs60 +:xsmaddmdp XT,XA,XB is $(NOTVLE) & OP=60 & XOP_3_10=41 & XA & XB & XT { xsmaddmdpOp(XA,XB,XT); } + +define pcodeop xscmpodpOp; +# ISA-cmt: xscmpodp - VSX Scalar Compare Ordered Double-Precision +# ISA-info: xscmpodp - Form "XX3" Page 347 Category "VSX" +# binutils: vsx.d: 24: f0 92 e1 5e xscmpodp cr1,vs50,vs60 +:xscmpodp CRFD,XA,XB is $(NOTVLE) & OP=60 & XOP_3_10=43 & CRFD & BIT_0=0 & BITS_21_22=0 & XA & XB { xscmpodpOp(CRFD,XA,XB); } + +define pcodeop xsmuldpOp; +# ISA-cmt: xsmuldp - VSX Scalar Multiply Double-Precision +# ISA-info: xsmuldp - Form "XX3" Page 375 Category "VSX" +# binutils: vsx.d: 6c: f1 12 e1 87 xsmuldp vs40,vs50,vs60 +:xsmuldp XT,XA,XB is $(NOTVLE) & OP=60 & XOP_3_10=48 & XA & XB & XT { xsmuldpOp(XA,XB,XT); } + +define pcodeop xsmsubadpOp; +# ISA-cmt: xsmsubadp - VSX Scalar Multiply-Subtract Type-A Double-Precision +# ISA-info: xsmsubadp - Form "XX3" Page 372 Category "VSX" +# binutils: vsx.d: 64: f1 12 e1 8f xsmsubadp vs40,vs50,vs60 +:xsmsubadp XT,XA,XB is $(NOTVLE) & OP=60 & XOP_3_10=49 & XA & XB & XT { xsmsubadpOp(XA,XB,XT); } + +define pcodeop xxmrglwOp; +# ISA-cmt: xxmrglw - VSX Merge Low Word +# ISA-info: xxmrglw - Form "XX3" Page 499 Category "VSX" +# binutils: vsx.d: 234: f1 12 e1 97 xxmrglw vs40,vs50,vs60 +:xxmrglw XT,XA,XB is $(NOTVLE) & OP=60 & XOP_3_10=50 & XA & XB & XT { xxmrglwOp(XA,XB,XT); } + +define pcodeop xsdivdpOp; +# ISA-cmt: xsdivdp - VSX Scalar Divide Double-Precision +# ISA-info: xsdivdp - Form "XX3" Page 363 Category "VSX" +# binutils: vsx.d: 50: f1 12 e1 c7 xsdivdp vs40,vs50,vs60 +:xsdivdp XT,XA,XB is $(NOTVLE) & OP=60 & XOP_3_10=56 & XA & XB & XT { xsdivdpOp(XA,XB,XT); } + +define pcodeop xsmsubmdpOp; +# ISA-cmt: xsmsubmdp - VSX Scalar Multiply-Subtract Type-M Double-Precision +# ISA-info: xsmsubmdp - Form "XX3" Page 372 Category "VSX" +# binutils: vsx.d: 68: f1 12 e1 cf xsmsubmdp vs40,vs50,vs60 +:xsmsubmdp XT,XA,XB is $(NOTVLE) & OP=60 & XOP_3_10=57 & XA & XB & XT { xsmsubmdpOp(XA,XB,XT); } + +define pcodeop xstdivdpOp; +# ISA-cmt: xstdivdp - VSX Scalar Test for software Divide Double-Precision +# ISA-info: xstdivdp - Form "XX3" Page 395 Category "VSX" +# binutils: vsx.d: ac: f0 92 e1 ee xstdivdp cr1,vs50,vs60 +:xstdivdp CRFD,XA,XB is $(NOTVLE) & OP=60 & XOP_3_10=61 & CRFD & BIT_0=0 & BITS_21_22=0 & XA & XB { xstdivdpOp(CRFD,XA,XB); } + +define pcodeop xvaddspOp; +# ISA-cmt: xvaddsp - VSX Vector Add Single-Precision +# ISA-info: xvaddsp - Form "XX3" Page 402 Category "VSX" +# binutils: vsx.d: c0: f1 12 e2 07 xvaddsp vs40,vs50,vs60 +:xvaddsp XT,XA,XB is $(NOTVLE) & OP=60 & XOP_3_10=64 & XA & XB & XT { xvaddspOp(XA,XB,XT); } + +define pcodeop xvmaddaspOp; +# ISA-cmt: xvmaddasp - VSX Vector Multiply-Add Type-A Single-Precision +# ISA-info: xvmaddasp - Form "XX3" Page 437 Category "VSX" +# binutils: vsx.d: 164: f1 12 e2 0f xvmaddasp vs40,vs50,vs60 +:xvmaddasp XT,XA,XB is $(NOTVLE) & OP=60 & XOP_3_10=65 & XA & 
XB & XT { xvmaddaspOp(XA,XB,XT); } + +define pcodeop xvcmpeqspOp; +# ISA-cmt: xvcmpeqsp - VSX Vector Compare Equal To Single-Precision +# ISA-info: xvcmpeqsp - Form "XX3" Page 405 Category "VSX" +# binutils: vsx.d: cc: f1 12 e2 1f xvcmpeqsp vs40,vs50,vs60 +:xvcmpeqsp XT,XA,XB is $(NOTVLE) & OP=60 & XOP_3_9=67 & BIT_10=0 & XA & XB & XT { xvcmpeqspOp(XA,XB,XT); } + +define pcodeop xvcmpeqspDotOp; +# ISA-cmt: xvcmpeqsp. - VSX Vector Compare Equal To Single-Precision & Record +# ISA-info: xvcmpeqsp. - Form "XX3" Page 405 Category "VSX" +# binutils: mytest.d: 1b8: f0 43 26 18 xvcmpeqsp. vs2,vs3,vs4 +:xvcmpeqsp. XT,XA,XB is $(NOTVLE) & OP=60 & XOP_3_9=67 & BIT_10=1 & XA & XB & XT { xvcmpeqspDotOp(XA,XB,XT); } + +define pcodeop xvsubspOp; +# ISA-cmt: xvsubsp - VSX Vector Subtract Single-Precision +# ISA-info: xvsubsp - Form "XX3" Page 491 Category "VSX" +# binutils: vsx.d: 208: f1 12 e2 47 xvsubsp vs40,vs50,vs60 +:xvsubsp XT,XA,XB is $(NOTVLE) & OP=60 & XOP_3_10=72 & XA & XB & XT { xvsubspOp(XA,XB,XT); } + +define pcodeop xscvdpuxwsOp; +# ISA-cmt: xscvdpuxws - VSX Scalar truncate Double-Precision to integer and Convert to Unsigned Fixed-Point Word format with Saturate +# ISA-info: xscvdpuxws - Form "XX2" Page 359 Category "VSX" +# binutils: vsx.d: 40: f1 00 e1 23 xscvdpuxws vs40,vs60 +:xscvdpuxws XT,XB is $(NOTVLE) & OP=60 & XOP_2_10=72 & BITS_16_20=0 & XB & XT { xscvdpuxwsOp(XB,XT); } + +define pcodeop xvmaddmspOp; +# ISA-cmt: xvmaddmsp - VSX Vector Multiply-Add Type-M Single-Precision +# ISA-info: xvmaddmsp - Form "XX3" Page 440 Category "VSX" +# binutils: vsx.d: 168: f1 12 e2 4f xvmaddmsp vs40,vs50,vs60 +:xvmaddmsp XT,XA,XB is $(NOTVLE) & OP=60 & XOP_3_10=73 & XA & XB & XT { xvmaddmspOp(XA,XB,XT); } + +define pcodeop xsrdpiOp; +# ISA-cmt: xsrdpi - VSX Scalar Round to Double-Precision Integer +# ISA-info: xsrdpi - Form "XX2" Page 386 Category "VSX" +# binutils: vsx.d: 88: f1 00 e1 27 xsrdpi vs40,vs60 +:xsrdpi XT,XB is $(NOTVLE) & OP=60 & XOP_2_10=73 & BITS_16_20=0 & XB & XT { xsrdpiOp(XB,XT); } + +define pcodeop xsrsqrtedpOp; +# ISA-cmt: xsrsqrtedp - VSX Scalar Reciprocal Square Root Estimate Double-Precision +# ISA-info: xsrsqrtedp - Form "XX2" Page 391 Category "VSX" +# binutils: vsx.d: a0: f1 00 e1 2b xsrsqrtedp vs40,vs60 +:xsrsqrtedp XT,XB is $(NOTVLE) & OP=60 & XOP_2_10=74 & BITS_16_20=0 & XB & XT { xsrsqrtedpOp(XB,XT); } + +define pcodeop xssqrtdpOp; +# ISA-cmt: xssqrtdp - VSX Scalar Square Root Double-Precision +# ISA-info: xssqrtdp - Form "XX2" Page 392 Category "VSX" +# binutils: vsx.d: a4: f1 00 e1 2f xssqrtdp vs40,vs60 +:xssqrtdp XT,XB is $(NOTVLE) & OP=60 & XOP_2_10=75 & BITS_16_20=0 & XB & XT { xssqrtdpOp(XB,XT); } + +define pcodeop xvcmpgtspOp; +# ISA-cmt: xvcmpgtsp - VSX Vector Compare Greater Than Single-Precision +# ISA-info: xvcmpgtsp - Form "XX3" Page 409 Category "VSX" +# binutils: vsx.d: ec: f1 12 e2 5f xvcmpgtsp vs40,vs50,vs60 +:xvcmpgtsp XT,XA,XB is $(NOTVLE) & OP=60 & XOP_3_9=75 & BIT_10=0 & XA & XB & XT { xvcmpgtspOp(XA,XB,XT); } + +define pcodeop xvcmpgtspDotOp; +# ISA-cmt: xvcmpgtsp. - VSX Vector Compare Greater Than Single-Precision & Record +# ISA-info: xvcmpgtsp. - Form "XX3" Page 409 Category "VSX" +# binutils: mytest.d: 1bc: f0 43 26 58 xvcmpgtsp. vs2,vs3,vs4 +:xvcmpgtsp. 
XT,XA,XB is $(NOTVLE) & OP=60 & XOP_3_9=75 & BIT_10=1 & XA & XB & XT { xvcmpgtspDotOp(XA,XB,XT); } + +define pcodeop xvmulspOp; +# ISA-cmt: xvmulsp - VSX Vector Multiply Single-Precision +# ISA-info: xvmulsp - Form "XX3" Page 459 Category "VSX" +# binutils: vsx.d: 190: f1 12 e2 87 xvmulsp vs40,vs50,vs60 +:xvmulsp XT,XA,XB is $(NOTVLE) & OP=60 & XOP_3_10=80 & XA & XB & XT { xvmulspOp(XA,XB,XT); } + +define pcodeop xvmsubaspOp; +# ISA-cmt: xvmsubasp - VSX Vector Multiply-Subtract Type-A Single-Precision +# ISA-info: xvmsubasp - Form "XX3" Page 451 Category "VSX" +# binutils: vsx.d: 184: f1 12 e2 8f xvmsubasp vs40,vs50,vs60 +:xvmsubasp XT,XA,XB is $(NOTVLE) & OP=60 & XOP_3_10=81 & XA & XB & XT { xvmsubaspOp(XA,XB,XT); } + +define pcodeop xvcmpgespOp; +# ISA-cmt: xvcmpgesp - VSX Vector Compare Greater Than or Equal To Single-Precision +# ISA-info: xvcmpgesp - Form "XX3" Page 407 Category "VSX" +# binutils: vsx.d: dc: f1 12 e2 9f xvcmpgesp vs40,vs50,vs60 +:xvcmpgesp XT,XA,XB is $(NOTVLE) & OP=60 & XOP_3_9=83 & BIT_10=0 & XA & XB & XT { xvcmpgespOp(XA,XB,XT); } + +define pcodeop xvcmpgespDotOp; +# ISA-cmt: xvcmpgesp. - VSX Vector Compare Greater Than or Equal To Single-Precision & Record +# ISA-info: xvcmpgesp. - Form "XX3" Page 407 Category "VSX" +# binutils: mytest.d: 1c0: f0 43 26 98 xvcmpgesp. vs2,vs3,vs4 +:xvcmpgesp. XT,XA,XB is $(NOTVLE) & OP=60 & XOP_3_9=83 & BIT_10=1 & XA & XB & XT { xvcmpgespDotOp(XA,XB,XT); } + +define pcodeop xvdivspOp; +# ISA-cmt: xvdivsp - VSX Vector Divide Single-Precision +# ISA-info: xvdivsp - Form "XX3" Page 435 Category "VSX" +# binutils: vsx.d: 158: f1 12 e2 c7 xvdivsp vs40,vs50,vs60 +:xvdivsp XT,XA,XB is $(NOTVLE) & OP=60 & XOP_3_10=88 & XA & XB & XT { xvdivspOp(XA,XB,XT); } + +define pcodeop xscvdpsxwsOp; +# ISA-cmt: xscvdpsxws - VSX Scalar truncate Double-Precision to integer and Convert to Signed Fixed-Point Word format with Saturate +# ISA-info: xscvdpsxws - Form "XX2" Page 355 Category "VSX" +# binutils: vsx.d: 38: f1 00 e1 63 xscvdpsxws vs40,vs60 +:xscvdpsxws XT,XB is $(NOTVLE) & OP=60 & XOP_2_10=88 & BITS_16_20=0 & XB & XT { xscvdpsxwsOp(XB,XT); } + +define pcodeop xvmsubmspOp; +# ISA-cmt: xvmsubmsp - VSX Vector Multiply-Subtract Type-M Single-Precision +# ISA-info: xvmsubmsp - Form "XX3" Page 454 Category "VSX" +# binutils: vsx.d: 188: f1 12 e2 cf xvmsubmsp vs40,vs50,vs60 +:xvmsubmsp XT,XA,XB is $(NOTVLE) & OP=60 & XOP_3_10=89 & XA & XB & XT { xvmsubmspOp(XA,XB,XT); } + +define pcodeop xsrdpizOp; +# ISA-cmt: xsrdpiz - VSX Scalar Round to Double-Precision Integer toward Zero +# ISA-info: xsrdpiz - Form "XX2" Page 389 Category "VSX" +# binutils: vsx.d: 98: f1 00 e1 67 xsrdpiz vs40,vs60 +:xsrdpiz XT,XB is $(NOTVLE) & OP=60 & XOP_2_10=89 & BITS_16_20=0 & XB & XT { xsrdpizOp(XB,XT); } + +define pcodeop xsredpOp; +# ISA-cmt: xsredp - VSX Scalar Reciprocal Estimate Double-Precision +# ISA-info: xsredp - Form "XX2" Page 390 Category "VSX" +# binutils: vsx.d: 9c: f1 00 e1 6b xsredp vs40,vs60 +:xsredp XT,XB is $(NOTVLE) & OP=60 & XOP_2_10=90 & BITS_16_20=0 & XB & XT { xsredpOp(XB,XT); } + +define pcodeop xvtdivspOp; +# ISA-cmt: xvtdivsp - VSX Vector Test for software Divide Single-Precision +# ISA-info: xvtdivsp - Form "XX3" Page 494 Category "VSX" +# binutils: vsx.d: 210: f0 92 e2 ee xvtdivsp cr1,vs50,vs60 +:xvtdivsp CRFD,XA,XB is $(NOTVLE) & OP=60 & XOP_3_10=93 & CRFD & BIT_0=0 & BITS_21_22=0 & XA & XB { xvtdivspOp(CRFD,XA,XB); } + +define pcodeop xvadddpOp; +# ISA-cmt: xvadddp - VSX Vector Add Double-Precision +# ISA-info: xvadddp - Form "XX3" Page 398 
Category "VSX" +# binutils: vsx.d: bc: f1 12 e3 07 xvadddp vs40,vs50,vs60 +:xvadddp XT,XA,XB is $(NOTVLE) & OP=60 & XOP_3_10=96 & XA & XB & XT { xvadddpOp(XA,XB,XT); } + +define pcodeop xvmaddadpOp; +# ISA-cmt: xvmaddadp - VSX Vector Multiply-Add Type-A Double-Precision +# ISA-info: xvmaddadp - Form "XX3" Page 437 Category "VSX" +# binutils: vsx.d: 15c: f1 12 e3 0f xvmaddadp vs40,vs50,vs60 +:xvmaddadp XT,XA,XB is $(NOTVLE) & OP=60 & XOP_3_10=97 & XA & XB & XT { xvmaddadpOp(XA,XB,XT); } + +define pcodeop xvcmpeqdpOp; +# ISA-cmt: xvcmpeqdp - VSX Vector Compare Equal To Double-Precision +# ISA-info: xvcmpeqdp - Form "XX3" Page 404 Category "VSX" +# binutils: vsx.d: c4: f1 12 e3 1f xvcmpeqdp vs40,vs50,vs60 +:xvcmpeqdp XT,XA,XB is $(NOTVLE) & OP=60 & XOP_3_9=99 & BIT_10=0 & XA & XB & XT { xvcmpeqdpOp(XA,XB,XT); } + +define pcodeop xvcmpeqdpDotOp; +# ISA-cmt: xvcmpeqdp. - VSX Vector Compare Equal To Double-Precision & Record +# ISA-info: xvcmpeqdp. - Form "XX3" Page 404 Category "VSX" +# binutils: mytest.d: 1c4: f0 43 27 18 xvcmpeqdp. vs2,vs3,vs4 +:xvcmpeqdp. XT,XA,XB is $(NOTVLE) & OP=60 & XOP_3_9=99 & BIT_10=1 & XA & XB & XT { xvcmpeqdpDotOp(XA,XB,XT); } + +define pcodeop xvsubdpOp; +# ISA-cmt: xvsubdp - VSX Vector Subtract Double-Precision +# ISA-info: xvsubdp - Form "XX3" Page 489 Category "VSX" +# binutils: vsx.d: 204: f1 12 e3 47 xvsubdp vs40,vs50,vs60 +:xvsubdp XT,XA,XB is $(NOTVLE) & OP=60 & XOP_3_10=104 & XA & XB & XT { xvsubdpOp(XA,XB,XT); } + +define pcodeop xvmaddmdpOp; +# ISA-cmt: xvmaddmdp - VSX Vector Multiply-Add Type-M Double-Precision +# ISA-info: xvmaddmdp - Form "XX3" Page 440 Category "VSX" +# binutils: vsx.d: 160: f1 12 e3 4f xvmaddmdp vs40,vs50,vs60 +:xvmaddmdp XT,XA,XB is $(NOTVLE) & OP=60 & XOP_3_10=105 & XA & XB & XT { xvmaddmdpOp(XA,XB,XT); } + +define pcodeop xsrdpipOp; +# ISA-cmt: xsrdpip - VSX Scalar Round to Double-Precision Integer toward +Infinity +# ISA-info: xsrdpip - Form "XX2" Page 388 Category "VSX" +# binutils: vsx.d: 94: f1 00 e1 a7 xsrdpip vs40,vs60 +:xsrdpip XT,XB is $(NOTVLE) & OP=60 & XOP_2_10=105 & BITS_16_20=0 & XB & XT { xsrdpipOp(XB,XT); } + +define pcodeop xstsqrtdpOp; +# ISA-cmt: xstsqrtdp - VSX Scalar Test for software Square Root Double-Precision +# ISA-info: xstsqrtdp - Form "XX2" Page 396 Category "VSX" +# binutils: vsx.d: b0: f0 80 e1 aa xstsqrtdp cr1,vs60 +:xstsqrtdp CRFD,XB is $(NOTVLE) & OP=60 & XOP_2_10=106 & CRFD & BIT_0=0 & BITS_21_22=0 & BITS_16_20=0 & XB { xstsqrtdpOp(CRFD,XB); } + +define pcodeop xsrdpicOp; +# ISA-cmt: xsrdpic - VSX Scalar Round to Double-Precision Integer using Current rounding mode +# ISA-info: xsrdpic - Form "XX2" Page 387 Category "VSX" +# binutils: vsx.d: 8c: f1 00 e1 af xsrdpic vs40,vs60 +:xsrdpic XT,XB is $(NOTVLE) & OP=60 & XOP_2_10=107 & BITS_16_20=0 & XB & XT { xsrdpicOp(XB,XT); } + +define pcodeop xvcmpgtdpOp; +# ISA-cmt: xvcmpgtdp - VSX Vector Compare Greater Than Double-Precision +# ISA-info: xvcmpgtdp - Form "XX3" Page 408 Category "VSX" +# binutils: vsx.d: e4: f1 12 e3 5f xvcmpgtdp vs40,vs50,vs60 +:xvcmpgtdp XT,XA,XB is $(NOTVLE) & OP=60 & XOP_3_9=107 & BIT_10=0 & XA & XB & XT { xvcmpgtdpOp(XA,XB,XT); } + +define pcodeop xvcmpgtdpDotOp; +# ISA-cmt: xvcmpgtdp. - VSX Vector Compare Greater Than Double-Precision & Record +# ISA-info: xvcmpgtdp. - Form "XX3" Page 408 Category "VSX" +# binutils: mytest.d: 1c8: f0 43 27 58 xvcmpgtdp. vs2,vs3,vs4 +:xvcmpgtdp. 
XT,XA,XB is $(NOTVLE) & OP=60 & XOP_3_9=107 & BIT_10=1 & XA & XB & XT { xvcmpgtdpDotOp(XA,XB,XT); } + +define pcodeop xvmuldpOp; +# ISA-cmt: xvmuldp - VSX Vector Multiply Double-Precision +# ISA-info: xvmuldp - Form "XX3" Page 457 Category "VSX" +# binutils: vsx.d: 18c: f1 12 e3 87 xvmuldp vs40,vs50,vs60 +:xvmuldp XT,XA,XB is $(NOTVLE) & OP=60 & XOP_3_10=112 & XA & XB & XT { xvmuldpOp(XA,XB,XT); } + +define pcodeop xvmsubadpOp; +# ISA-cmt: xvmsubadp - VSX Vector Multiply-Subtract Type-A Double-Precision +# ISA-info: xvmsubadp - Form "XX3" Page 451 Category "VSX" +# binutils: vsx.d: 17c: f1 12 e3 8f xvmsubadp vs40,vs50,vs60 +:xvmsubadp XT,XA,XB is $(NOTVLE) & OP=60 & XOP_3_10=113 & XA & XB & XT { xvmsubadpOp(XA,XB,XT); } + +define pcodeop xvcmpgedpOp; +# ISA-cmt: xvcmpgedp - VSX Vector Compare Greater Than or Equal To Double-Precision +# ISA-info: xvcmpgedp - Form "XX3" Page 406 Category "VSX" +# binutils: vsx.d: d4: f1 12 e3 9f xvcmpgedp vs40,vs50,vs60 +:xvcmpgedp XT,XA,XB is $(NOTVLE) & OP=60 & XOP_3_9=115 & BIT_10=0 & XA & XB & XT { xvcmpgedpOp(XA,XB,XT); } + +define pcodeop xvcmpgedpDotOp; +# ISA-cmt: xvcmpgedp. - VSX Vector Compare Greater Than or Equal To Double-Precision & Record +# ISA-info: xvcmpgedp. - Form "XX3" Page 406 Category "VSX" +# binutils: mytest.d: 1cc: f0 43 27 98 xvcmpgedp. vs2,vs3,vs4 +:xvcmpgedp. XT,XA,XB is $(NOTVLE) & OP=60 & XOP_3_9=115 & BIT_10=1 & XA & XB & XT { xvcmpgedpDotOp(XA,XB,XT); } + +define pcodeop xvdivdpOp; +# ISA-cmt: xvdivdp - VSX Vector Divide Double-Precision +# ISA-info: xvdivdp - Form "XX3" Page 433 Category "VSX" +# binutils: vsx.d: 154: f1 12 e3 c7 xvdivdp vs40,vs50,vs60 +:xvdivdp XT,XA,XB is $(NOTVLE) & OP=60 & XOP_3_10=120 & XA & XB & XT { xvdivdpOp(XA,XB,XT); } + +define pcodeop xvmsubmdpOp; +# ISA-cmt: xvmsubmdp - VSX Vector Multiply-Subtract Type-M Double-Precision +# ISA-info: xvmsubmdp - Form "XX3" Page 454 Category "VSX" +# binutils: vsx.d: 180: f1 12 e3 cf xvmsubmdp vs40,vs50,vs60 +:xvmsubmdp XT,XA,XB is $(NOTVLE) & OP=60 & XOP_3_10=121 & XA & XB & XT { xvmsubmdpOp(XA,XB,XT); } + +define pcodeop xsrdpimOp; +# ISA-cmt: xsrdpim - VSX Scalar Round to Double-Precision Integer toward -Infinity +# ISA-info: xsrdpim - Form "XX2" Page 388 Category "VSX" +# binutils: vsx.d: 90: f1 00 e1 e7 xsrdpim vs40,vs60 +:xsrdpim XT,XB is $(NOTVLE) & OP=60 & XOP_2_10=121 & BITS_16_20=0 & XB & XT { xsrdpimOp(XB,XT); } + +define pcodeop xvtdivdpOp; +# ISA-cmt: xvtdivdp - VSX Vector Test for software Divide Double-Precision +# ISA-info: xvtdivdp - Form "XX3" Page 493 Category "VSX" +# binutils: vsx.d: 20c: f0 92 e3 ee xvtdivdp cr1,vs50,vs60 +:xvtdivdp CRFD,XA,XB is $(NOTVLE) & OP=60 & XOP_3_10=125 & CRFD & BIT_0=0 & BITS_21_22=0 & XA & XB { xvtdivdpOp(CRFD,XA,XB); } + +# ISA-cmt: xxland - VSX Logical AND +# ISA-info: xxland - Form "XX3" Page 496 Category "VSX" +# binutils: vsx.d: 21c: f1 12 e4 17 xxland vs40,vs50,vs60 +:xxland XT,XA,XB is $(NOTVLE) & OP=60 & XOP_3_10=130 & XA & XB & XT { + XT = XA & XB; +} + +define pcodeop xvcvspuxwsOp; +# ISA-cmt: xvcvspuxws - VSX Vector truncate Single-Precision to integer and Convert to Unsigned Fixed-Point Word Saturate +# ISA-info: xvcvspuxws - Form "XX2" Page 427 Category "VSX" +# binutils: vsx.d: 130: f1 00 e2 23 xvcvspuxws vs40,vs60 +:xvcvspuxws XT,XB is $(NOTVLE) & OP=60 & XOP_2_10=136 & BI_BITS=0 & XB & XT { xvcvspuxwsOp(XB,XT); } + +define pcodeop xvrspiOp; +# ISA-cmt: xvrspi - VSX Vector Round to Single-Precision Integer +# ISA-info: xvrspi - Form "XX2" Page 482 Category "VSX" +# binutils: vsx.d: 1e0: f1 00 e2 
27 xvrspi vs40,vs60
+:xvrspi XT,XB is $(NOTVLE) & OP=60 & XOP_2_10=137 & BI_BITS=0 & XB & XT { xvrspiOp(XB,XT); }
+
+# ISA-cmt: xxlandc - VSX Logical AND with Complement
+# ISA-info: xxlandc - Form "XX3" Page 496 Category "VSX"
+# binutils: vsx.d: 220: f1 12 e4 57 xxlandc vs40,vs50,vs60
+:xxlandc XT,XA,XB is $(NOTVLE) & OP=60 & XOP_3_10=138 & XA & XB & XT {
+ XT = XA & (~XB);
+}
+
+define pcodeop xvrsqrtespOp;
+# ISA-cmt: xvrsqrtesp - VSX Vector Reciprocal Square Root Estimate Single-Precision
+# ISA-info: xvrsqrtesp - Form "XX2" Page 486 Category "VSX"
+# binutils: vsx.d: 1f8: f1 00 e2 2b xvrsqrtesp vs40,vs60
+:xvrsqrtesp XT,XB is $(NOTVLE) & OP=60 & XOP_2_10=138 & BI_BITS=0 & XB & XT { xvrsqrtespOp(XB,XT); }
+
+define pcodeop xvsqrtspOp;
+# ISA-cmt: xvsqrtsp - VSX Vector Square Root Single-Precision
+# ISA-info: xvsqrtsp - Form "XX2" Page 488 Category "VSX"
+# binutils: vsx.d: 200: f1 00 e2 2f xvsqrtsp vs40,vs60
+:xvsqrtsp XT,XB is $(NOTVLE) & OP=60 & XOP_2_10=139 & BI_BITS=0 & XB & XT { xvsqrtspOp(XB,XT); }
+
+# ISA-cmt: xxlor - VSX Logical OR
+# ISA-info: xxlor - Form "XX3" Page 497 Category "VSX"
+# binutils: vsx.d: 228: f1 12 e4 97 xxlor vs40,vs50,vs60
+:xxlor XT,XA,XB is $(NOTVLE) & OP=60 & XOP_3_10=146 & XA & XB & XT {
+ XT = XA | XB;
+}
+
+define pcodeop xvcvspsxwsOp;
+# ISA-cmt: xvcvspsxws - VSX Vector truncate Single-Precision to integer and Convert to Signed Fixed-Point Word format with Saturate
+# ISA-info: xvcvspsxws - Form "XX2" Page 423 Category "VSX"
+# binutils: vsx.d: 128: f1 00 e2 63 xvcvspsxws vs40,vs60
+:xvcvspsxws XT,XB is $(NOTVLE) & OP=60 & XOP_2_10=152 & BI_BITS=0 & XB & XT { xvcvspsxwsOp(XB,XT); }
+
+define pcodeop xvrspizOp;
+# ISA-cmt: xvrspiz - VSX Vector Round to Single-Precision Integer toward Zero
+# ISA-info: xvrspiz - Form "XX2" Page 484 Category "VSX"
+# binutils: vsx.d: 1f0: f1 00 e2 67 xvrspiz vs40,vs60
+:xvrspiz XT,XB is $(NOTVLE) & OP=60 & XOP_2_10=153 & BI_BITS=0 & XB & XT { xvrspizOp(XB,XT); }
+
+# ISA-cmt: xxlxor - VSX Logical XOR
+# ISA-info: xxlxor - Form "XX3" Page 498 Category "VSX"
+# binutils: vsx.d: 22c: f1 12 e4 d7 xxlxor vs40,vs50,vs60
+:xxlxor XT,XA,XB is $(NOTVLE) & OP=60 & XOP_3_10=154 & XA & XB & XT {
+ XT = XA ^ XB;
+}
+
+define pcodeop xvrespOp;
+# ISA-cmt: xvresp - VSX Vector Reciprocal Estimate Single-Precision
+# ISA-info: xvresp - Form "XX2" Page 481 Category "VSX"
+# binutils: vsx.d: 1dc: f1 00 e2 6b xvresp vs40,vs60
+:xvresp XT,XB is $(NOTVLE) & OP=60 & XOP_2_10=154 & BI_BITS=0 & XB & XT { xvrespOp(XB,XT); }
+
+define pcodeop xsmaxdpOp;
+# ISA-cmt: xsmaxdp - VSX Scalar Maximum Double-Precision
+# ISA-info: xsmaxdp - Form "XX3" Page 368 Category "VSX"
+# binutils: vsx.d: 5c: f1 12 e5 07 xsmaxdp vs40,vs50,vs60
+:xsmaxdp XT,XA,XB is $(NOTVLE) & OP=60 & XOP_3_10=160 & XA & XB & XT { xsmaxdpOp(XA,XB,XT); }
+
+define pcodeop xsnmaddadpOp;
+# ISA-cmt: xsnmaddadp - VSX Scalar Negative Multiply-Add Type-A Double-Precision
+# ISA-info: xsnmaddadp - Form "XX3" Page 378 Category "VSX"
+# binutils: vsx.d: 78: f1 12 e5 0f xsnmaddadp vs40,vs50,vs60
+:xsnmaddadp XT,XA,XB is $(NOTVLE) & OP=60 & XOP_3_10=161 & XA & XB & XT { xsnmaddadpOp(XA,XB,XT); }
+
+define pcodeop xxlnorOp;
+# ISA-cmt: xxlnor - VSX Logical NOR
+# ISA-info: xxlnor - Form "XX3" Page 497 Category "VSX"
+# binutils: vsx.d: 224: f1 12 e5 17 xxlnor vs40,vs50,vs60
+:xxlnor XT,XA,XB is $(NOTVLE) & OP=60 & XOP_3_10=162 & XA & XB & XT {
+ XT = ~(XA | XB);
+}
+
+define pcodeop xxspltwOp;
+# ISA-cmt: xxspltw - VSX Splat Word
+# ISA-info: xxspltw - Form "XX2" Page 501 Category "VSX"
+#
binutils: vsx.d: 274: f1 02 e2 93 xxspltw vs40,vs60,2 +:xxspltw XT,XB,UIM is $(NOTVLE) & OP=60 & XOP_2_10=164 & BITS_18_20=0 & UIM & XB & XT { xxspltwOp(XB,XT); } + +define pcodeop xsmindpOp; +# ISA-cmt: xsmindp - VSX Scalar Minimum Double-Precision +# ISA-info: xsmindp - Form "XX3" Page 370 Category "VSX" +# binutils: vsx.d: 60: f1 12 e5 47 xsmindp vs40,vs50,vs60 +:xsmindp XT,XA,XB is $(NOTVLE) & OP=60 & XOP_3_10=168 & XA & XB & XT { xsmindpOp(XA,XB,XT); } + +define pcodeop xvcvuxwspOp; +# ISA-cmt: xvcvuxwsp - VSX Vector Convert and round Unsigned Fixed-Point Word to Single-Precision format +# ISA-info: xvcvuxwsp - Form "XX2" Page 432 Category "VSX" +# binutils: vsx.d: 150: f1 00 e2 a3 xvcvuxwsp vs40,vs60 +:xvcvuxwsp XT,XB is $(NOTVLE) & OP=60 & XOP_2_10=168 & BI_BITS=0 & XB & XT { xvcvuxwspOp(XB,XT); } + +define pcodeop xsnmaddmdpOp; +# ISA-cmt: xsnmaddmdp - VSX Scalar Negative Multiply-Add Type-M Double-Precision +# ISA-info: xsnmaddmdp - Form "XX3" Page 378 Category "VSX" +# binutils: vsx.d: 7c: f1 12 e5 4f xsnmaddmdp vs40,vs50,vs60 +:xsnmaddmdp XT,XA,XB is $(NOTVLE) & OP=60 & XOP_3_10=169 & XA & XB & XT { xsnmaddmdpOp(XA,XB,XT); } + +define pcodeop xvrspipOp; +# ISA-cmt: xvrspip - VSX Vector Round to Single-Precision Integer toward +Infinity +# ISA-info: xvrspip - Form "XX2" Page 483 Category "VSX" +# binutils: vsx.d: 1ec: f1 00 e2 a7 xvrspip vs40,vs60 +:xvrspip XT,XB is $(NOTVLE) & OP=60 & XOP_2_10=169 & BI_BITS=0 & XB & XT { xvrspipOp(XB,XT); } + +define pcodeop xvtsqrtspOp; +# ISA-cmt: xvtsqrtsp - VSX Vector Test for software Square Root Single-Precision +# ISA-info: xvtsqrtsp - Form "XX2" Page 495 Category "VSX" +# binutils: vsx.d: 218: f0 80 e2 aa xvtsqrtsp cr1,vs60 +:xvtsqrtsp CRFD,XB is $(NOTVLE) & OP=60 & XOP_2_10=170 & CRFD & BITS_21_22=0 & BITS_16_20=0 & BIT_0=0 & XB { xvtsqrtspOp(CRFD,XB); } + +define pcodeop xvrspicOp; +# ISA-cmt: xvrspic - VSX Vector Round to Single-Precision Integer using Current rounding mode +# ISA-info: xvrspic - Form "XX2" Page 482 Category "VSX" +# binutils: vsx.d: 1e4: f1 00 e2 af xvrspic vs40,vs60 +:xvrspic XT,XB is $(NOTVLE) & OP=60 & XOP_2_10=171 & BITS_16_20=0 & XB & XT { xvrspicOp(XB,XT); } + +define pcodeop xscpsgndpOp; +# ISA-cmt: xscpsgndp - VSX Scalar Copy Sign Double-Precision +# ISA-info: xscpsgndp - Form "XX3" Page 351 Category "VSX" +# binutils: vsx.d: 2c: f1 12 e5 87 xscpsgndp vs40,vs50,vs60 +:xscpsgndp XT,XA,XB is $(NOTVLE) & OP=60 & XOP_3_10=176 & XA & XB & XT { xscpsgndpOp(XA,XB,XT); } + +define pcodeop xsnmsubadpOp; +# ISA-cmt: xsnmsubadp - VSX Scalar Negative Multiply-Subtract Type-A Double-Precision +# ISA-info: xsnmsubadp - Form "XX3" Page 383 Category "VSX" +# binutils: vsx.d: 80: f1 12 e5 8f xsnmsubadp vs40,vs50,vs60 +:xsnmsubadp XT,XA,XB is $(NOTVLE) & OP=60 & XOP_3_10=177 & XA & XB & XT { xsnmsubadpOp(XA,XB,XT); } + +define pcodeop xvcvsxwspOp; +# ISA-cmt: xvcvsxwsp - VSX Vector Convert and round Signed Fixed-Point Word to Single-Precision format +# ISA-info: xvcvsxwsp - Form "XX2" Page 430 Category "VSX" +# binutils: vsx.d: 140: f1 00 e2 e3 xvcvsxwsp vs40,vs60 +:xvcvsxwsp XT,XB is $(NOTVLE) & OP=60 & XOP_2_10=184 & BITS_16_20=0 & XB & XT { xvcvsxwspOp(XB,XT); } + +define pcodeop xsnmsubmdpOp; +# ISA-cmt: xsnmsubmdp - VSX Scalar Negative Multiply-Subtract Type-M Double-Precision +# ISA-info: xsnmsubmdp - Form "XX3" Page 383 Category "VSX" +# binutils: vsx.d: 84: f1 12 e5 cf xsnmsubmdp vs40,vs50,vs60 +:xsnmsubmdp XT,XA,XB is $(NOTVLE) & OP=60 & XOP_3_10=185 & XA & XB & XT { xsnmsubmdpOp(XA,XB,XT); } + +define pcodeop 
xvrspimOp; +# ISA-cmt: xvrspim - VSX Vector Round to Single-Precision Integer toward -Infinity +# ISA-info: xvrspim - Form "XX2" Page 483 Category "VSX" +# binutils: vsx.d: 1e8: f1 00 e2 e7 xvrspim vs40,vs60 +:xvrspim XT,XB is $(NOTVLE) & OP=60 & XOP_2_10=185 & BITS_16_20=0 & XB & XT { xvrspimOp(XB,XT); } + +define pcodeop xvmaxspOp; +# ISA-cmt: xvmaxsp - VSX Vector Maximum Single-Precision +# ISA-info: xvmaxsp - Form "XX3" Page 445 Category "VSX" +# binutils: vsx.d: 170: f1 12 e6 07 xvmaxsp vs40,vs50,vs60 +:xvmaxsp XT,XA,XB is $(NOTVLE) & OP=60 & XOP_3_10=192 & XA & XB & XT { xvmaxspOp(XA,XB,XT); } + +define pcodeop xvnmaddaspOp; +# ISA-cmt: xvnmaddasp - VSX Vector Negative Multiply-Add Type-A Single-Precision +# ISA-info: xvnmaddasp - Form "XX3" Page 463 Category "VSX" +# binutils: vsx.d: 1ac: f1 12 e6 0f xvnmaddasp vs40,vs50,vs60 +:xvnmaddasp XT,XA,XB is $(NOTVLE) & OP=60 & XOP_3_10=193 & XA & XB & XT { xvnmaddaspOp(XA,XB,XT); } + +define pcodeop xvminspOp; +# ISA-cmt: xvminsp - VSX Vector Minimum Single-Precision +# ISA-info: xvminsp - Form "XX3" Page 449 Category "VSX" +# binutils: vsx.d: 178: f1 12 e6 47 xvminsp vs40,vs50,vs60 +:xvminsp XT,XA,XB is $(NOTVLE) & OP=60 & XOP_3_10=200 & XA & XB & XT { xvminspOp(XA,XB,XT); } + +define pcodeop xvcvdpuxwsOp; +# ISA-cmt: xvcvdpuxws - VSX Vector truncate Double-Precision to integer and Convert to Unsigned Fixed-Point Word format with Saturate +# ISA-info: xvcvdpuxws - Form "XX2" Page 418 Category "VSX" +# binutils: vsx.d: 11c: f1 00 e3 23 xvcvdpuxws vs40,vs60 +:xvcvdpuxws XT,XB is $(NOTVLE) & OP=60 & XOP_2_10=200 & BITS_16_20=0 & XB & XT { xvcvdpuxwsOp(XB,XT); } + +define pcodeop xvnmaddmspOp; +# ISA-cmt: xvnmaddmsp - VSX Vector Negative Multiply-Add Type-M Single-Precision +# ISA-info: xvnmaddmsp - Form "XX3" Page 468 Category "VSX" +# binutils: vsx.d: 1b0: f1 12 e6 4f xvnmaddmsp vs40,vs50,vs60 +:xvnmaddmsp XT,XA,XB is $(NOTVLE) & OP=60 & XOP_3_10=201 & XA & XB & XT { xvnmaddmspOp(XA,XB,XT); } + +define pcodeop xvrdpiOp; +# ISA-cmt: xvrdpi - VSX Vector Round to Double-Precision Integer +# ISA-info: xvrdpi - Form "XX2" Page 477 Category "VSX" +# binutils: vsx.d: 1c4: f1 00 e3 27 xvrdpi vs40,vs60 +:xvrdpi XT,XB is $(NOTVLE) & OP=60 & XOP_2_10=201 & BITS_16_20=0 & XB & XT { xvrdpiOp(XB,XT); } + +define pcodeop xvrsqrtedpOp; +# ISA-cmt: xvrsqrtedp - VSX Vector Reciprocal Square Root Estimate Double-Precision +# ISA-info: xvrsqrtedp - Form "XX2" Page 485 Category "VSX" +# binutils: vsx.d: 1f4: f1 00 e3 2b xvrsqrtedp vs40,vs60 +:xvrsqrtedp XT,XB is $(NOTVLE) & OP=60 & XOP_2_10=202 & BITS_16_20=0 & XB & XT { xvrsqrtedpOp(XB,XT); } + +define pcodeop xvsqrtdpOp; +# ISA-cmt: xvsqrtdp - VSX Vector Square Root Double-Precision +# ISA-info: xvsqrtdp - Form "XX2" Page 487 Category "VSX" +# binutils: vsx.d: 1fc: f1 00 e3 2f xvsqrtdp vs40,vs60 +:xvsqrtdp XT,XB is $(NOTVLE) & OP=60 & XOP_2_10=203 & BITS_16_20=0 & XB & XT { xvsqrtdpOp(XB,XT); } + +define pcodeop xvcpsgnspOp; +# ISA-cmt: xvcpsgnsp - VSX Vector Copy Sign Single-Precision +# ISA-info: xvcpsgnsp - Form "XX3" Page 410 Category "VSX" +# binutils: vsx.d: 100: f1 12 e6 87 xvcpsgnsp vs40,vs50,vs60 +:xvcpsgnsp XT,XA,XB is $(NOTVLE) & OP=60 & XOP_3_10=208 & XA & XB & XT { xvcpsgnspOp(XA,XB,XT); } + +define pcodeop xvnmsubaspOp; +# ISA-cmt: xvnmsubasp - VSX Vector Negative Multiply-Subtract Type-A Single-Precision +# ISA-info: xvnmsubasp - Form "XX3" Page 471 Category "VSX" +# binutils: vsx.d: 1bc: f1 12 e6 8f xvnmsubasp vs40,vs50,vs60 +:xvnmsubasp XT,XA,XB is $(NOTVLE) & OP=60 & XOP_3_10=209 & XA & XB & 
XT { xvnmsubaspOp(XA,XB,XT); } + +define pcodeop xvcvdpsxwsOp; +# ISA-cmt: xvcvdpsxws - VSX Vector truncate Double-Precision to integer and Convert to Signed Fixed-Point Word Saturate +# ISA-info: xvcvdpsxws - Form "XX2" Page 414 Category "VSX" +# binutils: vsx.d: 114: f1 00 e3 63 xvcvdpsxws vs40,vs60 +:xvcvdpsxws XT,XB is $(NOTVLE) & OP=60 & XOP_2_10=216 & BITS_16_20=0 & XB & XT { xvcvdpsxwsOp(XB,XT); } + +define pcodeop xvnmsubmspOp; +# ISA-cmt: xvnmsubmsp - VSX Vector Negative Multiply-Subtract Type-M Single-Precision +# ISA-info: xvnmsubmsp - Form "XX3" Page 474 Category "VSX" +# binutils: vsx.d: 1c0: f1 12 e6 cf xvnmsubmsp vs40,vs50,vs60 +:xvnmsubmsp XT,XA,XB is $(NOTVLE) & OP=60 & XOP_3_10=217 & XA & XB & XT { xvnmsubmspOp(XA,XB,XT); } + +define pcodeop xvrdpizOp; +# ISA-cmt: xvrdpiz - VSX Vector Round to Double-Precision Integer toward Zero +# ISA-info: xvrdpiz - Form "XX2" Page 479 Category "VSX" +# binutils: vsx.d: 1d4: f1 00 e3 67 xvrdpiz vs40,vs60 +:xvrdpiz XT,XB is $(NOTVLE) & OP=60 & XOP_2_10=217 & BITS_16_20=0 & XB & XT { xvrdpizOp(XB,XT); } + +define pcodeop xvredpOp; +# ISA-cmt: xvredp - VSX Vector Reciprocal Estimate Double-Precision +# ISA-info: xvredp - Form "XX2" Page 480 Category "VSX" +# binutils: vsx.d: 1d8: f1 00 e3 6b xvredp vs40,vs60 +:xvredp XT,XB is $(NOTVLE) & OP=60 & XOP_2_10=218 & BITS_16_20=0 & XB & XT { xvredpOp(XB,XT); } + +define pcodeop xvmaxdpOp; +# ISA-cmt: xvmaxdp - VSX Vector Maximum Double-Precision +# ISA-info: xvmaxdp - Form "XX3" Page 443 Category "VSX" +# binutils: vsx.d: 16c: f1 12 e7 07 xvmaxdp vs40,vs50,vs60 +:xvmaxdp XT,XA,XB is $(NOTVLE) & OP=60 & XOP_3_10=224 & XA & XB & XT { xvmaxdpOp(XA,XB,XT); } + +define pcodeop xvnmaddadpOp; +# ISA-cmt: xvnmaddadp - VSX Vector Negative Multiply-Add Type-A Double-Precision +# ISA-info: xvnmaddadp - Form "XX3" Page 463 Category "VSX" +# binutils: vsx.d: 1a4: f1 12 e7 0f xvnmaddadp vs40,vs50,vs60 +:xvnmaddadp XT,XA,XB is $(NOTVLE) & OP=60 & XOP_3_10=225 & XA & XB & XT { xvnmaddadpOp(XA,XB,XT); } + +define pcodeop xvmindpOp; +# ISA-cmt: xvmindp - VSX Vector Minimum Double-Precision +# ISA-info: xvmindp - Form "XX3" Page 447 Category "VSX" +# binutils: vsx.d: 174: f1 12 e7 47 xvmindp vs40,vs50,vs60 +:xvmindp XT,XA,XB is $(NOTVLE) & OP=60 & XOP_3_10=232 & XA & XB & XT { xvmindpOp(XA,XB,XT); } + +define pcodeop xvnmaddmdpOp; +# ISA-cmt: xvnmaddmdp - VSX Vector Negative Multiply-Add Type-M Double-Precision +# ISA-info: xvnmaddmdp - Form "XX3" Page 468 Category "VSX" +# binutils: vsx.d: 1a8: f1 12 e7 4f xvnmaddmdp vs40,vs50,vs60 +:xvnmaddmdp XT,XA,XB is $(NOTVLE) & OP=60 & XOP_3_10=233 & XA & XB & XT { xvnmaddmdpOp(XA,XB,XT); } + +define pcodeop xvcvuxwdpOp; +# ISA-cmt: xvcvuxwdp - VSX Vector Convert Unsigned Fixed-Point Word to Double-Precision format +# ISA-info: xvcvuxwdp - Form "XX2" Page 432 Category "VSX" +# binutils: vsx.d: 14c: f1 00 e3 a3 xvcvuxwdp vs40,vs60 +:xvcvuxwdp XT,XB is $(NOTVLE) & OP=60 & XOP_2_10=232 & BITS_16_20=0 & XB & XT { xvcvuxwdpOp(XB,XT); } + +define pcodeop xvrdpipOp; +# ISA-cmt: xvrdpip - VSX Vector Round to Double-Precision Integer toward +Infinity +# ISA-info: xvrdpip - Form "XX2" Page 479 Category "VSX" +# binutils: vsx.d: 1d0: f1 00 e3 a7 xvrdpip vs40,vs60 +:xvrdpip XT,XB is $(NOTVLE) & OP=60 & XOP_2_10=233 & BITS_16_20=0 & XB & XT { xvrdpipOp(XB,XT); } + +define pcodeop xvtsqrtdpOp; +# ISA-cmt: xvtsqrtdp - VSX Vector Test for software Square Root Double-Precision +# ISA-info: xvtsqrtdp - Form "XX2" Page 495 Category "VSX" +# binutils: vsx.d: 214: f0 80 e3 aa xvtsqrtdp 
cr1,vs60 +:xvtsqrtdp CRFD,XB is $(NOTVLE) & OP=60 & XOP_2_10=234 & CRFD & BITS_16_20=0 & BIT_0=0 & BITS_21_22=0 & XB { xvtsqrtdpOp(CRFD,XB); } + +define pcodeop xvrdpicOp; +# ISA-cmt: xvrdpic - VSX Vector Round to Double-Precision Integer using Current rounding mode +# ISA-info: xvrdpic - Form "XX2" Page 478 Category "VSX" +# binutils: vsx.d: 1c8: f1 00 e3 af xvrdpic vs40,vs60 +:xvrdpic XT,XB is $(NOTVLE) & OP=60 & XOP_2_10=235 & BITS_16_20=0 & XB & XT { xvrdpicOp(XB,XT); } + +define pcodeop xvcpsgndpOp; +# ISA-cmt: xvcpsgndp - VSX Vector Copy Sign Double-Precision +# ISA-info: xvcpsgndp - Form "XX3" Page 410 Category "VSX" +# binutils: power7.d: 50: f0 64 2f 80 xvcpsgndp vs3,vs4,vs5 +# binutils: power7.d: 54: f1 6c 6f 87 xvcpsgndp vs43,vs44,vs45 +# binutils: vsx.d: f4: f1 12 e7 87 xvcpsgndp vs40,vs50,vs60 +:xvcpsgndp XT,XA,XB is $(NOTVLE) & OP=60 & XOP_3_10=240 & XA & XB & XT { xvcpsgndpOp(XA,XB,XT); } + +define pcodeop xvnmsubadpOp; +# ISA-cmt: xvnmsubadp - VSX Vector Negative Multiply-Subtract Type-A Double-Precision +# ISA-info: xvnmsubadp - Form "XX3" Page 471 Category "VSX" +# binutils: vsx.d: 1b4: f1 12 e7 8f xvnmsubadp vs40,vs50,vs60 +:xvnmsubadp XT,XA,XB is $(NOTVLE) & OP=60 & XOP_3_10=241 & XA & XB & XT { xvnmsubadpOp(XA,XB,XT); } + +define pcodeop xvcvsxwdpOp; +# ISA-cmt: xvcvsxwdp - VSX Vector Convert Signed Fixed-Point Word to Double-Precision format +# ISA-info: xvcvsxwdp - Form "XX2" Page 430 Category "VSX" +# binutils: vsx.d: 13c: f1 00 e3 e3 xvcvsxwdp vs40,vs60 +:xvcvsxwdp XT,XB is $(NOTVLE) & OP=60 & XOP_2_10=248 & BI_BITS=0 & XB & XT { xvcvsxwdpOp(XB,XT); } + +define pcodeop xvnmsubmdpOp; +# ISA-cmt: xvnmsubmdp - VSX Vector Negative Multiply-Subtract Type-M Double-Precision +# ISA-info: xvnmsubmdp - Form "XX3" Page 474 Category "VSX" +# binutils: vsx.d: 1b8: f1 12 e7 cf xvnmsubmdp vs40,vs50,vs60 +:xvnmsubmdp XT,XA,XB is $(NOTVLE) & OP=60 & XOP_3_10=249 & XA & XB & XT { xvnmsubmdpOp(XA,XB,XT); } + +define pcodeop xvrdpimOp; +# ISA-cmt: xvrdpim - VSX Vector Round to Double-Precision Integer toward -Infinity +# ISA-info: xvrdpim - Form "XX2" Page 478 Category "VSX" +# binutils: vsx.d: 1cc: f1 00 e3 e7 xvrdpim vs40,vs60 +:xvrdpim XT,XB is $(NOTVLE) & OP=60 & XOP_2_10=249 & BITS_16_20=0 & XB & XT { xvrdpimOp(XB,XT); } + +define pcodeop xscvdpspOp; +# ISA-cmt: xscvdpsp - VSX Scalar Convert Double-Precision to Single-Precision +# ISA-info: xscvdpsp - Form "XX2" Page 352 Category "VSX" +# binutils: vsx.d: 30: f1 00 e4 27 xscvdpsp vs40,vs60 +:xscvdpsp XT,XB is $(NOTVLE) & OP=60 & XOP_2_10=265 & BITS_16_20=0 & XB & XT { xscvdpspOp(XB,XT); } + +define pcodeop xscvdpuxdsOp; +# ISA-cmt: xscvdpuxds - VSX Scalar truncate Double-Precision to integer and Convert to Unsigned Fixed-Point Doubleword format with Saturate +# ISA-info: xscvdpuxds - Form "XX2" Page 357 Category "VSX" +# binutils: vsx.d: 3c: f1 00 e5 23 xscvdpuxds vs40,vs60 +:xscvdpuxds XT,XB is $(NOTVLE) & OP=60 & XOP_2_10=328 & BITS_16_20=0 & XB & XT { xscvdpuxdsOp(XB,XT); } + +define pcodeop xscvspdpOp; +# ISA-cmt: xscvspdp - VSX Scalar Convert Single-Precision to Double-Precision format +# binutils: vsx.d: 44: f1 00 e5 27 xscvspdp vs40,vs60 +:xscvspdp XT,XB is $(NOTVLE) & OP=60 & XOP_2_10=329 & BITS_16_20=0 & XB & XT { xscvspdpOp(XB,XT); } + +define pcodeop xscvdpsxdsOp; +# ISA-cmt: xscvdpsxds - VSX Scalar truncate Double-Precision to integer and Convert to Signed Fixed-Point Doubleword format with Saturate +# ISA-info: xscvdpsxds - Form "XX2" Page 353 Category "VSX" +# binutils: vsx.d: 34: f1 00 e5 63 xscvdpsxds vs40,vs60 
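+# note: in the XX2-form patterns below, BITS_16_20=0 pins the reserved field
+# (ISA bits 11:15) to zero, so encodings with those bits set are rejected by
+# the decoder rather than silently aliasing onto these constructors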
+:xscvdpsxds XT,XB is $(NOTVLE) & OP=60 & XOP_2_10=344 & BITS_16_20=0 & XB & XT { xscvdpsxdsOp(XB,XT); } + +define pcodeop xsabsdpOp; +# ISA-cmt: xsabsdp - VSX Scalar Absolute Value Double-Precision +# ISA-info: xsabsdp - Form "XX2" Page 341 Category "VSX" +# binutils: vsx.d: 1c: f1 00 e5 67 xsabsdp vs40,vs60 +:xsabsdp XT,XB is $(NOTVLE) & OP=60 & XOP_2_10=345 & XT & BITS_16_20=0 & XB { xsabsdpOp(XB,XT); } + +define pcodeop xscvuxddpOp; +# ISA-cmt: xscvuxddp - VSX Scalar Convert and round Unsigned Fixed-Point Doubleword to Double-Precision format +# binutils: vsx.d: 4c: f1 00 e5 a3 xscvuxddp vs40,vs60 +:xscvuxddp XT,XB is $(NOTVLE) & OP=60 & XOP_2_10=360 & BITS_16_20=0 & XB & XT { xscvuxddpOp(XB,XT); } + +define pcodeop xsnabsdpOp; +# ISA-cmt: xsnabsdp - VSX Scalar Negative Absolute Value Double-Precision +# ISA-info: xsnabsdp - Form "XX2" Page 377 Category "VSX" +# binutils: vsx.d: 70: f1 00 e5 a7 xsnabsdp vs40,vs60 +:xsnabsdp XT,XB is $(NOTVLE) & OP=60 & XOP_2_10=361 & BITS_16_20=0 & XB & XT { xsnabsdpOp(XB,XT); } + +define pcodeop xscvsxddpOp; +# ISA-cmt: xscvsxddp - VSX Scalar Convert and round Signed Fixed-Point Doubleword to Double-Precision format +# ISA-info: xscvsxddp - Form "XX2" Page 361 Category "VSX" +# binutils: vsx.d: 48: f1 00 e5 e3 xscvsxddp vs40,vs60 +:xscvsxddp XT,XB is $(NOTVLE) & OP=60 & XOP_2_10=376 & BITS_16_20=0 & XB & XT { xscvsxddpOp(XB,XT); } + +define pcodeop xsnegdpOp; +# ISA-cmt: xsnegdp - VSX Scalar Negate Double-Precision +# ISA-info: xsnegdp - Form "XX2" Page 377 Category "VSX" +# binutils: vsx.d: 74: f1 00 e5 e7 xsnegdp vs40,vs60 +:xsnegdp XT,XB is $(NOTVLE) & OP=60 & XOP_2_10=377 & BITS_16_20=0 & XB & XT { xsnegdpOp(XB,XT); } + +define pcodeop xvcvspuxdsOp; +# ISA-cmt: xvcvspuxds - VSX Vector truncate Single-Precision to integer and Convert to Unsigned Fixed-Point Doubleword format with Saturate +# ISA-info: xvcvspuxds - Form "XX2" Page 425 Category "VSX" +# binutils: vsx.d: 12c: f1 00 e6 23 xvcvspuxds vs40,vs60 +:xvcvspuxds XT,XB is $(NOTVLE) & OP=60 & XOP_2_10=392 & BITS_16_20=0 & XB & XT { xvcvspuxdsOp(XB,XT); } + +define pcodeop xvcvdpspOp; +# ISA-cmt: xvcvdpsp - VSX Vector round and Convert Double-Precision to Single-Precision format +# ISA-info: xvcvdpsp - Form "XX2" Page 411 Category "VSX" +# binutils: vsx.d: 10c: f1 00 e6 27 xvcvdpsp vs40,vs60 +:xvcvdpsp XT,XB is $(NOTVLE) & OP=60 & XOP_2_10=393 & BITS_16_20=0 & XB & XT { xvcvdpspOp(XB,XT); } + +define pcodeop xvcvspsxdsOp; +# ISA-cmt: xvcvspsxds - VSX Vector truncate Single-Precision to integer and Convert to Signed Fixed-Point Doubleword format with Saturate +# ISA-info: xvcvspsxds - Form "XX2" Page 421 Category "VSX" +# binutils: vsx.d: 124: f1 00 e6 63 xvcvspsxds vs40,vs60 +:xvcvspsxds XT,XB is $(NOTVLE) & OP=60 & XOP_2_10=408 & BITS_16_20=0 & XB & XT { xvcvspsxdsOp(XB,XT); } + +define pcodeop xvabsspOp; +# ISA-cmt: xvabssp - VSX Vector Absolute Value Single-Precision +# ISA-info: xvabssp - Form "XX2" Page 397 Category "VSX" +# binutils: vsx.d: b8: f1 00 e6 67 xvabssp vs40,vs60 +:xvabssp XT,XB is $(NOTVLE) & OP=60 & XOP_2_10=409 & BITS_16_20=0 & XB & XT { xvabsspOp(XB,XT); } + +define pcodeop xvcvuxdspOp; +# ISA-cmt: xvcvuxdsp - VSX Vector Convert and round Unsigned Fixed-Point Doubleword to Single-Precision format +# ISA-info: xvcvuxdsp - Form "XX2" Page 431 Category "VSX" +# binutils: vsx.d: 148: f1 00 e6 a3 xvcvuxdsp vs40,vs60 +:xvcvuxdsp XT,XB is $(NOTVLE) & OP=60 & XOP_2_10=424 & BITS_16_20=0 & XB & XT { xvcvuxdspOp(XB,XT); } + +define pcodeop xvnabsspOp; +# ISA-cmt: xvnabssp - VSX Vector 
Negative Absolute Value Single-Precision +# ISA-info: xvnabssp - Form "XX2" Page 461 Category "VSX" +# binutils: vsx.d: 198: f1 00 e6 a7 xvnabssp vs40,vs60 +:xvnabssp XT,XB is $(NOTVLE) & OP=60 & XOP_2_10=425 & BITS_16_20=0 & XB & XT { xvnabsspOp(XB,XT); } + +define pcodeop xvcvsxdspOp; +# ISA-cmt: xvcvsxdsp - VSX Vector Convert and round Signed Fixed-Point Doubleword to Single-Precision format +# ISA-info: xvcvsxdsp - Form "XX2" Page 429 Category "VSX" +# binutils: vsx.d: 138: f1 00 e6 e3 xvcvsxdsp vs40,vs60 +:xvcvsxdsp XT,XB is $(NOTVLE) & OP=60 & XOP_2_10=440 & BITS_16_20=0 & XB & XT { xvcvsxdspOp(XB,XT); } + +define pcodeop xvnegspOp; +# ISA-cmt: xvnegsp - VSX Vector Negate Single-Precision +# ISA-info: xvnegsp - Form "XX2" Page 462 Category "VSX" +# binutils: vsx.d: 1a0: f1 00 e6 e7 xvnegsp vs40,vs60 +:xvnegsp XT,XB is $(NOTVLE) & OP=60 & XOP_2_10=441 & BITS_16_20=0 & XB & XT { xvnegspOp(XB,XT); } + +define pcodeop xvcvdpuxdsOp; +# ISA-cmt: xvcvdpuxds - VSX Vector truncate Double-Precision to integer and Convert to Unsigned Fixed-Point Doubleword format with Saturate +# ISA-info: xvcvdpuxds - Form "XX2" Page 416 Category "VSX" +# binutils: vsx.d: 118: f1 00 e7 23 xvcvdpuxds vs40,vs60 +:xvcvdpuxds XT,XB is $(NOTVLE) & OP=60 & XOP_2_10=456 & BITS_16_20=0 & XB & XT { xvcvdpuxdsOp(XB,XT); } + +define pcodeop xvcvspdpOp; +# ISA-cmt: xvcvspdp - VSX Vector Convert Single-Precision to Double-Precision +# ISA-info: xvcvspdp - Form "XX2" Page 420 Category "VSX" +# binutils: vsx.d: 120: f1 00 e7 27 xvcvspdp vs40,vs60 +:xvcvspdp XT,XB is $(NOTVLE) & OP=60 & XOP_2_10=457 & BITS_16_20=0 & XB & XT { xvcvspdpOp(XB,XT); } + +define pcodeop xvcvdpsxdsOp; +# ISA-cmt: xvcvdpsxds - VSX Vector truncate Double-Precision to integer and Convert to Signed Fixed-Point Doubleword Saturate +# ISA-info: xvcvdpsxds - Form "XX2" Page 412 Category "VSX" +# binutils: vsx.d: 110: f1 00 e7 63 xvcvdpsxds vs40,vs60 +:xvcvdpsxds XT,XB is $(NOTVLE) & OP=60 & XOP_2_10=472 & BITS_16_20=0 & XB & XT { xvcvdpsxdsOp(XB,XT); } + +define pcodeop xvabsdpOp; +# ISA-cmt: xvabsdp - VSX Vector Absolute Value Double-Precision +# ISA-info: xvabsdp - Form "XX2" Page 397 Category "VSX" +# binutils: vsx.d: b4: f1 00 e7 67 xvabsdp vs40,vs60 +:xvabsdp XT,XB is $(NOTVLE) & OP=60 & XOP_2_10=473 & BITS_16_20=0 & XB & XT { xvabsdpOp(XB,XT); } + +define pcodeop xvcvuxddpOp; +# ISA-cmt: xvcvuxddp - VSX Vector Convert and round Unsigned Fixed-Point Doubleword to Double-Precision format +# ISA-info: xvcvuxddp - Form "XX2" Page 431 Category "VSX" +# binutils: vsx.d: 144: f1 00 e7 a3 xvcvuxddp vs40,vs60 +:xvcvuxddp XT,XB is $(NOTVLE) & OP=60 & XOP_2_10=488 & BITS_16_20=0 & XB & XT { xvcvuxddpOp(XB,XT); } + +define pcodeop xvnabsdpOp; +# ISA-cmt: xvnabsdp - VSX Vector Negative Absolute Value Double-Precision +# ISA-info: xvnabsdp - Form "XX2" Page 461 Category "VSX" +# binutils: vsx.d: 194: f1 00 e7 a7 xvnabsdp vs40,vs60 +:xvnabsdp XT,XB is $(NOTVLE) & OP=60 & XOP_2_10=489 & BITS_16_20=0 & XB & XT { xvnabsdpOp(XB,XT); } + +define pcodeop xvcvsxddpOp; +# ISA-cmt: xvcvsxddp - VSX Vector Convert and round Signed Fixed-Point Doubleword to Double-Precision format +# ISA-info: xvcvsxddp - Form "XX2" Page 429 Category "VSX" +# binutils: vsx.d: 134: f1 00 e7 e3 xvcvsxddp vs40,vs60 +:xvcvsxddp XT,XB is $(NOTVLE) & OP=60 & XOP_2_10=504 & BITS_16_20=0 & XB & XT { xvcvsxddpOp(XB,XT); } + +define pcodeop xvnegdpOp; +# ISA-cmt: xvnegdp - VSX Vector Negate Double-Precision +# ISA-info: xvnegdp - Form "XX2" Page 462 Category "VSX" +# binutils: vsx.d: 19c: f1 00 e7 e7 
xvnegdp vs40,vs60 +:xvnegdp XT,XB is $(NOTVLE) & OP=60 & XOP_2_10=505 & BITS_16_20=0 & XB & XT { xvnegdpOp(XB,XT); } + +define pcodeop vsx207_1; +define pcodeop vsx207_2; +define pcodeop vsx207_3; +define pcodeop vsx207_5; +define pcodeop vsx207_8; +define pcodeop vsx207_9; +define pcodeop vsx207_10; +define pcodeop vsx207_11; +define pcodeop vsx207_12; +define pcodeop vsx207_13; +define pcodeop vsx207_14; +define pcodeop vsx207_15; +define pcodeop vsx207_16; +define pcodeop vsx207_17; +define pcodeop vsx207_18; +define pcodeop vsx207_19; +define pcodeop vsx207_20; +define pcodeop vsx207_21; +define pcodeop vsx207_22; +define pcodeop vsx207_23; +define pcodeop vsx207_24; +define pcodeop vsx207_25; +define pcodeop vsx207_26; +define pcodeop vsx207_27; +define pcodeop vsx207_28; +define pcodeop vsx207_29; +define pcodeop vsx207_30; + +define pcodeop vsx300_1; +define pcodeop vsx300_2; +define pcodeop vsx300_3; +define pcodeop vsx300_4; +define pcodeop vsx300_5; +define pcodeop vsx300_7; +define pcodeop vsx300_8; +define pcodeop vsx300_9; +define pcodeop vsx300_10; +define pcodeop vsx300_11; +define pcodeop vsx300_12; +define pcodeop vsx300_13; +define pcodeop vsx300_14; +define pcodeop vsx300_15; +define pcodeop vsx300_16; +define pcodeop vsx300_17; +define pcodeop vsx300_18; +define pcodeop vsx300_19; +define pcodeop vsx300_20; +define pcodeop vsx300_21; +define pcodeop vsx300_22; +define pcodeop vsx300_23; +define pcodeop vsx300_25; +define pcodeop vsx300_26; +define pcodeop vsx300_27; +define pcodeop vsx300_28; +define pcodeop vsx300_29; +define pcodeop vsx300_30; +define pcodeop vsx300_31; +define pcodeop vsx300_32; +define pcodeop vsx300_33; +define pcodeop vsx300_34; +define pcodeop vsx300_35; +define pcodeop vsx300_36; +define pcodeop vsx300_37; +define pcodeop vsx300_38; +define pcodeop vsx300_39; +define pcodeop vsx300_40; +define pcodeop vsx300_41; +define pcodeop vsx300_42; +define pcodeop vsx300_43; +define pcodeop vsx300_44; +define pcodeop vsx300_45; +define pcodeop vsx300_46; +define pcodeop vsx300_47; +define pcodeop vsx300_48; +define pcodeop vsx300_49; +define pcodeop vsx300_50; +define pcodeop vsx300_51; +define pcodeop vsx300_52; +define pcodeop vsx300_53; +define pcodeop vsx300_54; +define pcodeop vsx300_55; +define pcodeop vsx300_56; +define pcodeop vsx300_57; +define pcodeop vsx300_58; +define pcodeop vsx300_59; +define pcodeop vsx300_60; +define pcodeop vsx300_61; +define pcodeop vsx300_62; +define pcodeop vsx300_63; +define pcodeop vsx300_64; +define pcodeop vsx300_65; +define pcodeop vsx300_66; +define pcodeop vsx300_67; +define pcodeop vsx300_68; +define pcodeop vsx300_69; +define pcodeop vsx300_70; +define pcodeop vsx300_71; +define pcodeop vsx300_72; +define pcodeop vsx300_73; +define pcodeop vsx300_74; +define pcodeop vsx300_75; +define pcodeop vsx300_76; +define pcodeop vsx300_77; +define pcodeop vsx300_78; +define pcodeop vsx300_79; +define pcodeop vsx300_80; +define pcodeop vsx300_81; +define pcodeop vsx300_82; +define pcodeop vsx300_83; +define pcodeop vsx300_84; +define pcodeop vsx300_85; +define pcodeop vsx300_86; +define pcodeop vsx300_87; +define pcodeop vsx300_88; +define pcodeop vsx300_89; +define pcodeop vsx300_90; +define pcodeop vsx300_91; +define pcodeop vsx300_92; +define pcodeop vsx300_93; +define pcodeop vsx300_94; +define pcodeop vsx300_95; +define pcodeop vsx300_96; +define pcodeop vsx300_97; +define pcodeop vsx300_98; +define pcodeop vsx300_99; +define pcodeop vsx300_100; +define pcodeop vsx300_101; +define pcodeop vsx300_102; +define pcodeop 
vsx300_103; + +################# +# v2.07 additions +:lxsiwax XT,A,B is $(NOTVLE) & OP=31 & XT & A & B & XOP_1_10=76 { + XT = vsx207_1(A,B); +} + +:lxsiwzx XT,A,B is $(NOTVLE) & OP=31 & XT & A & B & XOP_1_10=12 { + XT = vsx207_2(A,B); +} + +:lxsspx XT,A,B is $(NOTVLE) & OP=31 & XT & A & B & XOP_1_10=524 { + XT = vsx207_3(A,B); +} + +:mfvsrd A,XSF is $(NOTVLE) & OP=31 & XOP_1_10=51 & BITS_11_15=0 & XSF & A { + A = XSF; +} + +:mfvsrwz A,XSF is $(NOTVLE) & OP=31 & XOP_1_10=115 & BITS_11_15=0 & XSF & A { + A[0,32] = XSF[0,32]; + A[32,32] = 0; +} + +:mtvsrd XTF,A is $(NOTVLE) & OP=31 & XOP_1_10=179 & BITS_11_15=0 & XTF & A { + XTF = A; +} + +:mtvsrwa XTF,A is $(NOTVLE) & OP=31 & XOP_1_10=211 & BITS_11_15=0 & XTF & A { + XTF = sext(A:4); +} + +:mtvsrwz XTF,A is $(NOTVLE) & OP=31 & XOP_1_10=243 & BITS_11_15=0 & XTF & A { + XTF = zext(A:4); +} + +:stxsiwx XS,RA_OR_ZERO,B is $(NOTVLE) & OP=31 & XS & RA_OR_ZERO & B & XOP_1_10=140 { + EA:$(REGISTER_SIZE) = RA_OR_ZERO + B; + *[ram]:4 EA = vsx207_9(XS,RA_OR_ZERO,B); +} + +:stxsspx XS,RA_OR_ZERO,B is $(NOTVLE) & OP=31 & XS & RA_OR_ZERO & B & XOP_1_10=652 { + EA:$(REGISTER_SIZE) = RA_OR_ZERO + B; + *[ram]:4 EA = vsx207_10(XS,RA_OR_ZERO,B); +} + +:xsaddsp XT,XA,XB is $(NOTVLE) & OP=60 & XOP_3_10=0 & XA & XB & XT { + XT = vsx207_11(XA,XB); +} + +:xscvdpspn XT,XB is $(NOTVLE) & OP=60 & BITS_16_20=0 & XOP_2_10=267 & XB & XT +{ + src:4 = float2float(XB:8); + XT[0,32] = src; +} + +:xscvspdpn XT,XB is $(NOTVLE) & OP=60 & BITS_16_20=0 & XOP_2_10=331 & XB & XT { + XT = vsx207_13(XB); +} + +:xscvsxdsp XT,XB is $(NOTVLE) & OP=60 & BITS_16_20=0 & XOP_2_10=312 & XB & XT { + XT = vsx207_14(XB); +} + +:xscvuxdsp XT,XB is $(NOTVLE) & OP=60 & BITS_16_20=0 & XOP_2_10=296 & XB & XT { + XT = vsx207_15(XB); +} + +:xsdivsp XT,XA,XB is $(NOTVLE) & OP=60 & XOP_3_10=24 & XA & XB & XT { + XT = vsx207_16(XA,XB); +} + +:xsmaddasp XT,XA,XB is $(NOTVLE) & OP=60 & XOP_3_10=1 & XA & XB & XT { + XT = vsx207_17(XA,XB); +} + +:xsmaddmsp XT,XA,XB is $(NOTVLE) & OP=60 & XOP_3_10=9 & XA & XB & XT { + XT = vsx207_18(XA,XB); +} + +:xsmsubasp XT,XA,XB is $(NOTVLE) & OP=60 & XOP_3_10=17 & XA & XB & XT { + XT = vsx207_19(XA,XB); +} + +:xsmsubmsp XT,XA,XB is $(NOTVLE) & OP=60 & XOP_3_10=25 & XA & XB & XT { + XT = vsx207_20(XA,XB); +} + +:xsmulsp XT,XA,XB is $(NOTVLE) & OP=60 & XOP_3_10=16 & XA & XB & XT { + XT = vsx207_21(XA,XB); +} + +:xsnmaddasp XT,XA,XB is $(NOTVLE) & OP=60 & XOP_3_10=129 & XA & XB & XT { + XT = vsx207_22(XA,XB); +} + +:xsnmaddmsp XT,XA,XB is $(NOTVLE) & OP=60 & XOP_3_10=137 & XA & XB & XT { + XT = vsx207_23(XA,XB); +} + +:xsnmsubasp XT,XA,XB is $(NOTVLE) & OP=60 & XOP_3_10=145 & XA & XB & XT { + XT = vsx207_24(XA,XB); +} + +:xsnmsubmsp XT,XA,XB is $(NOTVLE) & OP=60 & XOP_3_10=153 & XA & XB & XT { + XT = vsx207_25(XA,XB); +} + +:xsresp XT,XB is $(NOTVLE) & OP=60 & BITS_16_20=0 & XOP_2_10=26 & XB & XT { + XT = vsx207_26(XB); +} + +:xsrsp XT,XB is $(NOTVLE) & OP=60 & BITS_16_20=0 & XOP_2_10=281 & XB & XT { + XT = vsx207_27(XB); +} + +:xsrsqrtesp XT,XB is $(NOTVLE) & OP=60 & BITS_16_20=0 & XOP_2_10=10 & XB & XT { + XT = vsx207_28(XB); +} + +:xssqrtsp XT,XB is $(NOTVLE) & OP=60 & BITS_16_20=0 & XOP_2_10=11 & XB & XT { + XT = vsx207_29(XB); +} + +:xssubsp XT,XA,XB is $(NOTVLE) & OP=60 & XOP_3_10=8 & XA & XB & XT { + XT = vsx207_30(XA,XB); +} + +:xxleqv XT,XA,XB is $(NOTVLE) & OP=60 & XOP_3_10=186 & XA & XB & XT { + XT = ~(XA ^ XB); +} + +:xxlnand XT,XA,XB is $(NOTVLE) & OP=60 & XOP_3_10=178 & XA & XB & XT { + XT = ~(XA & XB); +} + +:xxlorc XT,XA,XB is $(NOTVLE) & OP=60 & 
XOP_3_10=170 & XA & XB & XT { + XT = XA | (~XB); +} + +####################### +# v3.0 + +# The endian behavior of the storage has not been modelled +:lxsd vrD,DSs(RA_OR_ZERO) is $(NOTVLE) & OP=57 & vrD & RA_OR_ZERO & BITS_0_1=2 & DSs { + ea:$(REGISTER_SIZE) = RA_OR_ZERO + (DSs << 2); + vrD[0,64] = *:8 ea; +} + +:lxsibzx XT,A,B is $(NOTVLE) & OP=31 & XT & A & B & XOP_1_10=781 { + XT = vsx300_2(A,B); +} + +:lxsihzx XT,A,B is $(NOTVLE) & OP=31 & XT & A & B & XOP_1_10=813 { + XT = vsx300_3(A,B); +} + +:lxssp vrD,DSs(RA_OR_ZERO) is $(NOTVLE) & OP=57 & vrD & RA_OR_ZERO & BITS_0_1=3 & DSs { + vrD = vsx300_4(DSs:2,RA_OR_ZERO); +} + +# The endian behavior of the storage has not been modelled +:lxv XT3,DQs(RA_OR_ZERO) is $(NOTVLE) & OP=61 & XT3 & RA_OR_ZERO & BITS_0_2=1 & DQs { + ea:$(REGISTER_SIZE) = RA_OR_ZERO + (DQs << 4); + XT3 = *:16 ea; +} + +:lxvx XT,RA_OR_ZERO,B is $(NOTVLE) & OP=31 & XOP_1_5=12 & BIT_6=0 & XOP_7_10=4 & RA_OR_ZERO & B & XT { + ea:$(REGISTER_SIZE) = RA_OR_ZERO + B; + XT = *:16 ea; +} + +:lxvb16x XT,A,B is $(NOTVLE) & OP=31 & XT & A & B & XOP_1_10=876 { + XT = vsx300_7(A,B); +} + +:lxvh8x XT,A,B is $(NOTVLE) & OP=31 & XT & A & B & XOP_1_10=812 { + XT = vsx300_8(A,B); +} + +:lxvl XT,A,B is $(NOTVLE) & OP=31 & XT & A & B & XOP_1_10=269 { + XT = vsx300_9(A,B); +} + +:lxvll XT,A,B is $(NOTVLE) & OP=31 & XT & A & B & XOP_1_10=301 { + XT = vsx300_10(A,B); +} + +:lxvwsx XT,A,B is $(NOTVLE) & OP=31 & XT & A & B & XOP_1_10=364 { + XT = vsx300_11(A,B); +} + +:mfvsrld A,XSF is $(NOTVLE) & OP=31 & XOP_1_10=307 & BITS_11_15=0 & XSF & A { + A = vsx300_12(XSF); +} + +:mtvsrdd XTF,A,B is $(NOTVLE) & OP=31 & XTF & A & B & XOP_1_10=435 { + XTF = vsx300_13(A,B); +} + +:mtvsrws XTF,A is $(NOTVLE) & OP=31 & XOP_1_10=403 & BITS_11_15=0 & XTF & A { + XTF = vsx300_14(A); +} + +:stxsd vrS,DSs(RA_OR_ZERO) is $(NOTVLE) & OP=61 & vrS & RA_OR_ZERO & BITS_0_1=2 & DSs { + vsx300_15(vrS,DSs:2,RA_OR_ZERO); +} + +:stxsibx XS,A,B is $(NOTVLE) & OP=31 & XS & A & B & XOP_1_10=909 { + vsx300_16(XS,A,B); +} + +:stxsihx XS,A,B is $(NOTVLE) & OP=31 & XS & A & B & XOP_1_10=941 { + vsx300_17(XS,A,B); +} + +:stxssp vrS,DSs(RA_OR_ZERO) is $(NOTVLE) & OP=61 & vrS & RA_OR_ZERO & BITS_0_1=3 & DSs { + vsx300_18(vrS,DSs:2,RA_OR_ZERO); +} + +# The endian behavior of the storage has not been modelled +:stxv XS3,DQs(RA_OR_ZERO) is $(NOTVLE) & OP=61 & XS3 & RA_OR_ZERO & BITS_0_2=5 & DQs { + ea:$(REGISTER_SIZE) = RA_OR_ZERO + (DQs << 4); + *:16 ea = XS3; +} + +:stxvb16x XS,A,B is $(NOTVLE) & OP=31 & XS & A & B & XOP_1_10=1004 { + vsx300_20(XS,A,B); +} + +:stxvh8x XS,A,B is $(NOTVLE) & OP=31 & XS & A & B & XOP_1_10=940 { + vsx300_21(XS,A,B); +} + +:stxvl XS,A,B is $(NOTVLE) & OP=31 & XS & A & B & XOP_1_10=397 { + vsx300_22(XS,A,B); +} + +:stxvll XS,A,B is $(NOTVLE) & OP=31 & XS & A & B & XOP_1_10=429 { + vsx300_23(XS,A,B); +} + +:stxvx XS,RA_OR_ZERO,B is $(NOTVLE) & OP=31 & XS & RA_OR_ZERO & B & XOP_1_10=396 { + ea:$(REGISTER_SIZE) = RA_OR_ZERO + B; + *:16 ea = XS; +} + +:xsabsqp vrD,vrB is $(NOTVLE) & OP=63 & BITS_16_20=0 & BIT_0=0 & XOP_1_10=804 & vrD & vrB { + vrD = vsx300_25(vrB); +} + +:xsaddqp vrD,vrA,vrB is $(NOTVLE) & OP=63 & XOP_1_10=4 & R0=0 & vrD & vrA & vrB { + vrD = vsx300_26(vrA,vrB); +} + +:xsaddqpo vrD,vrA,vrB is $(NOTVLE) & OP=63 & XOP_1_10=4 & R0=1 & vrD & vrA & vrB { + vrD = vsx300_27(vrA,vrB); +} + +:xscmpeqdp XT,XA,XB is $(NOTVLE) & OP=60 & XOP_3_10=3 & XA & XB & XT { + XT = vsx300_28(XA,XB); +} + +:xscmpexpdp BF2,XA,XB is $(NOTVLE) & OP=60 & XOP_3_10=59 & BITS_21_22=0 & BIT_0=0 & XA & XB & BF2 { + 
vsx300_29(BF2:1,XA,XB);
+}
+
+:xscmpexpqp BF2,vrA,vrB is $(NOTVLE) & OP=63 & BITS_21_22=0 & BIT_0=0 & XOP_1_10=164 & R0=0 & BF2 & vrA & vrB {
+    vsx300_30(BF2:1,vrA,vrB);
+}
+
+:xscmpgedp XT,XA,XB is $(NOTVLE) & OP=60 & XOP_3_10=19 & XA & XB & XT {
+    XT = vsx300_31(XA,XB);
+}
+
+:xscmpgtdp XT,XA,XB is $(NOTVLE) & OP=60 & XOP_3_10=11 & XA & XB & XT {
+    XT = vsx300_32(XA,XB);
+}
+
+:xscmpnedp XT,XA,XB is $(NOTVLE) & OP=60 & XOP_3_10=27 & XA & XB & XT {
+    XT = vsx300_33(XA,XB);
+}
+
+:xscmpoqp BF2,vrA,vrB is $(NOTVLE) & OP=63 & XOP_1_10=132 & BITS_21_22=0 & BIT_0=0 & vrA & vrB & BF2 {
+    vsx300_34(BF2:1,vrA,vrB);
+}
+
+:xscmpuqp BF2,vrA,vrB is $(NOTVLE) & OP=63 & XOP_1_10=644 & BITS_21_22=0 & BIT_0=0 & vrA & vrB & BF2 {
+    vsx300_35(BF2:1,vrA,vrB);
+}
+
+:xscpsgnqp vrD,vrA,vrB is $(NOTVLE) & OP=63 & BIT_0=0 & XOP_1_10=100 & vrD & vrA & vrB {
+    vrD = vsx300_36(vrA,vrB);
+}
+
+:xscvdphp XT,XB is $(NOTVLE) & OP=60 & BITS_16_20=17 & XOP_2_10=347 & XB & XT {
+    XT = vsx300_37(XB);
+}
+
+:xscvdpqp vrD,vrB is $(NOTVLE) & OP=63 & BITS_16_20=22 & BIT_0=0 & XOP_1_10=836 & vrD & vrB {
+    vrD = vsx300_38(vrB);
+}
+
+:xscvhpdp XT,XB is $(NOTVLE) & OP=60 & BITS_16_20=16 & XOP_2_10=347 & XB & XT {
+    XT = vsx300_39(XB);
+}
+
+:xscvqpdp vrD,vrB is $(NOTVLE) & OP=63 & BITS_16_20=20 & XOP_1_10=836 & R0=0 & vrD & vrB {
+    vrD = vsx300_40(vrB);
+}
+
+:xscvqpdpo vrD,vrB is $(NOTVLE) & OP=63 & BITS_16_20=20 & XOP_1_10=836 & R0=1 & vrD & vrB {
+    vrD = vsx300_41(vrB);
+}
+
+:xscvqpsdz vrD,vrB is $(NOTVLE) & OP=63 & BITS_16_20=25 & XOP_1_10=836 & BIT_0=0 & vrD & vrB {
+    vrD = vsx300_42(vrB);
+}
+
+:xscvqpswz vrD,vrB is $(NOTVLE) & OP=63 & BITS_16_20=9 & XOP_1_10=836 & BIT_0=0 & vrD & vrB {
+    vrD = vsx300_43(vrB);
+}
+
+:xscvqpudz vrD,vrB is $(NOTVLE) & OP=63 & BITS_16_20=17 & XOP_1_10=836 & BIT_0=0 & vrD & vrB {
+    vrD = vsx300_44(vrB);
+}
+
+:xscvqpuwz vrD,vrB is $(NOTVLE) & OP=63 & BITS_16_20=1 & XOP_1_10=836 & BIT_0=0 & vrD & vrB {
+    vrD = vsx300_45(vrB);
+}
+
+:xscvsdqp vrD,vrB is $(NOTVLE) & OP=63 & BITS_16_20=10 & XOP_1_10=836 & BIT_0=0 & vrD & vrB {
+    vrD = vsx300_46(vrB);
+}
+
+:xscvudqp vrD,vrB is $(NOTVLE) & OP=63 & BITS_16_20=2 & XOP_1_10=836 & BIT_0=0 & vrD & vrB {
+    vrD = vsx300_47(vrB);
+}
+
+:xsdivqp vrD,vrA,vrB is $(NOTVLE) & OP=63 & XOP_1_10=548 & R0=0 & vrD & vrA & vrB {
+    vrD = vsx300_47(vrA,vrB);
+}
+
+:xsdivqpo vrD,vrA,vrB is $(NOTVLE) & OP=63 & XOP_1_10=548 & R0=1 & vrD & vrA & vrB {
+    vrD = vsx300_48(vrA,vrB);
+}
+
+:xsiexpdp XT,A,B is $(NOTVLE) & OP=60 & XT & A & B & XOP_1_10=918 {
+    XT = vsx300_49(A,B);
+}
+
+:xsiexpqp vrD,vrA,vrB is $(NOTVLE) & OP=63 & BIT_0=0 & XOP_1_10=868 & vrD & vrA & vrB {
+    vrD = vsx300_50(vrA,vrB);
+}
+
+:xsmaddqp vrD,vrA,vrB is $(NOTVLE) & OP=63 & XOP_1_10=388 & R0=0 & vrD & vrA & vrB {
+    vrD = vsx300_51(vrA,vrB);
+}
+
+:xsmaddqpo vrD,vrA,vrB is $(NOTVLE) & OP=63 & XOP_1_10=388 & R0=1 & vrD & vrA & vrB {
+    vrD = vsx300_52(vrA,vrB);
+}
+
+:xsmaxcdp XT,XA,XB is $(NOTVLE) & OP=60 & XOP_3_10=128 & XA & XB & XT {
+    XT = vsx300_53(XA,XB);
+}
+
+:xsmaxjdp XT,XA,XB is $(NOTVLE) & OP=60 & XOP_3_10=144 & XA & XB & XT {
+    XT = vsx300_54(XA,XB);
+}
+
+:xsmincdp XT,XA,XB is $(NOTVLE) & OP=60 & XOP_3_10=136 & XA & XB & XT {
+    XT = vsx300_55(XA,XB);
+}
+
+:xsminjdp XT,XA,XB is $(NOTVLE) & OP=60 & XOP_3_10=152 & XA & XB & XT {
+    XT = vsx300_56(XA,XB);
+}
+
+:xsmsubqp vrD,vrA,vrB is $(NOTVLE) & OP=63 & XOP_1_10=420 & R0=0 & vrD & vrA & vrB {
+    vrD = vsx300_57(vrA,vrB);
+}
+
+:xsmsubqpo vrD,vrA,vrB is $(NOTVLE) & OP=63 & XOP_1_10=420 & R0=1 & vrD & vrA & vrB {
+    vrD = vsx300_58(vrA,vrB);
+}
+
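+# note: the vsx207_* and vsx300_* names used throughout the v2.07 and v3.0
+# sections are opaque placeholder pcodeops with no semantics attached, so the
+# decompiler renders these instructions as intrinsic calls; only operand data
+# flow is modeled here, not the underlying IEEE or quad-precision arithmetic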
+:xsmulqp vrD,vrA,vrB is $(NOTVLE) & OP=63 & XOP_1_10=36 & R0=0 & vrD & vrA & vrB { + vrD = vsx300_59(vrA,vrB); +} + +:xsmulqpo vrD,vrA,vrB is $(NOTVLE) & OP=63 & XOP_1_10=36 & R0=1 & vrD & vrA & vrB { + vrD = vsx300_60(vrA,vrB); +} + +:xsnabsqp vrD,vrB is $(NOTVLE) & OP=63 & BITS_16_20=8 & XOP_1_10=804 & BIT_0=0 & vrD & vrB { + vrD = vsx300_61(vrB); +} + +:xsnegqp vrD,vrB is $(NOTVLE) & OP=63 & BITS_16_20=16 & XOP_1_10=804 & BIT_0=0 & vrD & vrB { + vrD = vsx300_62(vrB); +} + +:xsnmaddqp vrD,vrA,vrB is $(NOTVLE) & OP=63 & XOP_1_10=452 & R0=0 & vrD & vrA & vrB { + vrD = vsx300_63(vrA,vrB); +} + +:xsnmaddqpo vrD,vrA,vrB is $(NOTVLE) & OP=63 & XOP_1_10=452 & R0=1 & vrD & vrA & vrB { + vrD = vsx300_64(vrA,vrB); +} + +:xsnmsubqp vrD,vrA,vrB is $(NOTVLE) & OP=63 & XOP_1_10=484 & R0=0 & vrD & vrA & vrB { + vrD = vsx300_65(vrA,vrB); +} + +:xsnmsubqpo vrD,vrA,vrB is $(NOTVLE) & OP=63 & XOP_1_10=484 & R0=1 & vrD & vrA & vrB { + vrD = vsx300_66(vrA,vrB); +} + +:xsrqpi R16,vrD,vrB,RMC is $(NOTVLE) & OP=63 & BITS_17_20=0 & XOP_1_8=5 & EX=0 & vrD & vrB & R16 & RMC { + vrD = vsx300_67(vrB,RMC:1,R16:1); +} + +:xsrqpix R16,vrD,vrB,RMC is $(NOTVLE) & OP=63 & BITS_17_20=0 & XOP_1_8=5 & EX=1 & vrD & vrB & R16 & RMC { + vrD = vsx300_68(vrB,RMC:1,R16:1); +} + +:xsrqpxp R16,vrD,vrB,RMC is $(NOTVLE) & OP=63 & BITS_17_20=0 & XOP_1_8=37 & BIT_0=0 & vrD & vrB & R16 & RMC { + vrD = vsx300_69(vrB,RMC:1,R16:1); +} + +:xssqrtqp vrD,vrB is $(NOTVLE) & OP=63 & BITS_16_20=27 & XOP_1_10=804 & R0=0 & vrD & vrB { + vrD = vsx300_70(vrB); +} + +:xssqrtqpo vrD,vrB is $(NOTVLE) & OP=63 & BITS_16_20=27 & XOP_1_10=804 & R0=1 & vrD & vrB { + vrD = vsx300_71(vrB); +} + +:xssubqp vrD,vrA,vrB is $(NOTVLE) & OP=63 & XOP_1_10=516 & R0=0 & vrD & vrA & vrB { + vrD = vsx300_72(vrA,vrB); +} + +:xssubqpo vrD,vrA,vrB is $(NOTVLE) & OP=63 & XOP_1_10=516 & R0=1 & vrD & vrA & vrB { + vrD = vsx300_73(vrA,vrB); +} + +:xststdcdp BF2,XB,DCMX is $(NOTVLE) & OP=60 & BIT_0=0 & XOP_2_10=362 & XB & BF2 & DCMX { + vsx300_74(XB,BF2:1,DCMX:1); +} + +:xststdcqp BF2,vrB,DCMX is $(NOTVLE) & OP=63 & XOP_1_10=708 & BIT_0=0 & vrB & BF2 & DCMX { + vsx300_75(vrB,BF2:1,DCMX:1); +} + +:xststdcsp BF2,XB,DCMX is $(NOTVLE) & OP=60 & BIT_0=0 & XOP_2_10=298 & XB & BF2 & DCMX { + vsx300_76(XB,BF2:1,DCMX:1); +} + +:xsxexpdp D,XB is $(NOTVLE) & OP=60 & BITS_16_20=0 & BIT_0=0 & XOP_2_10=347 & XB & D { + D = vsx300_77(XB); +} + +:xsxexpqp vrD,vrB is $(NOTVLE) & OP=63 & BITS_16_20=2 & XOP_1_10=804 & BIT_0=0 & vrD & vrB { + vrD = vsx300_78(vrB); +} + +:xsxsigdp D,XB is $(NOTVLE) & OP=60 & BITS_16_20=1 & BIT_0=0 & XOP_2_10=347 & XB & D { + D = vsx300_79(XB); +} + +:xsxsigqp vrD,vrB is $(NOTVLE) & OP=63 & BITS_16_20=18 & XOP_1_10=804 & BIT_0=0 & vrD & vrB { + vrD = vsx300_80(vrB); +} + +:xvcmpnedp XT,XA,XB is $(NOTVLE) & OP=60 & XOP_3_9=123 & Rc2=0 & XA & XB & XT { + XT = vsx300_81(XA,XB); +} + +:xvcmpnedp. XT,XA,XB is $(NOTVLE) & OP=60 & XOP_3_9=123 & Rc2=1 & XA & XB & XT { + XT = vsx300_82(XA,XB); +} + +:xvcmpnesp XT,XA,XB is $(NOTVLE) & OP=60 & XOP_3_9=91 & Rc2=0 & XA & XB & XT { + XT = vsx300_83(XA,XB); +} + +:xvcmpnesp. 
XT,XA,XB is $(NOTVLE) & OP=60 & XOP_3_9=91 & Rc2=1 & XA & XB & XT { + XT = vsx300_84(XA,XB); +} + +:xvcvhpsp XT,XB is $(NOTVLE) & OP=60 & BITS_16_20=24 & XOP_2_10=475 & XB & XT { + XT = vsx300_85(XB); +} + +:xvcvsphp XT,XB is $(NOTVLE) & OP=60 & BITS_16_20=25 & XOP_2_10=475 & XB & XT { + XT = vsx300_86(XB); +} + +:xviexpdp XT,XA,XB is $(NOTVLE) & OP=60 & XOP_3_10=248 & XA & XB & XT { + XT = vsx300_87(XA,XB); +} + +:xviexpsp XT,XA,XB is $(NOTVLE) & OP=60 & XOP_3_10=216 & XA & XB & XT { + XT = vsx300_88(XA,XB); +} + +:xvtstdcdp XT,XB,DBUILD is $(NOTVLE) & OP=60 & XOP_3_5=5 & XOP_7_10=15 & XA & XB & XT & DBUILD { + XT = vsx300_89(XB,DBUILD); +} + +:xvtstdcsp XT,XB,DBUILD is $(NOTVLE) & OP=60 & XOP_3_5=5 & XOP_7_10=13 & XA & XB & XT & DBUILD { + XT = vsx300_90(XB,DBUILD); +} + +:xvxexpdp XT,XB is $(NOTVLE) & OP=60 & BITS_16_20=0 & XOP_2_10=475 & XB & XT { + XT = vsx300_91(XB); +} + +:xvxexpsp XT,XB is $(NOTVLE) & OP=60 & BITS_16_20=8 & XOP_2_10=475 & XB & XT { + XT = vsx300_92(XB); +} + +:xvxsigdp XT,XB is $(NOTVLE) & OP=60 & BITS_16_20=1 & XOP_2_10=475 & XB & XT { + XT = vsx300_93(XB); +} + +:xvxsigsp XT,XB is $(NOTVLE) & OP=60 & BITS_16_20=9 & XOP_2_10=475 & XB & XT { + XT = vsx300_94(XB); +} + +:xxbrd XT,XB is $(NOTVLE) & OP=60 & BITS_16_20=23 & XOP_2_10=475 & XB & XT { + XT = vsx300_95(XB); +} + +:xxbrh XT,XB is $(NOTVLE) & OP=60 & BITS_16_20=7 & XOP_2_10=475 & XB & XT { + XT = vsx300_96(XB); +} + +:xxbrq XT,XB is $(NOTVLE) & OP=60 & BITS_16_20=31 & XOP_2_10=475 & XB & XT { + XT = vsx300_97(XB); +} + +:xxbrw XT,XB is $(NOTVLE) & OP=60 & BITS_16_20=15 & XOP_2_10=475 & XB & XT { + XT = vsx300_98(XB); +} + +:xxextractuw XT,XB,UIMB is $(NOTVLE) & OP=60 & BIT_20=0 & XOP_2_10=165 & XB & XT & UIMB { + XT = vsx300_99(XB,UIMB:1); +} + +:xxinsertw XT,XB,UIMB is $(NOTVLE) & OP=60 & BIT_20=0 & XOP_2_10=181 & XB & XT & UIMB { + XT = vsx300_100(XB,UIMB:1); +} + +:xxperm XT,XA,XB is $(NOTVLE) & OP=60 & XOP_3_10=26 & XA & XB & XT { + XT = vsx300_101(XA,XB); +} + +:xxpermr XT,XA,XB is $(NOTVLE) & OP=60 & XOP_3_10=58 & XA & XB & XT { + XT = vsx300_102(XA,XB); +} + +:xxspltib XT,UIMM8 is $(NOTVLE) & OP=60 & BITS_19_20=0 & XOP_1_10=360 & XT & UIMM8 { + tmpa:16 = zext(UIMM8:1); + tmpa = tmpa | (tmpa << 8); + tmpa = tmpa | (tmpa << 16); + tmpa = tmpa | (tmpa << 32); + tmpa = tmpa | (tmpa << 64); + XT = tmpa; +} diff --git a/src/third-party/sleigh/processors/PowerPC/data/manuals/PowerISA.idx b/src/third-party/sleigh/processors/PowerPC/data/manuals/PowerISA.idx new file mode 100644 index 00000000..0a0eab6e --- /dev/null +++ b/src/third-party/sleigh/processors/PowerPC/data/manuals/PowerISA.idx @@ -0,0 +1,1724 @@ +@PowerISA_V2.06_PUBLIC.pdf[PowerPC® Microprocessor Family: The Programming Environments Manual for 32 and 64-bit Microprocessors, Version 2.3, March 31, 2005] +addo. , 91 +addo , 91 +add. , 91 +add , 91 +addco. , 92 +addco , 92 +addc. , 92 +addc , 92 +addeo. , 93 +addeo , 93 +adde. , 93 +adde , 93 +addg6s , 125 +addi , 90 +addic , 91 +addic. , 91 +addis , 90 +addmeo. , 93 +addmeo , 93 +addme. , 93 +addme , 93 +addzeo. , 94 +addzeo , 94 +addze. , 94 +addze , 94 +and. , 108 +and , 108 +andc. , 109 +andc , 109 +andi. , 106 +andis. , 106 +bla , 63 +bl , 63 +ba , 63 +b , 63 +bcla , 63 +bcl , 63 +bca , 63 +bc , 63 +bcctrl , 64 +bcctr , 64 +bclrl , 64 +bclr , 64 +bpermd , 114 +brinc , 538 +cbcdtd , 125 +cdtbcd , 125 +cmp , 102 +cmpb , 110 +cmpi , 102 +cmpl , 103 +cmpli , 103 +cntlzd. , 113 +cntlzd , 113 +cntlzw. 
, 109 +cntlzw , 109 +crand , 65 +crandc , 66 +creqv , 66 +crnand , 65 +crnor , 66 +cror , 65 +crorc , 66 +crxor , 65 +dadd. , 201 +dadd , 201 +daddq. , 201 +daddq , 201 +dcba , 711 +dcbf , 715 +dcbfep , 940 +dcbi , 996 +dcblc , 1000 +dcbst , 715 +dcbstep , 939 +dcbt , 712 +dcbtep , 939 +dcbtls , 999 +dcbtst , 712 +dcbtstep , 941 +dcbtstls , 999 +dcbz , 715 +dcbzep , 942 +dcffix. , 223 +dcffix , 223 +dcffixq. , 223 +dcffixq , 223 +dci , 131 +dcmpo , 207 +dcmpoq , 207 +dcmpu , 206 +dcmpuq , 207 +dcread , 134 +dcread , 134 +dctdp. , 221 +dctdp , 221 +dctfix. , 223 +dctfix , 223 +dctfixq. , 223 +dctfixq , 223 +dctqpq. , 221 +dctqpq , 221 +ddedpd. , 225 +ddedpd , 225 +ddedpdq. , 225 +ddedpdq , 225 +ddiv. , 204 +ddiv , 204 +ddivq. , 204 +ddivq , 204 +denbcd. , 225 +denbcd , 225 +denbcdq. , 225 +denbcdq , 225 +diex. , 226 +diex , 226 +diexq. , 226 +diexq , 226 +divdo. , 100 +divdo , 100 +divd. , 100 +divd , 100 +divdeo. , 101 +divdeo , 101 +divde. , 101 +divde , 101 +divdeuo. , 101 +divdeuo , 101 +divdeu. , 101 +divdeu , 101 +divduo. , 100 +divduo , 100 +divdu. , 100 +divdu , 100 +divwo. , 96 +divwo , 96 +divw. , 96 +divw , 96 +divweo. , 97 +divweo , 97 +divwe. , 97 +divwe , 97 +divweuo. , 97 +divweuo , 97 +divweu. , 97 +divweu , 97 +divwuo. , 96 +divwuo , 96 +divwu. , 96 +divwu , 96 +dlmzb. , 617 +dlmzb , 617 +dmul. , 203 +dmul , 203 +dmulq. , 203 +dmulq , 203 +dnh , 1068 +doze , 770 +dqua. , 212 +dqua , 212 +dquai. , 211 +dquai , 211 +dquaiq. , 211 +dquaiq , 211 +dquaq. , 212 +dquaq , 212 +drdpq. , 222 +drdpq , 222 +drintn. , 119 +drintn , 119 +drintnq. , 219 +drintnq , 219 +drintx. , 217 +drintx , 217 +drintxq. , 217 +drintxq , 217 +drrnd. , 214 +drrnd , 214 +drrndq. , 214 +drrndq , 214 +drsp. , 222 +drsp , 222 +dscli. , 228 +dscli , 228 +dscliq. , 228 +dscliq , 228 +dscri. , 228 +dscri , 228 +dscriq. , 228 +dscriq , 228 +dsn , 738 +dsub. , 101 +dsub , 101 +dsubq. , 201 +dsubq , 201 +dtstdc , 208 +dtstdcq , 208 +dtstdg , 208 +dtstdgq , 208 +dtstex , 209 +dtstexq , 209 +dtstsf , 210 +dtstsfq , 210 +dxex. , 226 +dxex , 226 +dxexq. , 226 +dxexq , 226 +eciwx , 740 +ecowx , 740 +efdabs , 605 +efdadd , 606 +efdcfs , 612 +efdcfsf , 610 +efdcfsi , 609 +efdcfsid , 610 +efdcfuf , 610 +efdcfui , 609 +efdcfuid , 610 +efdcmpeq , 607 +efdcmpgt , 607 +efdcmplt , 607 +efdctsf , 612 +efdctsi , 610 +efdctsidz , 611 +efdctsiz , 612 +efdctuf , 612 +efdctui , 610 +efdctuidz , 611 +efdctuiz , 612 +efddiv , 606 +efdmul , 606 +efdnabs , 605 +efdneg , 605 +efdsub , 606 +efdtsteq , 608 +efdtstgt , 607 +efdtstlt , 608 +efsabs , 598 +efsadd , 599 +efscfd , 613 +efscfsf , 603 +efscfsi , 603 +efscfuf , 603 +efscfui , 603 +efscmpeq , 601 +efscmpgt , 600 +efscmplt , 600 +efsctsf , 604 +efsctsi , 603 +efsctsiz , 604 +efsctuf , 604 +efsctui , 603 +efsctuiz , 604 +efsdiv , 599 +efsmul , 599 +efsnabs , 598 +efsneg , 598 +efssub , 599 +efststeq , 602 +efststgt , 601 +efststlt , 602 +ehpriv , 919 +eieio , 727 +eqv. 
, 109 +eqv , 109 +evabs , 538 +evaddiw , 538 +evaddsmiaaw , 538 +evaddssiaaw , 539 +evaddumiaaw , 539 +evaddusiaaw , 539 +evaddw , 539 +evand , 540 +evandc , 540 +evcmpeq , 540 +evcmpgts , 540 +evcmpgtu , 541 +evcmplts , 541 +evcmpltu , 541 +evcntlsw , 542 +evcntlzw , 542 +evdivws , 542 +evdivwu , 543 +eveqv , 543 +evextsb , 543 +evextsh , 543 +evfsabs , 590 +evfsadd , 591 +evfscfsf , 595 +evfscfsi , 595 +evfscfuf , 595 +evfscfui , 595 +evfscmpeq , 593 +evfscmpgt , 592 +evfscmplt , 592 +evfsctsf , 597 +evfsctsi , 596 +evfsctsiz , 596 +evfsctuf , 597 +evfsctui , 596 +evfsctuiz , 596 +evfsdiv , 591 +evfsmul , 591 +evfsnabs , 590 +evfsneg , 590 +evfssub , 591 +evfststeq , 594 +evfststgt , 593 +evfststlt , 594 +evldd , 544 +evlddepx , 944 +evlddx , 544 +evldh , 544 +evldhx , 544 +evldw , 545 +evldwx , 545 +evlhhesplat , 545 +evlhhesplatx , 545 +evlhhossplat , 546 +evlhhossplatx , 546 +evlhhousplat , 546 +evlhhousplatx , 546 +evlwhe , 547 +evlwhex , 547 +evlwhos , 547 +evlwhosx , 547 +evlwhou , 548 +evlwhoux , 548 +evlwhsplat , 548 +evlwhsplatx , 548 +evlwwsplat , 549 +evlwwsplatx , 549 +evmergehi , 549 +evmergehilo , 550 +evmergelo , 549 +evmergelohi , 550 +evmhegsmfaa , 550 +evmhegsmfan , 550 +evmhegsmiaa , 551 +evmhegsmian , 551 +evmhegumiaa , 551 +evmhegumian , 551 +evmhesmf , 552 +evmhesmfa , 552 +evmhesmfaaw , 552 +evmhesmfanw , 552 +evmhesmi , 553 +evmhesmia , 553 +evmhesmiaaw , 553 +evmhesmianw , 553 +evmhessf , 554 +evmhessfa , 554 +evmhessfaaw , 555 +evmhessfanw , 555 +evmhessiaaw , 556 +evmhessianw , 556 +evmheumi , 557 +evmheumia , 557 +evmheumiaaw , 557 +evmheumianw , 557 +evmheusiaaw , 558 +evmheusianw , 558 +evmhogsmfaa , 559 +evmhogsmfan , 559 +evmhogsmiaa , 559 +evmhogsmian , 559 +evmhogumiaa , 560 +evmhogumian , 560 +evmhosmf , 560 +evmhosmfa , 560 +evmhosmfaaw , 561 +evmhosmfanw , 561 +evmhosmi , 561 +evmhosmia , 561 +evmhosmiaaw , 562 +evmhosmianw , 561 +evmhossf , 563 +evmhossfa , 563 +evmhossfaaw , 564 +evmhossfanw , 564 +evmhossiaaw , 565 +evmhossianw , 565 +evmhoumi , 565 +evmhoumia , 565 +evmhoumiaaw , 566 +evmhoumianw , 562 +evmhousiaaw , 566 +evmhousianw , 566 +evmra , 567 +evmwhsmf , 567 +evmwhsmfa , 567 +evmwhsmi , 567 +evmwhsmia , 567 +evmwhssf , 568 +evmwhssfa , 568 +evmwhumi , 568 +evmwhumia , 568 +evmwlsmiaaw , 569 +evmwlsmianw , 569 +evmwlssiaaw , 569 +evmwlssianw , 569 +evmwlumi , 570 +evmwlumia , 570 +evmwlumiaaw , 570 +evmwlumianw , 570 +evmwlusiaaw , 571 +evmwlusianw , 571 +evmwsmf , 571 +evmwsmfa , 571 +evmwsmfaa , 572 +evmwsmfan , 572 +evmwsmi , 572 +evmwsmia , 572 +evmwsmiaa , 572 +evmwsmian , 572 +evmwssf , 573 +evmwssfa , 573 +evmwssfaa , 573 +evmwssfan , 574 +evmwumi , 574 +evmwumia , 574 +evmwumiaa , 575 +evmwumian , 575 +evnand , 575 +evneg , 575 +evnor , 575 +evor , 576 +evorc , 576 +evrlw , 576 +evrlwi , 577 +evrndw , 577 +evsel , 577 +evslw , 578 +evslwi , 578 +evsplatfi , 578 +evsplati , 578 +evsrwis , 578 +evsrwiu , 578 +evsrws , 579 +evsrwu , 579 +evstdd , 579 +evstddepx , 944 +evstddx , 579 +evstdh , 580 +evstdhx , 580 +evstdw , 580 +evstdwx , 580 +evstwhe , 581 +evstwhex , 581 +evstwho , 581 +evstwhox , 581 +evstwwe , 581 +evstwwex , 581 +evstwwo , 582 +evstwwox , 582 +evsubfsmiaaw , 582 +evsubfssiaaw , 582 +evsubfumiaaw , 583 +evsubfusiaaw , 583 +evsubfw , 583 +evsubifw , 583 +evxor , 583 +extsb. , 109 +extsb , 109 +extsh. , 109 +extsh , 109 +extsw. , 113 +extsw , 113 +fabs. , 160 +fabs , 160 +fadd. , 161 +fadd , 161 +fadds. , 161 +fadds , 161 +fcfid. , 172 +fcfid , 172 +fcfids. , 173 +fcfids , 173 +fcfidu. , 173 +fcfidu , 173 +fcfidus. 
, 174 +fcfidus , 174 +fcmpo , 176 +fcmpu , 176 +fcpsgn. , 160 +fcpsgn , 160 +fctid. , 168 +fctid , 168 +fctidu. , 169 +fctidu , 169 +fctiduz. , 170 +fctiduz , 170 +fctidz. , 169 +fctidz , 169 +fctiw. , 170 +fctiw , 170 +fctiwu. , 171 +fctiwu , 171 +fctiwuz. , 172 +fctiwuz , 172 +fctiwz. , 171 +fctiwz , 171 +fdiv. , 162 +fdiv , 162 +fdivs. , 162 +fdivs , 162 +fmadd. , 166 +fmadd , 166 +fmadds. , 166 +fmadds , 166 +fmr. , 160 +fmr , 160 +fmsub. , 166 +fmsub , 166 +fmsubs. , 166 +fmsubs , 166 +fmul. , 162 +fmul , 162 +fmuls. , 162 +fmuls , 162 +fnabs. , 160 +fnabs , 160 +fneg. , 160 +fneg , 160 +fnmadd. , 167 +fnmadd , 167 +fnmadds. , 167 +fnmadds , 167 +fnmsub. , 167 +fnmsub , 167 +fnmsubs. , 167 +fnmsubs , 167 +fre. , 163 +fre , 163 +fres. , 163 +fres , 163 +frim. , 175 +frim , 175 +frin. , 175 +frin , 175 +frip. , 175 +frip , 175 +friz. , 175 +friz , 175 +frsp. , 168 +frsp , 168 +frsqrte. , 164 +frsqrte , 164 +frsqrtes. , 164 +frsqrtes , 164 +fsel. , 177 +fsel , 177 +fsqrt. , 163 +fsqrt , 163 +fsqrts. , 163 +fsqrts , 163 +fsub. , 161 +fsub , 161 +fsubs. , 161 +fsubs , 161 +ftdiv , 165 +ftsqrt , 165 +hrfid , 768 +icbi , 704 +icbiep , 942 +icblc , 1001 +icbt , 704 +icbtls , 1000 +ici , 1107 +icread , 1111 +isel , 105 +isync , 717 +lbarx , 718 +lbdx , 736 +lbepx , 935 +lbz , 73 +lbzcix , 777 +lbzu , 73 +lbzux , 73 +lbzx , 74 +ld , 78 +ldarx , 723 +ldbrx , 84 +ldcix , 777 +lddx , 736 +ldepx , 936 +ldu , 78 +ldux , 78 +ldx , 78 +lfd , 153 +lfddx , 736 +lfdepx , 943 +lfdp , 159 +lfdpx , 159 +lfdu , 153 +lfdux , 153 +lfdx , 153 +lfiwax , 154 +lfiwzx , 154 +lfs , 156 +lfsu , 156 +lfsux , 156 +lfsx , 156 +lha , 75 +lharx , 719 +lhau , 75 +lhaux , 75 +lhax , 75 +lhbrx , 83 +lhdx , 736 +lhepx , 935 +lhz , 74 +lhzcix , 777 +lhzu , 74 +lhzux , 74 +lhzx , 74 +lmw , 85 +lq , 779 +lswi , 87 +lswx , 87 +lvebx , 244 +lvehx , 241 +lvepx , 945 +lvepxl , 945 +lvewx , 241 +lvsl , 246 +lvsr , 246 +lvx , 242 +lvxl , 242 +lwa , 77 +lwarx , 718 +lwaux , 77 +lwax , 77 +lwbrx , 83 +lwdx , 736 +lwepx , 936 +lwz , 76 +lwzcix , 777 +lwzu , 76 +lwzux , 76 +lwzx , 76 +lxsdux , 366 +lxsdx , 366 +lxvd2ux , 366 +lxvd2x , 366 +lxvdsx , 367 +lxvw4ux , 367 +lxvw4x , 367 +macchwo. , 619 +macchwo , 619 +macchw. , 619 +macchw , 619 +macchwso. , 619 +macchwso , 619 +macchws. , 619 +macchws , 619 +macchwsuo. , 620 +macchwsuo , 620 +macchwsu. , 620 +macchwsu , 620 +macchwuo. , 620 +macchwuo , 620 +macchwu. , 620 +macchwu , 620 +machhwo. , 621 +machhwo , 621 +machhw. , 621 +machhw , 621 +machhwso. , 621 +machhwso , 621 +machhws. , 621 +machhws , 621 +machhwsuo. , 622 +machhwsuo , 622 +machhwsu. , 622 +machhwsu , 622 +machhwuo. , 622 +machhwuo , 622 +machhwu. , 622 +machhwu , 622 +maclhwo. , 623 +maclhwo , 623 +maclhw. , 623 +maclhw , 623 +maclhwso. , 623 +maclhwso , 623 +maclhws. , 623 +maclhws , 623 +maclhwsuo. , 624 +maclhwsuo , 624 +maclhwsu. , 624 +maclhwsu , 624 +maclhwuo. , 624 +maclhwuo , 624 +maclhwu. , 624 +maclhwu , 624 +mbar , 727 +mcrf , 66 +mcrfs , 178 +mcrxr , 132 +mfcr , 130 +mfdcr , 931 +mfdcrux , 132 +mfdcrx , 931 +mffs. , 178 +mffs , 178 +mfmsr , 787 +mfocrf , 131 +mfpmr , 1122 +mfspr , 129 +mfsr , 830 +mfsrin , 830 +mftb , 732 +mfvscr , 297 +msgclr , 1102 +msgsnd , 1102 +mtcrf , 130 +mtdcr , 930 +mtdcrux , 132 +mtdcrx , 930 +mtfsb0. , 180 +mtfsb0 , 180 +mtfsb1. , 180 +mtfsb1 , 180 +mtfsf. , 179 +mtfsf , 179 +mtfsfi. , 179 +mtfsfi , 179 +mtmsr , 931 +mtmsr , 785 +mtmsrd , 786 +mtocrf , 131 +mtpmr , 1122 +mtspr , 128 +mtsr , 824 +mtsrin , 824 +mtvscr , 297 +mulchw. , 624 +mulchw , 624 +mulchwu. 
, 624
+mulchwu , 624
+mulhd. , 99
+mulhd , 99
+mulhdu. , 99
+mulhdu , 99
+mulhhw. , 625
+mulhhw , 625
+mulhhwu. , 625
+mulhhwu , 625
+mulhw. , 95
+mulhw , 95
+mulhwu. , 95
+mulhwu , 95
+mulldo. , 99
+mulldo , 99
+mulld. , 99
+mulld , 99
+mullhw. , 625
+mullhw , 625
+mullhwu. , 625
+mullhwu , 625
+mulli , 95
+mullwo. , 95
+mullwo , 95
+mullw. , 95
+mullw , 95
+nand. , 108
+nand , 108
+nap , 770
+nego. , 94
+nego , 94
+neg. , 94
+neg , 94
+nmacchwo. , 626
+nmacchwo , 626
+nmacchw. , 626
+nmacchw , 626
+nmacchwso. , 626
+nmacchwso , 626
+nmacchws. , 626
+nmacchws , 626
+nmachhwo. , 627
+nmachhwo , 627
+nmachhw. , 627
+nmachhw , 627
+nmachhwso. , 627
+nmachhwso , 627
+nmachhws. , 627
+nmachhws , 627
+nmaclhwo. , 628
+nmaclhwo , 628
+nmaclhw. , 628
+nmaclhw , 628
+nmaclhwso. , 628
+nmaclhwso , 628
+nmaclhws. , 628
+nmaclhws , 628
+nor. , 109
+nor , 109
+or. , 108
+or , 108
+orc. , 109
+orc , 109
+ori , 106
+oris , 107
+popcntb , 111
+popcntd , 113
+popcntw , 111
+prtyd , 112
+prtyw , 112
+rfci , 917
+rfdi , 918
+rfgi , 919
+rfi , 917
+rfid , 768
+rfmci , 918
+rldcl. , 119
+rldcl , 119
+rldcr. , 120
+rldcr , 120
+rldic. , 119
+rldic , 119
+rldicl. , 118
+rldicl , 118
+rldicr. , 118
+rldicr , 118
+rldimi. , 120
+rldimi , 120
+rlwimi. , 117
+rlwimi , 117
+rlwinm. , 115
+rlwinm , 115
+rlwnm. , 116
+rlwnm , 116
+rvwinkle , 770
+sc , 67
+slbfee. , 822
+slbia , 819
+slbie , 818
+slbmfee , 821
+slbmfev , 820
+slbmte , 820
+sld. , 123
+sld , 123
+sleep , 770
+slw. , 121
+slw , 121
+srad. , 124
+srad , 124
+sradi. , 124
+sradi , 124
+sraw. , 122
+sraw , 122
+srawi. , 122
+srawi , 122
+srd. , 123
+srd , 123
+srw. , 121
+srw , 121
+stb , 79
+stbcix , 86
+stbcx. , 124
+stbdx , 41
+stbepx , 57
+stbu , 79
+stbux , 79
+stbx , 79
+std , 82
+stdbrx , 84
+stdcix , 86
+stdcx. , 127
+stddx , 41
+stdepx , 58
+stdu , 82
+stdux , 82
+stdx , 82
+stfd , 57
+stfddx , 41
+stfdepx , 63
+stfdp , 59
+stfdpx , 59
+stfdu , 57
+stfdux , 57
+stfdx , 57
+stfiwx , 58
+stfs , 56
+stfsu , 56
+stfsux , 56
+stfsx , 56
+sth , 80
+sthbrx , 33
+sthcix , 86
+sthcx. , 125
+sthdx , 737
+sthepx , 937
+sthu , 80
+sthux , 80
+sthx , 80
+stmw , 85
+stq , 779
+stswi , 88
+stswx , 88
+stvebx , 244
+stvehx , 244
+stvepx , 946
+stvepxl , 946
+stvewx , 245
+stvx , 242
+stvxl , 245
+stw , 81
+stwbrx , 83
+stwcix , 778
+stwcx. , 722
+stwdx , 737
+stwepx , 938
+stwu , 81
+stwux , 81
+stwx , 81
+stxsdux , 368
+stxsdx , 368
+stxvd2ux , 368
+stxvd2x , 368
+stxvw4ux , 369
+stxvw4x , 369
+subfo. , 91
+subfo , 91
+subf. , 91
+subf , 91
+subfco. , 92
+subfco , 92
+subfc. , 92
+subfc , 92
+subfeo. , 93
+subfeo , 93
+subfe. , 93
+subfe , 93
+subfic , 92
+subfmeo. , 93
+subfmeo , 93
+subfme. , 93
+subfme , 93
+subfzeo. , 94
+subfzeo , 94
+subfze. , 94
+subfze , 94
+sync , 725
+td , 105
+tdi , 105
+tlbia , 828
+tlbie , 39
+tlbiel , 42
+tlbilx , 1011
+tlbivax , 1009
+tlbre , 1016
+tlbsrx. , 1015
+tlbsx , 1013
+tlbsync , 831
+tlbwe , 1018
+tw , 104
+twi , 104
+vaddcuw , 258
+vaddfp , 287
+vaddsbs , 258
+vaddshs , 258
+vaddsws , 258
+vaddubm , 259
+vaddubs , 260
+vadduhm , 259
+vadduhs , 260
+vadduwm , 259
+vadduws , 260
+vand , 282
+vandc , 282
+vavgsb , 273
+vavgsh , 273
+vavgsw , 273
+vavgub , 274
+vavguh , 274
+vavguw , 274
+vcfsx , 291
+vcfux , 291
+vcmpbfp. , 293
+vcmpbfp , 293
+vcmpeqfp. , 293
+vcmpeqfp , 293
+vcmpequb. , 279
+vcmpequb , 279
+vcmpequh. , 279
+vcmpequh , 279
+vcmpequw. , 280
+vcmpequw , 280
+vcmpgefp. , 294
+vcmpgefp , 294
+vcmpgtfp. , 294
+vcmpgtfp , 294
+vcmpgtsb. , 280
+vcmpgtsb , 280
+vcmpgtsh. , 280
+vcmpgtsh , 280
+vcmpgtsw. 
, 280 +vcmpgtsw , 280 +vcmpgtub. , 281 +vcmpgtub , 281 +vcmpgtuh. , 281 +vcmpgtuh , 281 +vcmpgtuw. , 281 +vcmpgtuw , 281 +vctsxs , 290 +vctuxs , 290 +vexptefp , 295 +vlogefp , 295 +vmaddfp , 288 +vmaxfp , 289 +vmaxsb , 275 +vmaxsh , 275 +vmaxsw , 275 +vmaxub , 276 +vmaxuh , 276 +vmaxuw , 276 +vmhaddshs , 266 +vmhraddshs , 266 +vminfp , 289 +vminsb , 277 +vminsh , 277 +vminsw , 277 +vminub , 278 +vminuh , 278 +vminuw , 278 +vmladduhm , 267 +vmrghb , 252 +vmrghh , 252 +vmrghw , 252 +vmrglb , 253 +vmrglh , 253 +vmrglw , 253 +vmsummbm , 268 +vmsumshm , 268 +vmsumshs , 269 +vmsumubm , 267 +vmsumuhm , 269 +vmsumuhs , 270 +vmulesb , 264 +vmulesh , 264 +vmuleub , 264 +vmuleuh , 264 +vmulosb , 265 +vmulosh , 265 +vmuloub , 265 +vmulouh , 265 +vnmsubfp , 288 +vnor , 282 +vor , 282 +vperm , 255 +vpkpx , 247 +vpkshss , 248 +vpkshus , 248 +vpkswss , 248 +vpkswus , 248 +vpkuhum , 249 +vpkuhus , 249 +vpkuwum , 249 +vpkuwus , 249 +vrefp , 296 +vrfim , 292 +vrfin , 292 +vrfip , 292 +vrfiz , 292 +vrlb , 283 +vrlh , 283 +vrlw , 283 +vrsqrtefp , 296 +vsel , 255 +vsl , 256 +vslb , 284 +vsldoi , 256 +vslh , 284 +vslo , 256 +vslw , 284 +vspltb , 254 +vsplth , 254 +vspltisb , 254 +vspltish , 254 +vspltisw , 254 +vspltw , 254 +vsr , 257 +vsrab , 286 +vsrah , 286 +vsraw , 286 +vsrb , 285 +vsrh , 285 +vsro , 257 +vsrw , 285 +vsubcuw , 261 +vsubfp , 287 +vsubsbs , 261 +vsubshs , 261 +vsubsws , 261 +vsububm , 262 +vsububs , 263 +vsubuhm , 262 +vsubuhs , 262 +vsubuwm , 262 +vsubuws , 263 +vsum2sws , 271 +vsum4sbs , 272 +vsum4shs , 272 +vsum4ubs , 272 +vsumsws , 271 +vupkhpx , 250 +vupkhsb , 250 +vupkhsh , 250 +vupklpx , 251 +vupklsb , 251 +vupklsh , 251 +vxor , 282 +wait , 728 +wrtee , 932 +wrteei , 933 +xor. , 108 +xor , 108 +xori , 107 +xoris , 107 +xsabsdp , 369 +xsadddp , 370 +xscmpodp , 375 +xscmpudp , 377 +xscpsgndp , 379 +xscvdpsp , 380 +xscvdpsxds , 381 +xscvdpsxws , 383 +xscvdpuxds , 385 +xscvdpuxws , 387 +xscvspdp , 389 +xscvsxddp , 389 +xscvuxddp , 390 +xsdivdp , 391 +xsmaddadp , 393 +xsmaddmdp , 393 +xsmaxdp , 396 +xsmindp , 398 +xsmsubadp , 400 +xsmsubmdp , 400 +xsmuldp , 403 +xsnabsdp , 405 +xsnegdp , 405 +xsnmaddadp , 406 +xsnmaddmdp , 406 +xsnmsubadp , 411 +xsnmsubmdp , 411 +xsrdpi , 414 +xsrdpic , 415 +xsrdpim , 416 +xsrdpip , 416 +xsrdpiz , 417 +xsredp , 418 +xsrsqrtedp , 419 +xssqrtdp , 420 +xssubdp , 421 +xstdivdp , 423 +xstsqrtdp , 424 +xvabsdp , 425 +xvabssp , 425 +xvadddp , 426 +xvaddsp , 430 +xvcmpeqdp , 432 +xvcmpeqdp. , 432 +xvcmpeqsp , 433 +xvcmpeqsp. , 433 +xvcmpgedp , 434 +xvcmpgedp. , 434 +xvcmpgesp , 435 +xvcmpgesp. , 435 +xvcmpgtdp , 436 +xvcmpgtdp. , 436 +xvcmpgtsp , 437 +xvcmpgtsp. 
, 437 +xvcpsgndp , 438 +xvcpsgnsp , 438 +xvcvdpsp , 439 +xvcvdpsxds , 440 +xvcvdpsxws , 442 +xvcvdpuxds , 444 +xvcvdpuxws , 446 +xvcvspdp , 448 +xvcvspsxds , 449 +xvcvspsxws , 451 +xvcvspuxds , 453 +xvcvspuxws , 455 +xvcvsxddp , 457 +xvcvsxdsp , 457 +xvcvsxwdp , 458 +xvcvsxwsp , 458 +xvcvuxddp , 459 +xvcvuxdsp , 459 +xvcvuxwdp , 460 +xvcvuxwsp , 460 +xvdivdp , 461 +xvdivsp , 463 +xvmaddadp , 465 +xvmaddasp , 465 +xvmaddmdp , 468 +xvmaddmsp , 468 +xvmaxdp , 471 +xvmaxsp , 473 +xvmindp , 475 +xvminsp , 477 +xvmsubadp , 479 +xvmsubasp , 479 +xvmsubmdp , 482 +xvmsubmsp , 482 +xvmuldp , 485 +xvmulsp , 487 +xvnabsdp , 489 +xvnabssp , 489 +xvnegdp , 490 +xvnegsp , 490 +xvnmaddadp , 491 +xvnmaddasp , 491 +xvnmaddmdp , 496 +xvnmaddmsp , 496 +xvnmsubadp , 499 +xvnmsubasp , 499 +xvnmsubmdp , 502 +xvnmsubmsp , 502 +xvrdpi , 505 +xvrdpic , 506 +xvrdpim , 506 +xvrdpip , 507 +xvrdpiz , 507 +xvredp , 508 +xvresp , 509 +xvrspi , 510 +xvrspic , 510 +xvrspim , 511 +xvrspip , 511 +xvrspiz , 512 +xvrsqrtedp , 513 +xvrsqrtesp , 514 +xvsqrtdp , 515 +xvsqrtsp , 516 +xvsubdp , 517 +xvsubsp , 519 +xvtdivdp , 521 +xvtdivsp , 522 +xvtsqrtdp , 523 +xvtsqrtsp , 523 +xxland , 524 +xxlandc , 524 +xxlnor , 525 +xxlor , 525 +xxlxor , 526 +xxmrghw , 527 +xxmrglw , 527 +xxpermdi , 528 +xxsel , 528 +xxsldwi , 529 +xxspltw , 529 +@PowerISA_V2.07B.pdf [Power ISA Version 2.07 B April 9, 2015] +e_b,1307 +e_bl,1307 +se_b,1307 +se_bl,1307 +e_bc,1307 +e_bcl,1307 +se_bc,1307 +se_bctr,1308 +se_bctrl,1308 +se_blr,1308 +se_blrl,1308 +se_sc,1309 +e_sc,1309 +se_illegal,1310 +se_rfmci,1310 +se_rfci,1311 +se_rfi,1311 +se_rfdi,1312 +se_rfgi,1312 +e_crand,1313 +e_crandc,1313 +e_creqv,1313 +e_crnand,1313 +e_crnor,1314 +e_cror,1314 +e_crorc,1314 +e_crxor,1314 +e_mcrf,1314 +e_lbz,1317 +se_lbz,1317 +e_lbzu,1317 +e_lha,1317 +e_lhz,1317 +se_lhz,1317 +e_lhau,1318 +e_lhzu,1318 +e_lwz,1318 +se_lwz,1318 +e_lwzu,1319 +e_stb,1320 +se_stb,1320 +e_stbu,1321 +e_sth ,1321 +se_sth,1321 +e_sthu,1321 +e_stw,1322 +se_stw,1322 +e_stwu,1322 +e_lmw,1323 +e_stmw,1323 +se_add,1325 +e_add16i,1325 +e_add2i.,1325 +e_add2is,1325 +e_addi,1325 +e_addi.,1325 +se_addi,1325 +e_addic,1326 +e_addic.,1326 +se_sub,1326 +se_subf,1326 +e_subfic,1326 +e_subfic.,1326 +se_subi,1326 +se_subi.,1326 +e_mulli,1327 +e_mull2i.,1327 +se_mullw,1327 +se_neg,1327 +se_btsti,1328 +e_cmp16i.,1328 +e_cmpi,1329 +se_cmp,1329 +se_cmpi,1329 +e_cmpl16i.,1329 +e_cmpli,1330 +se_cmpl,1330 +se_cmpli,1330 +e_cmph,1330 +se_cmph,1331 +e_cmph16i.,1331 +e_cmphl,1331 +se_cmphl,1331 +e_cmphl16i.,1332 +e_and2i.,1333 +e_and2is.,1333 +e_andi,1333 +e_andi.,1333 +se_andi,1333 +e_or2i,1334 +e_or2is,1334 +e_ori,1334 +e_ori.,1334 +e_xori,1334 +e_xori.,1334 +se_and,1334 +se_and.,1334 +se_andc,1334 +se_or,1335 +se_not,1335 +se_bclri,1335 +se_bgeni,1335 +se_bmaski,1335 +se_bseti,1335 +se_extsb,1336 +se_extsh,1336 +se_extzb,1336 +se_extzh,1336 +e_li,1336 +se_li,1336 +e_lis,1336 +se_mfar,1337 +se_mr,1337 +se_mtar,1337 +e_rlw,1338 +e_rlw.,1338 +e_rlwi,1338 +e_rlwi.,1338 +e_rlwimi,1338 +e_rlwinm,1338 +e_slwi,1339 +e_slwi.,1339 +se_slwi,1339 +se_slw,1339 +se_srawi,1339 +se_sraw,1340 +e_srwi,1340 +e_srwi.,1340 +se_srwi,1340 +se_srw,1340 +se_mfctr,1341 +se_mtctr,1341 +se_mflr,1341 +se_mtlr,1341 +se_isync,1342 +@PowerISA_V3.0.pdf [Power ISA Version 3.0 November 30, 2015] +bctar,58 +bctarl,58 +clrbhrb,62 +fmrgew,169 +fmrgow,170 +lqarx,893 +mfbhrbe,62 +msgclrp,1144 +msgsndp,1143 +rfebb,927 +stqcx.,894 +tabort.,913 +tabortdc.,915 +tabortdci.,915 +tabortwc.,914 +tabortwci.,914 +tbegin.,911 +tcheck,916 +tend.,912 +trechkpt.,988
+treclaim.,987 +tsr.,916 +addpcis,87 +cmpeqb,106 +cmprb,105 +cnttzw,113 +cnttzw.,113 +cnttzd,116 +cnttzd.,116 +copy,876 +cp_abort,878 +darn,97 +dtstsfi,222 +dtstsfiq,222 +extswsli,127 +extswsli.,127 +ldat,882 +ldmx,72 +lwat,882 +maddhd,99 +maddhdu,99 +maddld,99 +mcrxrx,137 +modsd,102 +modsw,94 +modud,102 +moduw,94 +msgsync,1144 +paste,877 +paste.,877 +setb,139 +slbieg,1043 +slbsync,1049 +stdat,884 +stop,975 +stwat,884 +wait,898 +bcdadd.,369 +bcdsub.,369 +vaddcuq,293 +vaddecuq,293 +vaddeuqm,293 +vaddudm,290 +vadduqm,290 +vbpermq,367 +vcipher,354 +vcipherlast,354 +vclzb,361 +vclzd,361 +vclzh,361 +vclzw,361 +vcmpequd,325 +vcmpequd.,325 +vcmpgtsd,326 +vcmpgtsd.,326 +vcmpgtud,328 +vcmpgtud.,328 +veqv,333 +vgbbd,360 +vmaxsd,320 +vmaxud,320 +vminsd,322 +vminud,322 +vmrgew,277 +vmrgow,277 +vmulesw,303 +vmuleuw,303 +vmulosw,303 +vmulouw,303 +vmuluwm,304 +vnand,333 +vncipher,355 +vncipherlast,355 +vorc,334 +vpermxor,359 +vpksdss,268 +vpksdus,269 +vpkudum,271 +vpkudus,271 +vpmsumb,357 +vpmsumd,357 +vpmsumh,358 +vpmsumw,358 +vpopcntb,366 +vpopcntd,366 +vpopcnth,366 +vpopcntw,366 +vrld,336 +vsbox,355 +vshasigmad,356 +vshasigmaw,356 +vsld,337 +vsrad,339 +vsrd,338 +vsubcuq,299 +vsubecuq,299 +vsubeuqm,299 +vsubudm,297 +vsubuqm,299 +vupkhsw,274 +vupklsw,274 +bcdcfn.,370 +bcdcfsq.,374 +bcdcfz.,371 +bcdcpsgn.,376 +bcdctn.,372 +bcdctsq.,374 +bcdctz.,373 +bcds.,377 +bcdsetsgn.,376 +bcdsr.,379 +bcdtrunc.,380 +bcdus.,378 +bcdutrunc.,381 +vabsdub,318 +vabsduh,318 +vabsduw,319 +vbpermd,367 +vclzlsbb,363 +vcmpneb,330 +vcmpneb.,330 +vcmpneh,331 +vcmpneh.,331 +vcmpnew,332 +vcmpnew.,332 +vcmpnezb,330 +vcmpnezb.,330 +vcmpnezh,331 +vcmpnezh.,331 +vcmpnezw,332 +vcmpnezw.,332 +vctzb,362 +vctzh,362 +vctzd,362 +vctzlsbb,363 +vctzw,362 +vextractd,287 +vextractub,287 +vextractuh,287 +vextractuw,287 +vextsb2d,314 +vextsb2w,314 +vextsh2d,314 +vextsh2w,314 +vextsw2d,315 +vextublx,364 +vextubrx,364 +vextuhlx,364 +vextuhrx,364 +vextuwlx,365 +vextuwrx,365 +vinsertb,288 +vinsertd ,288 +vinserth,288 +vinsertw,288 +vmul10cuq,375 +vmul10ecuq,375 +vmul10euq,375 +vmul10uq,375 +vnegd,313 +vnegw,313 +vpermr,280 +vprtybd,335 +vprtybq,335 +vprtybw,335 +vrldmi,341 +vrldnm,341 +vrlwmi,340 +vrlwnm,340 +vslv,285 +vsrv,285 +lxsiwax,502 +lxsiwzx,503 +lxsspx,504 +mfvsrd,129 +mfvsrwz,130 +mtvsrd,131 +mtvsrwa,131 +mtvsrwz,132 +stxsiwx,519 +stxsspx,521 +xsaddsp,537 +xscvdpspn,557 +xscvspdpn,578 +xscvsxdsp,579 +xscvuxdsp,581 +xsdivsp,586 +xsmaddasp,593 +xsmaddmsp,593 +xsmsubasp,614 +xsmsubmsp,614 +xsmulsp,624 +xsnmaddasp,633 +xsnmaddmsp,633 +xsnmsubasp,642 +xsnmsubmsp,642 +xsresp,653 +xsrsp,658 +xsrsqrtesp,660 +xssqrtsp,664 +xssubsp,669 +xxleqv,790 +xxlnand,790 +xxlorc,791 +lxsd,499 +lxsibzx,501 +lxsihzx,501 +lxssp,504 +lxv,511 +lxvx,511 +lxvb16x,506 +lxvh8x,514 +lxvl,508 +lxvll,510 +lxvwsx,516 +mfvsrld,129 +mtvsrdd,132 +mtvsrws,133 +stxsd,517 +stxsibx,518 +stxsihx,518 +stxssp,502 +stxv,526 +stxvb16x,522 +stxvh8x,524 +stxvl,526 +stxvll,528 +stxvx,529 +xsabsqp,531 +xsaddqp,539 +xsaddqpo,539 +xscmpeqdp,543 +xscmpexpdp,541 +xscmpexpqp,542 +xscmpgedp,544 +xscmpgtdp,545 +xscmpnedp,546 +xscmpoqp,549 +xscmpuqp,552 +xscpsgnqp,553 +xscvdphp,554 +xscvdpqp,555 +xscvhpdp,566 +xscvqpdp,567 +xscvqpdpo,567 +xscvqpsdz,568 +xscvqpswz,570 +xscvqpudz,572 +xscvqpuwz,574 +xscvsdqp,576 +xscvudqp,580 +xsdivqp,584 +xsdivqpo,584 +xsiexpdp,588 +xsiexpqp,589 +xsmaddqp,596 +xsmaddqpo,596 +xsmaxcdp,601 +xsmaxjdp,603 +xsmincdp,607 +xsminjdp,609 +xsmsubqp,617 +xsmsubqpo,617 +xsmulqp,622 +xsmulqpo,622 +xsnabsqp,626 +xsnegqp,627 +xsnmaddqp,636 +xsnmaddqpo,636 +xsnmsubqp,645
+xsnmsubqpo,645 +xsrqpi,654 +xsrqpix,654 +xsrqpxp,656 +xssqrtqp,662 +xssqrtqpo,662 +xssubqp,667 +xssubqpo,667 +xststdcdp,673 +xststdcqp,674 +xststdcsp,675 +xsxexpdp,676 +xsxexpqp,676 +xsxsigdp,677 +xsxsigqp,677 +xvcmpnedp,691 +xvcmpnedp.,691 +xvcmpnesp,692 +xvcmpnesp.,692 +xvcvhpsp,703 +xvcvsphp,705 +xviexpdp,722 +xviexpsp,722 +xvtstdcdp,782 +xvtstdcsp,783 +xvxexpdp,784 +xvxexpsp,784 +xvxsigdp,785 +xvxsigsp,785 +xxbrd,786 +xxbrh,786 +xxbrq,787 +xxbrw,787 +xxextractuw,788 +xxinsertw,788 +xxperm,794 +xxpermr,794 +xxspltib,796 diff --git a/src/third-party/sleigh/processors/PowerPC/data/manuals/PowerPC.idx b/src/third-party/sleigh/processors/PowerPC/data/manuals/PowerPC.idx new file mode 100644 index 00000000..985fc2e3 --- /dev/null +++ b/src/third-party/sleigh/processors/PowerPC/data/manuals/PowerPC.idx @@ -0,0 +1,615 @@ +@powerpc.pdf[PowerPC® Microprocessor Family: The Programming Environments Manual for 32 and 64-bit Microprocessors, Version 2.3, March 31, 2005] +add , 353 +add. , 353 +addo , 353 +addo. , 353 +addc , 355 +addc. , 355 +addco , 355 +addco. , 355 +adde , 356 +adde. , 356 +addeo , 356 +addeo. , 356 +addi , 357 +addic , 358 +addic. , 358 +addis , 360 +addme , 361 +addme. , 361 +addmeo , 361 +addmeo. , 361 +addze , 362 +addze. , 362 +addzeo , 362 +addzeo. , 362 +and , 363 +and. , 363 +andc , 364 +andc. , 364 +andi. , 365 +andis. , 366 +b , 367 +ba , 367 +bl , 367 +ble, 367 +bla , 367 +bc , 368 +bca , 368 +bcl , 368 +bcla , 368 +bcctr , 369 +bcctrl , 369 +bclr , 370 +bclrl , 370 +bdnzlr , 370 +bltctr , 369 +bltlr , 370 +bnectr , 369 +bnelr , 370 +clrldi , 534 +clrlsldi , 533 +clrlslwi , 538 +clrlwi , 538 +clrrdi , 535 +clrrwi , 538 +cmp , 371 +cmpd , 371 +cmpdi , 372 +cmpi , 372 +cmpl , 373 +cmpld , 373 +cmpldi , 374 +cmpli , 374 +cmplw , 373 +cmplwi , 374 +cmpw , 371 +cmpwi , 372 +cntlzd , 375 +cntlzd. , 375 +cntlzw , 376 +cntlzw. , 376 +crand , 377 +crandc , 378 +crclr , 384 +creqv , 379 +crmove , 382 +crnand , 380 +crnor , 381 +crnot , 381 +cror , 382 +crorc , 383 +crset , 379 +crxor , 384 +dcba , 721 +dcbf , 385 +dcbi , 386 +dcbst , 387 +dcbt , 388 +dcbtst , 390 +dcbz , 391 +divd , 393 +divd. , 393 +divdo , 393 +divdo. , 393 +divdu , 394 +divdu. , 394 +divduo , 394 +divduo. , 394 +divw , 395 +divw. , 395 +divwo , 395 +divwo. , 395 +divwu , 396 +divwu. , 396 +divwuo , 396 +divwuo. , 396 +eciwx , 397 +ecowx , 398 +eieio , 399 +eqv , 401 +eqv. , 401 +extldi , 535 +extlwi , 538 +extrdi , 534 +extrwi , 538 +extsb , 402 +extsb. , 402 +extsh , 403 +extsh. , 403 +extsw , 404 +extsw. , 404 +fabs , 405 +fabs. , 405 +fadd , 406 +fadd. , 406 +fadds , 407 +fadds. , 407 +fcfid , 408 +fcfid. , 408 +fcmpo , 409 +fcmpu , 410 +fctid , 411 +fctid. , 411 +fctidz , 412 +fctidz. , 412 +fctiw , 413 +fctiw. , 413 +fctiwz , 414 +fctiwz. , 414 +fdiv , 415 +fdiv. , 415 +fdivs , 416 +fdivs. , 416 +fmadd , 417 +fmadd. , 417 +fmadds , 418 +fmadds. , 418 +fmr , 419 +fmr. , 419 +fmsub , 420 +fmsub. , 420 +fmsubs , 421 +fmsubs. , 421 +fmul , 422 +fmul. , 422 +fmuls , 423 +fmuls. , 423 +fnabs , 424 +fnabs. , 424 +fneg , 425 +fneg. , 425 +fnmadd , 426 +fnmadd. , 426 +fnmadds , 427 +fnmadds., 427 +fnmsub , 428 +fnmsub. , 428 +fnmsubs , 429 +fnmsubs., 429 +fres , 430 +fres. , 430 +frsp , 432 +frsp. , 432 +frsqrte , 433 +frsqrte., 433 +fsel , 435 +fsel. , 435 +fsqrt , 436 +fsqrt. , 436 +fsqrts , 437 +fsqrts. , 437 +fsub , 438 +fsub. , 438 +fsubs , 439 +fsubs.
, 439 +icbi , 440 +inslwi , 537 +insrdi , 536 +insrwi , 537 +isync , 441 +la , 357 +lbz , 442 +lbzu , 443 +lbzux , 444 +lbzx , 445 +ld , 446 +ldarx , 447 +ldu , 448 +ldux , 449 +ldx , 450 +lfd , 451 +lfdu , 452 +lfdux , 453 +lfdx , 454 +lfs , 455 +lfsu , 456 +lfsux , 457 +lfsx , 458 +lha , 459 +lhau , 460 +lhaux , 461 +lhax , 462 +lhbrx , 463 +lhz , 464 +lhzu , 465 +lhzux , 466 +lhzx , 467 +li , 357 +lis, 360 +lmw , 468 +lswi , 469 +lswx , 471 +lwa , 763 +lwarx , 474 +lwaux , 475 +lwax , 476 +lwbrx , 477 +lwz , 478 +lwzu , 479 +lwzux , 480 +lwzx , 481 +mcrf , 482 +mcrfs , 483 +mcrxr , 484 +mfcr , 485 +mfctr , 489 +mffs , 487 +mffs. , 487 +mflr , 489 +mfmsr , 488 +mfocrf , 486 +mfspr , 489 +mfsr , 492 +mfsrin , 494 +mftb , 496 +mftbu , 497 +mfxer , 489 +mr , 525 +mtcr , 498 +mtcrf , 498 +mtctr , 507 +mtfsb0 , 499 +mtfsb0. , 499 +mtfsb1 , 500 +mtfsb1. , 500 +mtfsf , 501 +mtfsf. , 501 +mtfsfi , 502 +mtfsfi. , 502 +mtlr , 507 +mtmsr , 503 +mtmsrd , 505 +mtocrf , 506 +mtspr , 507 +mtsr , 511 +mtsrd , 512 +mtsrdin , 513 +mtsrin , 514 +mtxer , 507 +mulhd , 515 +mulhd. , 515 +mulhdu , 516 +mulhdu. , 516 +mulhw , 517 +mulhw. , 517 +mulhwu , 518 +mulhwu. , 518 +mulld , 519 +mulld. , 519 +mulldo , 519 +mulldo. , 519 +mulli , 520 +mullw , 521 +mullw. , 521 +mullwo , 521 +mullwo. , 521 +nand , 522 +nand. , 522 +neg , 523 +neg. , 523 +nego , 523 +nego. , 523 +nop , 527 +nor , 524 +nor. , 524 +not , 524 +or , 525 +or. , 525 +orc , 526 +orc. , 526 +ori , 527 +oris , 528 +rfi , 529 +rfid , 530 +rldcl , 531 +rldcl. , 531 +rldcr , 532 +rldcr. , 532 +rldic , 533 +rldic. , 533 +rldicl , 534 +rldicl. , 534 +rldicr , 535 +rldicr. , 535 +rldimi , 536 +rldimi. , 536 +rlwimi , 537 +rlwimi. , 537 +rlwinm , 538 +rlwinm. , 538 +rlwnm , 540 +rlwnm. , 540 +rotld , 531 +rotldi , 534 +rotlw , 540 +rotlwi , 538 +rotrdi , 534 +rotrwi , 538 +sc , 541 +slbia , 542 +slbie , 543 +slbmfee , 544 +slbmfev , 545 +slbmte , 546 +sld , 547 +sld. , 547 +sldi , 535 +slw , 548 +slw. , 548 +slwi , 538 +srad , 549 +srad. , 549 +sradi , 550 +sradi. , 550 +sraw , 551 +sraw. , 551 +srawi , 552 +srawi. , 552 +srd , 553 +srd. , 553 +srdi , 534 +srw , 554 +srw. , 554 +srwi , 538 +stb , 555 +stbu , 556 +stbux , 557 +stbx , 558 +std , 559 +stdcx. , 560 +stdu , 562 +stdux , 563 +stdx , 564 +stfd , 565 +stfdu , 566 +stfdux , 567 +stfdx , 568 +stfiwx , 569 +stfs , 570 +stfsu , 571 +stfsux , 572 +stfsx , 573 +sth , 574 +sthbrx , 575 +sthu , 576 +sthux , 577 +sthx , 578 +stmw , 579 +stswi , 580 +stswx , 581 +stw , 582 +stwbrx , 583 +stwcx. , 584 +stwu , 586 +stwux , 587 +stwx , 588 +sub , 589 +subc , 590 +subf , 589 +subf. , 589 +subfo , 589 +subfo. , 589 +subfc , 590 +subfc. , 590 +subfco , 590 +subfco. , 590 +subfe , 591 +subfe. , 591 +subfeo , 591 +subfeo. , 591 +subfic , 592 +subfme , 593 +subfme. , 593 +subfmeo , 593 +subfmeo., 593 +subfze , 594 +subfze. , 594 +subfzeo , 594 +subfzeo., 594 +subi , 357 +subic , 358 +subic. , 359 +subis , 360 +sync , 595 +td , 597 +tdge , 597 +tdi , 598 +tdlnl , 597 +tdlti , 598 +tdnei , 598 +tlbia , 599 +tlbie , 600 +tlbiel , 601 +tlbsync , 603 +trap , 604 +tw , 604 +tweq , 604 +twgti , 605 +twi , 605 +twlge , 604 +twllei , 605 +xor , 606 +xor. 
, 606 +xori , 607 +xoris , 608 + + +@altivecpem.pdf [AltiVec Technology Programming Environments Manual, Rev.0.1 11/1998 (ALTIVECPEM/D)] + + +dss , 131 +dssall , 131 +dst , 132 +dstt , 132 +dstst , 134 +dststt , 134 +lvebx , 136 +lvehx , 138 +lvewx , 139 +lvsl , 140 +lvsr , 142 +lvx , 144 +lvxl , 145 +mfvscr , 146 +mtvscr , 147 +stvebx , 148 +stvehx , 149 +stvewx , 150 +stvx , 151 +stvxl , 152 +vaddcuw , 153 +vaddfp , 154 +vaddsbs , 155 +vaddshs , 156 +vaddsws , 157 +vaddubm , 158 +vaddubs , 159 +vadduhm , 160 +vadduhs , 161 +vadduwm , 162 +vadduws , 163 +vand , 164 +vandc , 165 +vavgsb , 166 +vavgsh , 167 +vavgsw , 168 +vavgub , 169 +vavguh , 170 +vavguw , 171 +vcfsx , 172 +vcfux , 173 +vcmpbfp , 174 +vcmpbfp. , 174 +vcmpeqfp , 176 +vcmpeqfp. , 176 +vcmpequb , 177 +vcmpequb. , 177 +vcmpequh , 178 +vcmpequh. , 178 +vcmpequw , 179 +vcmpequw. , 179 +vcmpgefp , 180 +vcmpgefp. , 180 +vcmpgtfp , 181 +vcmpgtfp. , 181 +vcmpgtsb , 182 +vcmpgtsb. , 182 +vcmpgtsh , 183 +vcmpgtsh. , 183 +vcmpgtsw , 184 +vcmpgtsw. , 184 +vcmpgtub , 185 +vcmpgtub. , 185 +vcmpgtuh , 186 +vcmpgtuh. , 186 +vcmpgtuw , 187 +vcmpgtuw. , 187 +vctsxs , 188 +vctuxs , 189 +vexptefp , 190 +vlogefp , 192 +vmaddfp , 194 +vmaxfp , 195 +vmaxsb , 196 +vmaxsh , 197 +vmaxsw , 198 +vmaxub , 199 +vmaxuh , 200 +vmaxuw , 201 +vmhaddshs , 202 +vmhraddshs , 203 +vminfp , 204 +vminsb , 205 +vminsh , 206 +vminsw , 207 +vminub , 208 +vminuh , 209 +vminuw , 210 +vmladduhm , 211 +vmrghb , 212 +vmrghh , 213 +vmrghw , 214 +vmrglb , 215 +vmrglh , 216 +vmrglw , 217 +vmsummbm , 218 +vmsumshm , 219 +vmsumshs , 220 +vmsumubm , 221 +vmsumuhm , 222 +vmsumuhs , 223 +vmulesb , 224 +vmulesh , 225 +vmuleub , 226 +vmuleuh , 227 +vmulosb , 228 +vmulosh , 229 +vmuloub , 230 +vmulouh , 231 +vnmsubfp , 232 +vnor , 233 +vor , 234 +vperm , 235 +vpkpx , 236 +vpkshss , 237 +vpkshus , 238 +vpkswss , 239 +vpkswus , 240 +vpkuhum , 241 +vpkuhus , 242 +vpkuwum , 243 +vpkuwus , 244 +vrefp , 245 +vrfim , 247 +vrfin , 248 +vrfip , 249 +vrfiz , 250 +vrlb , 251 +vrlh , 252 +vrlw , 253 +vrsqrtefp , 254 +vsel , 256 +vsl , 257 +vslb , 258 +vsldoi , 259 +vslh , 260 +vslo , 261 +vslw , 262 +vspltb , 263 +vsplth , 264 +vspltisb , 265 +vspltish , 266 +vspltisw , 267 +vspltw , 268 +vsr , 269 +vsrab , 271 +vsrah , 272 +vsraw , 273 +vsrb , 274 +vsrh , 275 +vsro , 276 +vsrw , 277 +vsubcuw , 278 +vsubfp , 279 +vsubsbs , 280 +vsubshs , 281 +vsubsws , 282 +vsububm , 283 +vsububs , 284 +vsubuhm , 285 +vsubuhs , 286 +vsubuwm , 287 +vsubuws , 288 +vsumsws , 289 +vsum2sws , 290 +vsum4sbs , 291 +vsum4shs , 292 +vsum4ubs , 293 +vupkhpx , 294 +vupkhsb , 295 +vupkhsh , 296 +vupklpx , 297 +vupklsb , 298 +vupklsh , 299 +vxor , 300 diff --git a/src/third-party/sleigh/processors/PowerPC/data/patterns/PPC_BE_patterns.xml b/src/third-party/sleigh/processors/PowerPC/data/patterns/PPC_BE_patterns.xml new file mode 100644 index 00000000..2d640dbb --- /dev/null +++ b/src/third-party/sleigh/processors/PowerPC/data/patterns/PPC_BE_patterns.xml @@ -0,0 +1,34 @@ + + + + 0x4e800020 + 010010.. 0x.. 0x.. ......00 + + + 10010100 00100001 11...... .....000 + 0x7c2c0b78 0x38 0x21 ........ ........ 0x91810000 + + + + + + + 010010.. 0x.. 0x.. ......00 + + + 10010100 00100001 11...... .....000 011111.. ...01000 00000010 10100110 + 10010100 00100001 11...... .....000 0x........ 011111.. ...01000 00000010 10100110 + 10010100 00100001 11...... .....000 0x........ 0x........ 011111.. ...01000 00000010 10100110 + 0x7c2c0b78 0x38 0x21 ........ ........ 0x91810000 + + + + + + + 10010100 00100001 11...... .....000 011111.. 
...01000 00000010 10100110 + + + + + diff --git a/src/third-party/sleigh/processors/PowerPC/data/patterns/patternconstraints.xml b/src/third-party/sleigh/processors/PowerPC/data/patterns/patternconstraints.xml new file mode 100644 index 00000000..eb73db5d --- /dev/null +++ b/src/third-party/sleigh/processors/PowerPC/data/patterns/patternconstraints.xml @@ -0,0 +1,5 @@ + + + PPC_BE_patterns.xml + + diff --git a/src/third-party/sleigh/processors/PowerPC/data/ppc64-r2CallStubs.xml b/src/third-party/sleigh/processors/PowerPC/data/ppc64-r2CallStubs.xml new file mode 100644 index 00000000..5be83ca9 --- /dev/null +++ b/src/third-party/sleigh/processors/PowerPC/data/ppc64-r2CallStubs.xml @@ -0,0 +1,155 @@ + + + + + 0x3d82.... # addis r12,r2,0x#### + 0xf84100.. # std r2,0x##(r1) + 0xe96c.... # ld r11,0x####(r12) + 0x7d6903a6 # mtspr CTR,r11 + 0xe84c.... # ld r2,0x####(r12) + 0xe96c.... # ld r11,0x####(r12) + 0x4e800420 # bctr + + + + + 0xf84100.. # std r2,0x##(r1) + 0xe962.... # ld r11,0x####(r2) + 0x7d6903a6 # mtspr CTR,r11 + 0xe962.... # ld r11,0x####(r2) + 0xe842.... # ld r2,0x####(r2) + 0x4e800420 # bctr + + + + + 0xf84100.. # std r2,0x##(r1) + 0xe982.... # ld r12,0x####(r2) + 0x7d8903a6 # mtspr CTR,r12 + 0x4e800420 # bctr + + + + + + 0x3d62.... # addis r11,r2,0x#### + 0xe98b.... # ld r12,0x####(r11) + 0x7d8903a6 # mtspr CTR,r12 + 0xe84b.... # ld r2,0x####(r11) + 0x4e800420 # bctr + + + + + 0x3d62.... # addis r11,r2,0x#### + 0xe98b.... # ld r12,0x####(r11) + 0x396b.... # addi r11,r11,0x#### + 0x7d8903a6 # mtspr CTR,r12 + 0xe84b.... # ld r2,0x####(r11) + 0xe96b.... # ld r11,0x####(r11) + 0x4e800420 # bctr + + + + + 0x3d62.... # addis r11,r2,0x#### + 0xe98b.... # ld r12,0x####(r11) + 0x7d8903a6 # mtspr CTR,r12 + 0x7d826278 # xor r2,r12,r12 + 0x7d6b1214 # add r11,r11,r2 + 0xe84b.... # ld r2,0x####(r11) + 0x4e800420 # bctr + + + + + 0x3d62.... # addis r11,r2,0x#### + 0xe98b.... # ld r12,0x####(r11) + 0x396b.... # addi r11,r11,0x#### + 0x7d8903a6 # mtspr CTR,r12 + 0x7d826278 # xor r2,r12,r12 + 0x7d6b1214 # add r11,r11,r2 + 0xe84b.... # ld r2,0x####(r11) + 0xe96b.... # ld r11,0x####(r11) + 0x4e800420 # bctr + + + + + 0xf84100.. # std r2,0x##(r1) + 0x3d62.... # addis r11,r2,0x#### + 0xe98b.... # ld r12,0x####(r11) + 0x7d8903a6 # mtspr CTR,r12 + 0xe84b.... # ld r2,0x####(r11) + 0x4e800420 # bctr + + + + + 0xf84100.. # std r2,0x##(r1) + 0x3d62.... # addis r11,r2,0x#### + 0xe98b.... # ld r12,0x####(r11) + 0x396b.... # addi r11,r11,0x#### + 0x7d8903a6 # mtspr CTR,r12 + 0xe84b.... # ld r2,0x####(r11) + 0xe96b.... # ld r11,0x####(r11) + 0x4e800420 # bctr + + + + + 0xf84100.. # std r2,0x##(r1) + 0x3d62.... # addis r11,r2,0x#### + 0xe98b.... # ld r12,0x####(r11) + 0x7d8903a6 # mtspr CTR,r12 + 0x7d826278 # xor r2,r12,r12 + 0x7d6b1214 # add r11,r11,r2 + 0xe84b.... # ld r2,0x####(r11) + 0x4e800420 # bctr + + + + + 0xf84100.. # std r2,0x##(r1) + 0x3d62.... # addis r11,r2,0x#### + 0xe98b.... # ld r12,0x####(r11) + 0x396b.... # addi r11,r11,0x#### + 0x7d8903a6 # mtspr CTR,r12 + 0x7d826278 # xor r2,r12,r12 + 0x7d6b1214 # add r11,r11,r2 + 0xe84b.... # ld r2,0x####(r11) + 0xe96b.... 
# ld r11,0x####(r11) + 0x4e800420 # bctr + + + + + + 0x3d820000 # addis r12,r2,0x#### + 0xe98c0000 # ld r12,0x####(r12) + 0x7d8903a6 # mtspr CTR,r12 + 0x4e800420 # bctr + + + + + 0xf8410000 # std r2,0x####(r1) + 0x3d820000 # addis r12,r2,0x#### + 0xe98c0000 # ld r12,0x####(r12) + 0x7d8903a6 # mtspr CTR,r12 + 0x4e800420 # bctr + + + diff --git a/src/third-party/sleigh/processors/PowerPC/src/main/java/ghidra/app/plugin/core/analysis/PPC64CallStubAnalyzer.java b/src/third-party/sleigh/processors/PowerPC/src/main/java/ghidra/app/plugin/core/analysis/PPC64CallStubAnalyzer.java new file mode 100644 index 00000000..842c6a9a --- /dev/null +++ b/src/third-party/sleigh/processors/PowerPC/src/main/java/ghidra/app/plugin/core/analysis/PPC64CallStubAnalyzer.java @@ -0,0 +1,388 @@ +/* ### + * IP: GHIDRA + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package ghidra.app.plugin.core.analysis; + +import java.io.FileNotFoundException; +import java.io.IOException; +import java.util.ArrayList; + +import org.xml.sax.SAXException; + +import generic.jar.ResourceFile; + +import ghidra.app.cmd.disassemble.DisassembleCommand; +import ghidra.app.cmd.function.CreateFunctionCmd; +import ghidra.app.cmd.function.CreateThunkFunctionCmd; +import ghidra.app.services.*; +import ghidra.app.util.importer.MessageLog; +import ghidra.framework.Application; +import ghidra.program.model.address.*; +import ghidra.program.model.lang.*; +import ghidra.program.model.listing.*; +import ghidra.program.model.mem.Memory; +import ghidra.program.model.mem.MemoryAccessException; +import ghidra.program.model.symbol.RefType; +import ghidra.program.model.symbol.SourceType; +import ghidra.program.util.*; +import ghidra.util.Msg; +import ghidra.util.bytesearch.*; +import ghidra.util.exception.*; +import ghidra.util.task.TaskMonitor; + +public class PPC64CallStubAnalyzer extends AbstractAnalyzer { + + private static final String NAME = "PPC64 ELF Call Stubs"; + private static final String DESCRIPTION = "Detect ELF Call Stubs and create thunk function"; + private static final String PROCESSOR_NAME = "PowerPC"; + + private static final String CALL_STUB_PATTERN_FILE = "ppc64-r2CallStubs.xml"; + + private static final String UNKNOWN_FUNCTION_NAME = "___UNKNOWN_CALL_STUB___"; + + private static boolean patternLoadFailed; + private static ArrayList<Pattern> beCallStubPatterns; + private static ArrayList<Pattern> leCallStubPatterns; + private static int maxPatternLength; + + private Register r2Reg; + private Register ctrReg; + + public PPC64CallStubAnalyzer() { + super(NAME, DESCRIPTION, AnalyzerType.FUNCTION_ANALYZER); + setDefaultEnablement(true); + setPriority(AnalysisPriority.FUNCTION_ANALYSIS.before()); + } + + @Override + public boolean canAnalyze(Program program) { + Language language = program.getLanguage(); + // TODO: what about 32/64 hybrid case?
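// Note on the pattern plumbing that follows: the ppc64-r2CallStubs.xml file above
// stores its stub patterns in big-endian form, and every PowerPC instruction is a
// single 4-byte word, so patternsLoaded() derives the little-endian pattern set by
// byte-reversing each word (see flipPatternBytes below). A minimal sketch of that
// transform, using a hypothetical standalone helper (not a Ghidra API):
//
//     static byte[] swapInstructionWords(byte[] in) {
//         byte[] out = in.clone(); // keep the big-endian original intact
//         for (int i = 0; i + 3 < out.length; i += 4) {
//             byte b = out[i];    out[i]     = out[i + 3]; out[i + 3] = b;
//             b = out[i + 1];     out[i + 1] = out[i + 2]; out[i + 2] = b;
//         }
//         return out;
//     }
//
// For example, the big-endian 'bctr' word 0x4e800420 (bytes 4e 80 04 20) becomes
// 20 04 80 4e, which is the byte order a little-endian image stores in memory.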
+ if (PROCESSOR_NAME.equals(language.getProcessor().toString()) && + language.getLanguageDescription().getSize() == 64 && + patternsLoaded(language.isBigEndian())) { + r2Reg = program.getRegister("r2"); + ctrReg = program.getRegister("CTR"); + return r2Reg != null && ctrReg != null; + } + return false; + } + + private static synchronized boolean patternsLoaded(boolean bigEndian) { + if (patternLoadFailed) { + return false; + } + + if (!bigEndian) { + if (leCallStubPatterns != null) { + return true; + } + if (!patternsLoaded(true)) { + return false; + } + leCallStubPatterns = flipPatterns(beCallStubPatterns); + return true; + } + + try { + ResourceFile patternFile = Application.getModuleDataFile(CALL_STUB_PATTERN_FILE); + + beCallStubPatterns = new ArrayList<>(); + Pattern.readPatterns(patternFile, beCallStubPatterns, null); + + maxPatternLength = 0; + for (Pattern pattern : beCallStubPatterns) { + int len = pattern.getSize(); + if ((len % 4) != 0) { + throw new SAXException("pattern must contain multiple of 4-bytes"); + } + if (len > maxPatternLength) { + maxPatternLength = len; + } + } + + } catch (FileNotFoundException e) { + Msg.error(PPC64CallStubAnalyzer.class, "PowerPC resource file not found: " + CALL_STUB_PATTERN_FILE); + patternLoadFailed = true; + return false; + } catch (SAXException | IOException e) { + Msg.error(PPC64CallStubAnalyzer.class, "Failed to parse byte pattern file: " + CALL_STUB_PATTERN_FILE, e); + patternLoadFailed = true; + return false; + } + + return true; + } + + private static ArrayList<Pattern> flipPatterns(ArrayList<Pattern> patternlist) { + + ArrayList<Pattern> list = new ArrayList<>(); + for (Pattern pat : patternlist) { + byte[] bytes = flipPatternBytes(pat.getValueBytes()); + byte[] mask = flipPatternBytes(pat.getMaskBytes()); + Pattern newPattern = new Pattern(new DittedBitSequence(bytes, mask), pat.getMarkOffset(), + pat.getPostRules(), pat.getMatchActions()); + list.add(newPattern); + } + return list; + } + + private static byte[] flipPatternBytes(byte[] bytes) { + for (int i = 0; i < bytes.length; i += 4) { + byte b = bytes[i]; + bytes[i] = bytes[i + 3]; + bytes[i + 3] = b; + b = bytes[i + 1]; + bytes[i + 1] = bytes[i + 2]; + bytes[i + 2] = b; + } + return bytes; + } + + @Override + public boolean added(Program program, AddressSetView set, TaskMonitor monitor, MessageLog log) + throws CancelledException { + + Memory memory = program.getMemory(); + Listing listing = program.getListing(); + ProgramContext programContext = program.getProgramContext(); + + SequenceSearchState sequenceSearchState = SequenceSearchState.buildStateMachine( + program.getMemory().isBigEndian() ?
beCallStubPatterns : leCallStubPatterns); + + monitor.setIndeterminate(false); + monitor.setMaximum(set.getNumAddresses()); + monitor.setProgress(0); + int functionCount = 0; + + // each address should correspond to a function + for (Function function : listing.getFunctions(set, true)) { + + monitor.checkCanceled(); + monitor.setProgress(functionCount++); + + Address entryAddr = function.getEntryPoint(); + boolean isThunk = function.isThunk(); + + Match stubMatch = null; + if (!isThunk) { + stubMatch = matchKnownCallStubs(entryAddr, memory, sequenceSearchState); + if (stubMatch == null) { + continue; // non-stub + } + } + else if (!thunksUnknownFunction(function)) { + continue; // previously resolved thunk + } + + RegisterValue r2Value = programContext.getRegisterValue(r2Reg, entryAddr); + if (r2Value == null || !r2Value.hasValue()) { + if (!isThunk) { // stubMatch is known + // Thunk unknown function for future processing once r2 is propagated + createThunk(program, entryAddr, stubMatch.getSequenceSize(), getUnknownFunction( + program).getEntryPoint()); + } + continue; + } + + int stubLength = stubMatch != null ? stubMatch.getSequenceSize() + : (int) function.getBody().getNumAddresses(); + + analyzeCallStub(program, function, stubLength, monitor); + } + + return true; + } + + private Match matchKnownCallStubs(Address addr, Memory memory, + SequenceSearchState sequenceSearchState) { + byte[] bytes = new byte[maxPatternLength]; + ArrayList<Match> matches = new ArrayList<>(); + int cnt = 0; + try { + cnt = memory.getBytes(addr, bytes); + } + catch (MemoryAccessException e) { + // ignore + } + if (cnt == 0) { + return null; + } + + byte[] searchBytes = bytes; + if (cnt != bytes.length) { + // although rare, shorten searchBytes if unable to fill + searchBytes = new byte[cnt]; + System.arraycopy(bytes, 0, searchBytes, 0, cnt); + } + + matches.clear(); + sequenceSearchState.apply(searchBytes, matches); + if (matches.size() == 0) { + return null; + } + + return matches.get(0); + } + + private void createThunk(Program program, Address stubAddr, int stubLength, + Address thunkedFunctionAddr) { + AddressSet stubBody = new AddressSet(stubAddr, stubAddr.add(stubLength - 1)); + CreateThunkFunctionCmd cmd = new CreateThunkFunctionCmd(stubAddr, stubBody, + thunkedFunctionAddr); + cmd.applyTo(program); + } + + private void analyzeCallStub(Program program, Function stubFunction, int stubLength, + TaskMonitor monitor) throws CancelledException { + + SymbolicPropogator symEval = new SymbolicPropogator(program); + symEval.setParamRefCheck(false); + symEval.setReturnRefCheck(false); + symEval.setStoredRefCheck(false); + + Address entryAddr = stubFunction.getEntryPoint(); + AddressSet stubBody = new AddressSet(entryAddr, entryAddr.add(stubLength - 1)); + + ContextEvaluator eval = new ContextEvaluatorAdapter() { + + @Override + public boolean followFalseConditionalBranches() { + return false; // should never happen - just in case + } + + @Override + public boolean evaluateReference(VarnodeContext context, Instruction instr, int pcodeop, Address address, + int size, RefType refType) { + return true; + } + + @Override + public boolean evaluateDestination(VarnodeContext context, Instruction instruction) { + + // We only handle indirect branch through CTR register + if (!"bctr".equals(instruction.getMnemonicString())) { + return true; + } + + // Change bctr flow to call-return + instruction.setFlowOverride(FlowOverride.CALL_RETURN); + + RegisterValue ctrValue = context.getRegisterValue(ctrReg); + if (ctrValue != null &&
ctrValue.hasValue()) { + Address destAddr = entryAddr.getNewAddress( + ctrValue.getUnsignedValue().longValue()); + Function destFunction = createDestinationFunction(program, destAddr, + instruction.getAddress(), context.getRegisterValue(r2Reg), monitor); + if (destFunction != null) { + if (!stubFunction.isThunk()) { + createThunk(program, entryAddr, stubLength, + destFunction.getEntryPoint()); + } + else { + stubFunction.setThunkedFunction(destFunction); + } + } + } + + return true; + } + + @Override + public boolean allowAccess(VarnodeContext context, Address address) { + return true; + } + }; + + symEval.flowConstants(entryAddr, stubBody, eval, false, monitor); + } + + private Function getUnknownFunction(Program program) { + + try { + return program.getExternalManager().addExtFunction(Library.UNKNOWN, + UNKNOWN_FUNCTION_NAME, null, SourceType.IMPORTED).getFunction(); + } + catch (InvalidInputException | DuplicateNameException e) { + throw new AssertException("unexpected", e); + } + } + + private boolean thunksUnknownFunction(Function function) { + Function thunkedFunction = function.getThunkedFunction(false); + if (thunkedFunction == null || !thunkedFunction.isExternal()) { + return false; + } + return UNKNOWN_FUNCTION_NAME.equals(thunkedFunction.getName()); + } + + private Function createDestinationFunction(Program program, Address addr, Address flowFromAddr, + RegisterValue regValue, TaskMonitor monitor) { + + Listing listing = program.getListing(); + BookmarkManager bookmarkMgr = program.getBookmarkManager(); + + if (!program.getMemory().contains(addr)) { + bookmarkMgr.setBookmark(flowFromAddr, BookmarkType.ERROR, "Bad Reference", "No memory for call stub destination at " + addr); + return null; + } + + Function function = listing.getFunctionAt(addr); + + if (regValue != null && regValue.hasValue()) { + ProgramContext programContext = program.getProgramContext(); + RegisterValue oldValue = programContext.getRegisterValue(regValue.getRegister(), addr); + if (oldValue == null || !oldValue.hasValue()) { + try { + programContext.setRegisterValue(addr, addr, regValue); + } catch (ContextChangeException e) { + throw new AssertException(e); + } + if (function != null) { + AutoAnalysisManager.getAnalysisManager(program).functionDefined(addr); + } + } + } + if (function != null) { + return function; + } + + CodeUnit cu = listing.getCodeUnitContaining(addr); + if (cu == null) { + throw new AssertException("expected code unit in memory"); + } + if (!addr.equals(cu.getMinAddress())) { + bookmarkMgr.setBookmark(cu.getMinAddress(), BookmarkType.ERROR, "Code Unit Conflict", "Expected function entry at " + addr + " referenced by call stub from " + flowFromAddr); + return null; + } + if (cu instanceof Data) { + Data d = (Data)cu; + if (d.isDefined()) { + bookmarkMgr.setBookmark(addr, BookmarkType.ERROR, "Code Unit Conflict", "Expected function entry referenced by call stub from " + flowFromAddr); + return null; + } + DisassembleCommand cmd = new DisassembleCommand(addr, null, true); + if (!cmd.applyTo(program, monitor)) { + return null; + } + } + + CreateFunctionCmd cmd = new CreateFunctionCmd(addr); + if (cmd.applyTo(program, monitor)) { + return cmd.getFunction(); + } + return null; + } + +} diff --git a/src/third-party/sleigh/processors/PowerPC/src/main/java/ghidra/app/plugin/core/analysis/PowerPCAddressAnalyzer.java b/src/third-party/sleigh/processors/PowerPC/src/main/java/ghidra/app/plugin/core/analysis/PowerPCAddressAnalyzer.java new file mode 100644 index 00000000..0d698a47 --- /dev/null +++ 
b/src/third-party/sleigh/processors/PowerPC/src/main/java/ghidra/app/plugin/core/analysis/PowerPCAddressAnalyzer.java @@ -0,0 +1,749 @@ +/* ### + * IP: GHIDRA + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package ghidra.app.plugin.core.analysis; + +import java.math.BigInteger; +import java.util.ArrayList; + +import ghidra.app.cmd.function.CreateFunctionCmd; +import ghidra.app.cmd.label.AddLabelCmd; +import ghidra.app.plugin.core.disassembler.AddressTable; +import ghidra.app.util.PseudoDisassembler; +import ghidra.app.util.bin.format.pef.PefConstants; +import ghidra.app.util.opinion.ElfLoader; +import ghidra.app.util.opinion.PefLoader; +import ghidra.framework.options.Options; +import ghidra.program.model.address.*; +import ghidra.program.model.block.*; +import ghidra.program.model.data.*; +import ghidra.program.model.lang.*; +import ghidra.program.model.listing.*; +import ghidra.program.model.pcode.Varnode; +import ghidra.program.model.scalar.Scalar; +import ghidra.program.model.symbol.*; +import ghidra.program.model.util.CodeUnitInsertionException; +import ghidra.program.util.*; +import ghidra.util.Msg; +import ghidra.util.exception.*; +import ghidra.util.task.TaskMonitor; + +public class PowerPCAddressAnalyzer extends ConstantPropagationAnalyzer { + + private static final String OPTION_NAME_CHECK_NIBBLE = "Restrict Address to same 256M page"; + private static final String OPTION_DESCRIPTION_CHECK_NIBBLE = ""; + private static final boolean OPTION_DEFAULT_CHECK_HIGH_NIBBLE = false; + + private static final String OPTION_NAME_MARK_DUAL_INSTRUCTION = + "Mark dual instruction references"; + private static final String OPTION_DESCRIPTION_MARK_DUAL_INSTRUCTION = + "Turn on to mark all potential dual instruction refs,\n" + "(lis - addi/ori/subi)\n" + + "even if they are not seen to be used as a reference."; + private static final boolean OPTION_DEFAULT_MARK_DUAL_INSTRUCTION = false; + + private static final String OPTION_NAME_PROPAGATE_R2 = "Propagate r2 register value"; + private static final String OPTION_DESCRIPTION_PROPAGATE_R2 = + "Propagate r2 register value into called functions\n" + + "to facilitate function descriptor resolution."; + + private static final String OPTION_NAME_PROPAGATE_R30 = "Propagate r30 register value"; + private static final String OPTION_DESCRIPTION_PROPAGATE_R30 = + "Propagate r30 register value into called functions\n"; + + private static final String SWITCH_OPTION_NAME = "Switch Table Recovery"; + private static final String SWITCH_OPTION_DESCRIPTION = "Turn on to recover switch tables"; + private static final boolean SWITCH_OPTION_DEFAULT_VALUE = true; + + private boolean markupDualInstructionOption = OPTION_DEFAULT_MARK_DUAL_INSTRUCTION; + private boolean checkHighNibbleOption = OPTION_DEFAULT_CHECK_HIGH_NIBBLE; + private boolean propagateR2value; // see computed default + private boolean propagateR30value; // see computed default + private boolean recoverSwitchTables = SWITCH_OPTION_DEFAULT_VALUE; + + private final static String
PROCESSOR_NAME = "PowerPC"; + + public PowerPCAddressAnalyzer() { + super(PROCESSOR_NAME); + } + + @Override + public boolean canAnalyze(Program program) { + return program.getLanguage().getProcessor().equals( + Processor.findOrPossiblyCreateProcessor(PROCESSOR_NAME)); + } + + private boolean getDefaultPropagateR2Option(Program program) { + // TODO: R2 propagation had been disabled for PEF - should it be enabled by default? + boolean isELF = ElfLoader.ELF_NAME.equals(program.getExecutableFormat()); + return isELF && program.getLanguage().getLanguageDescription().getSize() == 64; + } + + private boolean getDefaultPropagateR30Option(Program program) { + boolean isELF = ElfLoader.ELF_NAME.equals(program.getExecutableFormat()); + boolean is32bit = program.getLanguage().getLanguageDescription().getSize() == 32; + // The use of r30 as a GOT pointer during function calls can occur with the V1.0 ABI + // for relocatable PIC code. The presence of the dynamic table entry DT_PPC_GOT + // can be used as an indicator, and the associated symbol __DT_PPC_GOT is created by + // the ELF Loader. + return isELF && is32bit && program.getSymbolTable().getSymbols("__DT_PPC_GOT").hasNext(); + } + + @Override + public void registerOptions(Options options, Program program) { + super.registerOptions(options, program); + + options.registerOption(OPTION_NAME_CHECK_NIBBLE, checkHighNibbleOption, null, + OPTION_DESCRIPTION_CHECK_NIBBLE); + + options.registerOption(OPTION_NAME_MARK_DUAL_INSTRUCTION, markupDualInstructionOption, null, + OPTION_DESCRIPTION_MARK_DUAL_INSTRUCTION); + + options.registerOption(SWITCH_OPTION_NAME, recoverSwitchTables, null, + SWITCH_OPTION_DESCRIPTION); + + options.registerOption(OPTION_NAME_PROPAGATE_R2, getDefaultPropagateR2Option(program), null, + OPTION_DESCRIPTION_PROPAGATE_R2); + + options.registerOption(OPTION_NAME_PROPAGATE_R30, getDefaultPropagateR30Option(program), + null, OPTION_DESCRIPTION_PROPAGATE_R30); + } + + @Override + public void optionsChanged(Options options, Program program) { + super.optionsChanged(options, program); + + checkHighNibbleOption = options.getBoolean(OPTION_NAME_CHECK_NIBBLE, checkHighNibbleOption); + + markupDualInstructionOption = + options.getBoolean(OPTION_NAME_MARK_DUAL_INSTRUCTION, markupDualInstructionOption); + + recoverSwitchTables = options.getBoolean(SWITCH_OPTION_NAME, recoverSwitchTables); + + propagateR2value = options.getBoolean(OPTION_NAME_PROPAGATE_R2, propagateR2value); + propagateR30value = options.getBoolean(OPTION_NAME_PROPAGATE_R30, propagateR30value); + } + + @Override + public AddressSet flowConstants(final Program program, Address flowStart, + AddressSetView flowSet, final SymbolicPropogator symEval, final TaskMonitor monitor) + throws CancelledException { + + RegisterValue initR2Value = lookupR2(program, flowStart); + final RegisterValue startingR2Value = initR2Value; + + boolean isPEF = PefLoader.PEF_NAME.equals(program.getExecutableFormat()); + + Register r2 = program.getRegister("r2"); + Register r30 = program.getRegister("r30"); + + // TODO: NEEDS MORE WORK !!!
+ // - attempt to flow and restore r2 after calls + + // follow all flows building up context + // use context to fill out addresses on certain instructions + ConstantPropagationContextEvaluator eval = + new ConstantPropagationContextEvaluator(trustWriteMemOption) { + + @Override + public boolean evaluateContextBefore(VarnodeContext context, Instruction instr) { + return false; + } + + @Override + public boolean evaluateContext(VarnodeContext context, Instruction instr) { + if (markupDualInstructionOption) { + markupDualInstructions(context, instr); + } + + if ((propagateR2value || propagateR30value) && instr.getFlowType().isCall()) { + + // TODO: Should this be done with evaluateDestination instead + + Reference[] refs = instr.getReferencesFrom(); + for (Reference ref : refs) { + Address destAddr = ref.getToAddress(); + if (propagateR2value && program.getProgramContext().getRegisterValue(r2, + destAddr) == null) { + setRegisterIfNotSet(program, destAddr, startingR2Value); + } + if (propagateR30value) { + RegisterValue r30Value = context.getRegisterValue(r30); + setRegisterIfNotSet(program, destAddr, r30Value); + } + } + } + + // NOTE: ELF restores r2 after returning from called function stub + // which may not fit with restoring r2 context as done for PEF + + // handle the nasty reset of "r2" + // TODO: this should probably be an option + if (propagateR2value && isPEF && isPEFCallingConvention(program, instr)) { + if (startingR2Value != null) { + context.setRegisterValue(startingR2Value); + } + } + return false; + } + + private void markupDualInstructions(VarnodeContext context, Instruction instr) { + String mnemonic = instr.getMnemonicString(); + if (mnemonic.equals("subi") || mnemonic.equals("addi")) { + Register reg = instr.getRegister(0); + if (reg != null) { + BigInteger val = context.getValue(reg, false); + if (val != null) { + long lval = val.longValue(); + Address refAddr = + instr.getMinAddress().getNewTruncatedAddress(lval, true); + // TODO: this needs a much more thorough check. + // What is at the other end of the instruction! + if ((lval > 4096 || lval < 0) && + program.getMemory().contains(refAddr)) { + if (instr.getOperandReferences(2).length == 0) { + instr.addOperandReference(2, refAddr, RefType.DATA, + SourceType.ANALYSIS); + } + } + } + } + } + } + + @Override + public boolean evaluateReference(VarnodeContext context, Instruction instr, + int pcodeop, Address address, int size, RefType refType) { + + if (instr.getFlowType().isJump()) { + // for branching instructions, if we have a good target, mark it + // if this isn't straight code (thunk computation), let someone else lay down the reference + return !symEval.encounteredBranch(); + } + + // don't markup li from a scalar, addresses don't fit in an instruction. + String mnemonic = instr.getMnemonicString(); + if (mnemonic.equals("li") && instr.getScalar(1) != null) { + return false; + } + + // lis is only the upper half of the instruction, don't mark it as a reference.
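// A 32-bit target is materialized by an instruction pair: lis loads the upper
// halfword, and a following addi/ori/subi supplies the lower 16 bits, which addi
// sign-extends. Illustrative arithmetic for the combined value, as a hypothetical
// helper (exposition only, assuming both immediates are already known):
//
//     static long dualInstructionTarget(int hi, short lo) {
//         // lis rD,hi  -> rD = hi << 16;  addi rD,rD,lo -> rD += sign-extended lo
//         return ((long) (hi & 0xffff) << 16) + lo; // hi=0x1001, lo=-0x7ffc -> 0x10008004
//     }
//
// markupDualInstructions() above leans on the constant propagator having already
// folded the two halves into the register value it inspects.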
+ if (mnemonic.equals("lis")) { + return false; + } + + // don't use short constant on load/store as address + if (mnemonic.startsWith("ld") || mnemonic.startsWith("lw") || + mnemonic.startsWith("lb") || mnemonic.startsWith("st")) { + for (Object obj : instr.getOpObjects(1)) { + if ((obj instanceof Scalar) && + ((Scalar) obj).getUnsignedValue() == address.getOffset()) { + return false; + } + } + } + + // markup the data flow for this instruction + if (refType.isData()) { + return true; + } + + return super.evaluateReference(context, instr, pcodeop, address, size, refType); + } + + @Override + public boolean evaluateDestination(VarnodeContext context, + Instruction instruction) { + String mnemonic = instruction.getMnemonicString(); + if (!instruction.getFlowType().isJump()) { + return false; + } + if (mnemonic.equals("bcctr") || mnemonic.equals("bcctrl") || + mnemonic.equals("bctr")) { + // record the destination that is unknown + if (!checkAlreadyRecovered(instruction.getProgram(), + instruction.getMinAddress())) { + destSet.addRange(instruction.getMinAddress(), + instruction.getMinAddress()); + } + } + return false; + } + + @Override + public Long unknownValue(VarnodeContext context, Instruction instruction, + Varnode node) { + if (node.isRegister()) { + Register reg = program.getRegister(node.getAddress()); + if (reg != null) { + if (reg.getName().equals("xer_so")) { + return new Long(0); + } + if (propagateR2value && reg.getName().equals("r2") && + startingR2Value != null && startingR2Value.hasValue()) { + return new Long(startingR2Value.getUnsignedValue().longValue()); + } + } + } + return null; + } + + @Override + public boolean followFalseConditionalBranches() { + return true; + } + + @Override + public boolean evaluateSymbolicReference(VarnodeContext context, Instruction instr, + Address address) { + return false; + } + + @Override + public boolean allowAccess(VarnodeContext context, Address addr) { + return trustWriteMemOption; + } + }; + + AddressSet resultSet = symEval.flowConstants(flowStart, flowSet, eval, true, monitor); + + if (recoverSwitchTables) { + recoverSwitches(program, symEval, eval.getDestinationSet(), monitor); + } + + return resultSet; + } + + private void setRegisterIfNotSet(Program program, Address addr, RegisterValue regValue) { + if (regValue == null || !regValue.hasValue() || + regValue.getUnsignedValue().equals(BigInteger.ZERO)) { + return; + } + ProgramContext programContext = program.getProgramContext(); + RegisterValue oldValue = programContext.getRegisterValue(regValue.getRegister(), addr); + if (oldValue != null && oldValue.hasValue() && + !oldValue.getUnsignedValueIgnoreMask().equals(BigInteger.ZERO)) { + return; + } + try { + programContext.setRegisterValue(addr, addr, regValue); + if (program.getListing().getFunctionAt(addr) != null) { + AutoAnalysisManager analysisMgr = AutoAnalysisManager.getAnalysisManager(program); + analysisMgr.functionDefined(addr); // kick function for re-analysis + analysisMgr.codeDefined(addr); // kick off code value propagation for the function + } + } + catch (ContextChangeException e) { + throw new AssertException("unexpected", e); + } + } + + private RegisterValue lookupR2(Program program, Address flowStart) { + RegisterValue initR2Value = null; + if (propagateR2value) { + initR2Value = + program.getProgramContext().getRegisterValue(program.getRegister("r2"), flowStart); + if (initR2Value == null || !initR2Value.hasValue()) { + initR2Value = findR2Value(program, flowStart); + setRegisterIfNotSet(program, flowStart, 
initR2Value); + } + return initR2Value; + } + + private boolean checkAlreadyRecovered(Program program, Address addr) { + int referenceCountFrom = program.getReferenceManager().getReferenceCountFrom(addr); + + if (referenceCountFrom > 1) { + return true; + } + Reference[] refs = program.getReferenceManager().getReferencesFrom(addr); + if (refs.length == 1 && !refs[0].getReferenceType().isData()) { + return true; + } + + return false; + } + + private void recoverSwitches(final Program program, SymbolicPropogator symEval, + AddressSet destinationSet, TaskMonitor monitor) throws CancelledException { + + final ArrayList<Address>
targetList = new ArrayList<>(); + + // now handle symbolic execution assuming values! + class SwitchEvaluator implements ContextEvaluator { + + private static final int STARTING_MAX_TABLE_SIZE = 64; + + long tableIndexOffset; + Address targetSwitchAddr = null; + boolean hitTheGuard = false; + Long assumeValue = new Long(0); + int tableSizeMax = STARTING_MAX_TABLE_SIZE; + + public void setGuard(boolean hitGuard) { + hitTheGuard = hitGuard; + } + + public void setAssume(Long assume) { + assumeValue = assume; + } + + public void setTargetSwitchAddr(Address addr) { + targetSwitchAddr = addr; + } + + public int getMaxTableSize() { + return tableSizeMax; + } + + @Override + public boolean evaluateContextBefore(VarnodeContext context, Instruction instr) { + return false; + } + + @Override + public boolean evaluateContext(VarnodeContext context, Instruction instr) { + // find the cmpli to set the size of the table + // tableSize = size + String mnemonic = instr.getMnemonicString(); + if ((mnemonic.compareToIgnoreCase("cmpi") == 0) || + (mnemonic.compareToIgnoreCase("cmpwi") == 0) || + (mnemonic.compareToIgnoreCase("cmpli") == 0) || + (mnemonic.compareToIgnoreCase("cmplwi") == 0)) { + int numOps = instr.getNumOperands(); + if (numOps > 1) { + Register reg = instr.getRegister(numOps - 2); + if ((reg != null)) { + Scalar scalar = instr.getScalar(numOps - 1); + if (scalar != null) { + int newTableSizeMax = (int) scalar.getSignedValue() + 1; + if (newTableSizeMax > 0 && newTableSizeMax < 128) { + tableSizeMax = newTableSizeMax; + } + hitTheGuard = true; + RegisterValue rval = context.getRegisterValue(reg); + context.clearRegister(reg); + if (rval != null) { + long lval = rval.getSignedValue().longValue(); + if (lval < 0) { + tableIndexOffset = -lval; + } + } + } + } + } + } + if (instr.getFlowType().isConditional()) { + hitTheGuard = true; + } + + return false; + } + + @Override + public Address evaluateConstant(VarnodeContext context, Instruction instr, int pcodeop, + Address constant, int size, RefType refType) { + return null; + } + + @Override + public boolean evaluateReference(VarnodeContext context, Instruction instr, int pcodeop, + Address address, int size, RefType refType) { + + // TODO: if ever loading from instructions in memory, must EXIT! 
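// How the recovery driver (further below, at the end of recoverSwitches) uses this
// evaluator: it re-runs constant propagation once per assumed switch-index value, and
// unknownValue() answers with that assumed value for the unresolved index register, so
// each pass resolves one concrete computed-jump target into targetList, bounded by the
// table size recovered from the cmpi/cmpli guard (tableSizeMax). Schematically, with a
// hypothetical resolveTargetForIndex() wrapping one symEval.flowConstants() pass:
//
//     for (long assume = 0; assume < tableSizeMax; assume++) {
//         Address target = resolveTargetForIndex(assume); // one propagation pass
//         if (target == null) {
//             break; // ran past the end of the jump table
//         }
//         targetList.add(target);
//     }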
+ if (!((refType.isComputed() || refType.isConditional()) && + program.getMemory().contains(address))) { + if (refType.isRead()) { + createDataType(program, instr, address); + } + return false; + } + if (!targetList.contains(address)) { + targetList.add(address); + } + return true; // just go ahead and mark up the instruction + } + + @Override + public boolean evaluateDestination(VarnodeContext context, Instruction instruction) { + return instruction.getMinAddress().equals(targetSwitchAddr); + } + + @Override + public Long unknownValue(VarnodeContext context, Instruction instruction, + Varnode node) { + if (node.isRegister()) { + if (instruction.getFlowType().isJump()) { + return null; + } + Register reg = program.getRegister(node.getAddress()); + if (reg != null) { + // never assume for flags, or control registers + if (reg.getName().equals("xer_so") || reg.getName().startsWith("cr")) { + return new Long(0); + } + } + if (hitTheGuard) { + return assumeValue; + } + } + return null; + } + + @Override + public boolean followFalseConditionalBranches() { + return false; + } + + @Override + public boolean evaluateSymbolicReference(VarnodeContext context, Instruction instr, + Address address) { + return false; + } + + @Override + public boolean allowAccess(VarnodeContext context, Address addr) { + return false; + } + } + + SwitchEvaluator switchEvaluator = new SwitchEvaluator(); + + // now flow with the simple block of this branch.... + + // for each unknown branch destination, + AddressIterator iter = destinationSet.getAddresses(true); + SimpleBlockModel model = new SimpleBlockModel(program); + while (iter.hasNext() && !monitor.isCancelled()) { + Address loc = iter.next(); + + // first see if something else has already done this! + int referenceCountFrom = program.getReferenceManager().getReferenceCountFrom(loc); + if (referenceCountFrom > 2) { + continue; + } + + CodeBlock bl = null; + try { + bl = model.getFirstCodeBlockContaining(loc, monitor); + } + catch (CancelledException e) { + return; + } + + AddressSet branchSet = new AddressSet(bl); + CodeBlockReferenceIterator bliter; + try { + bliter = bl.getSources(monitor); + boolean oneSource = (bl.getNumSources(monitor) == 1); + while (bliter.hasNext()) { + CodeBlockReference sbl = bliter.next(); + if (sbl.getFlowType().isCall()) { + continue; + } + if ((sbl.getFlowType().isFallthrough() || oneSource) || + !sbl.getFlowType().isConditional()) { + bl = sbl.getSourceBlock(); + if (bl != null) { + branchSet.add(bl); + } + } + } + } + catch (CancelledException e) { + break; + } + + for (long assume = 0; assume < switchEvaluator.getMaxTableSize(); assume++) { + switchEvaluator.setAssume(new Long(assume)); + switchEvaluator.setGuard(false); + switchEvaluator.setTargetSwitchAddr(loc); + + symEval.flowConstants(branchSet.getMinAddress(), branchSet, switchEvaluator, false, + monitor); + // if it didn't get it after try with 0 + if (assume > 0 && targetList.size() < 1) { + break; + } + if (symEval.readExecutable()) { + break; + } + } + // re-create the function body with the newly found code + if (targetList.size() > 1) { + AddressTable table; + table = new AddressTable(loc, targetList.toArray(new Address[0]), + program.getDefaultPointerSize(), 0, false); + table.fixupFunctionBody(program, program.getListing().getInstructionAt(loc), + monitor); + labelTable(program, loc, targetList); + } + else if (targetList.size() == 1) { + Function f = program.getFunctionManager().getFunctionContaining(loc); + CreateFunctionCmd.fixupFunctionBody(program, f, monitor); + 
} + } + } + + private void createDataType(Program program, Instruction instr, Address address) { + if (!program.getListing().isUndefined(address, address)) { + return; + } + String mnemonic = instr.getMnemonicString(); + if (mnemonic.startsWith("l") || mnemonic.startsWith("s")) { + char endCh = mnemonic.charAt(1); + DataType dt = null; + switch (endCh) { + case 'd': + dt = Undefined8DataType.dataType; + break; + case 'w': + dt = Undefined4DataType.dataType; + break; + case 'h': + dt = Undefined2DataType.dataType; + break; + case 'b': + dt = Undefined1DataType.dataType; + break; + } + if (dt != null) { + try { + program.getListing().createData(address, dt); + } + catch (CodeUnitInsertionException e) { + // ignore + } + catch (DataTypeConflictException e) { + // ignore + } + } + } + } + + private RegisterValue findR2Value(Program program, Address start) { + + if (PefLoader.PEF_NAME.equals(program.getExecutableFormat())) { + return findPefR2Value(program, start); + } +// if (ElfLoader.ELF_NAME.equals(program.getExecutableFormat())) { +// return findElfR2Value(program, start); +// } + return null; + } + +// private RegisterValue findElfR2Value(Program program, Address start) { +// +// // look for TOC_BASE injected by PowerPC_ElfExtension +// Symbol tocSym = SymbolUtilities.getLabelOrFunctionSymbol(program, +// PowerPC64_ElfExtension.TOC_BASE, this, false); +// if (tocSym == null) { +// return null; +// } +// +// Register r2 = program.getRegister("r2"); +// return new RegisterValue(r2, BigInteger.valueOf(tocSym.getAddress().getOffset())); +// } + + private RegisterValue findPefR2Value(Program program, Address start) { + + Listing listing = program.getListing(); + ReferenceManager referenceManager = program.getReferenceManager(); + Symbol tocSymbol = SymbolUtilities.getExpectedLabelOrFunctionSymbol(program, + PefConstants.TOC, err -> Msg.error(this, err)); + if (tocSymbol == null) { + return null; + } + + PseudoDisassembler pdis = new PseudoDisassembler(program); + ReferenceIterator refIter = referenceManager.getReferencesTo(start); + + while (refIter.hasNext()) { + Reference ref = refIter.next(); + // if is a data pointer + Data data = listing.getDataAt(ref.getFromAddress()); + if (data == null) { + continue; + } + if (!data.isPointer()) { + continue; + } + // check after the data pointer to see if it is the same as the TOC value + Address dataAddr = data.getMaxAddress().add(1); + Address tocAddr = pdis.getIndirectAddr(dataAddr); + if (tocSymbol.getAddress().equals(tocAddr)) { + BigInteger tocValue = BigInteger.valueOf(tocAddr.getOffset()); + Register r2 = program.getRegister("r2"); + return new RegisterValue(r2, tocValue); + } + } + return null; + } + + protected boolean isPEFCallingConvention(Program program, Instruction instr) { + + if (instr.getMnemonicString().equals("lwz")) { + Register reg = instr.getRegister(0); + if (reg != null && reg.getName().equals("r2")) { + Object[] objs = instr.getOpObjects(1); + Register stackRegister = program.getCompilerSpec().getStackPointer(); + for (Object obj : objs) { + if (obj instanceof Register && ((Register) obj) != stackRegister) { + return false; + } + // TODO: verify stack offset for 64-bit PEF + if (obj instanceof Scalar && ((Scalar) obj).getValue() != 0x14) { + return false; + } + } + Address fallAddr = instr.getFallFrom(); + Instruction fallInstr = program.getListing().getInstructionContaining(fallAddr); + if (fallInstr != null && fallInstr.getFlowType().isCall()) { + return true; + } + } + } + return false; + } + + private void 
labelTable(Program program, Address loc, ArrayList<Address>
targets) { + Namespace space = null; + + Instruction start_inst = program.getListing().getInstructionAt(loc); + + // not putting switch into functions anymore + // program.getSymbolTable().getNamespace(start_inst.getMinAddress()); + String spaceName = "switch_" + start_inst.getMinAddress(); + try { + space = program.getSymbolTable().createNameSpace(space, spaceName, SourceType.ANALYSIS); + } + catch (DuplicateNameException e) { + space = program.getSymbolTable().getNamespace(spaceName, program.getGlobalNamespace()); + } + catch (InvalidInputException e) { + // just go with default space + } + + int tableNumber = 0; + for (Address addr : targets) { + AddLabelCmd lcmd = new AddLabelCmd(addr, "case_" + Long.toHexString(tableNumber), space, + SourceType.ANALYSIS); + tableNumber++; + lcmd.setNamespace(space); + + lcmd.applyTo(program); + } + } +} diff --git a/src/third-party/sleigh/processors/PowerPC/src/main/java/ghidra/app/util/bin/format/elf/extend/PowerPC64_ElfExtension.java b/src/third-party/sleigh/processors/PowerPC/src/main/java/ghidra/app/util/bin/format/elf/extend/PowerPC64_ElfExtension.java new file mode 100644 index 00000000..87d8e3ab --- /dev/null +++ b/src/third-party/sleigh/processors/PowerPC/src/main/java/ghidra/app/util/bin/format/elf/extend/PowerPC64_ElfExtension.java @@ -0,0 +1,534 @@ +/* ### + * IP: GHIDRA + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package ghidra.app.util.bin.format.elf.extend; + +import java.math.BigInteger; + +import ghidra.app.util.bin.format.elf.*; +import ghidra.app.util.bin.format.elf.ElfDynamicType.ElfDynamicValueType; +import ghidra.app.util.bin.format.elf.relocation.PowerPC64_ElfRelocationConstants; +import ghidra.app.util.opinion.ElfLoader; +import ghidra.program.model.address.*; +import ghidra.program.model.data.PointerDataType; +import ghidra.program.model.data.QWordDataType; +import ghidra.program.model.lang.*; +import ghidra.program.model.listing.*; +import ghidra.program.model.mem.MemoryBlock; +import ghidra.program.model.reloc.Relocation; +import ghidra.program.model.symbol.*; +import ghidra.util.Msg; +import ghidra.util.exception.*; +import ghidra.util.task.TaskMonitor; + +public class PowerPC64_ElfExtension extends ElfExtension { + + private static final int PLT_ENTRY_SIZE = 8; // could be 16(local) or 24 w/ opd_api, 32 for VxWorks + private static final int PLT_HEAD_SIZE = 16; // could be 24 w/ opd_api, 32 for VxWorks + + // Elf Dynamic Type Extensions + public static final ElfDynamicType DT_PPC64_GLINK = new ElfDynamicType(0x70000000, + "DT_PPC64_GLINK", "Specify the start of the .glink section", ElfDynamicValueType.ADDRESS); + public static final ElfDynamicType DT_PPC64_OPD = new ElfDynamicType(0x70000001, "DT_PPC64_OPD", + "Specify the start of the .opd section", ElfDynamicValueType.ADDRESS); + public static final ElfDynamicType DT_PPC64_OPDSZ = new ElfDynamicType(0x70000002, + "DT_PPC64_OPDSZ", "Specify the size of the .opd section", ElfDynamicValueType.ADDRESS); + public static final ElfDynamicType DT_PPC64_OPT = new ElfDynamicType(0x70000003, "DT_PPC64_OPT", + "Specify whether various optimisations are possible", ElfDynamicValueType.VALUE); + + // PPC64 ABI Version Flag Bits contained within ElfHeader e_flags + private static final int EF_PPC64_ABI = 3; + + public static final String TOC_BASE = "TOC_BASE"; // injected symbol to mark global TOC_BASE + + @Override + public boolean canHandle(ElfHeader elf) { + return elf.e_machine() == ElfConstants.EM_PPC64 && elf.is64Bit(); + } + + @Override + public boolean canHandle(ElfLoadHelper elfLoadHelper) { + Language language = elfLoadHelper.getProgram().getLanguage(); + return canHandle(elfLoadHelper.getElfHeader()) && + "PowerPC".equals(language.getProcessor().toString()) && + language.getLanguageDescription().getSize() == 64; + } + + @Override + public String getDataTypeSuffix() { + return "_PPC64"; + } + + @Override + public void processElf(ElfLoadHelper elfLoadHelper, TaskMonitor monitor) + throws CancelledException { + + if (!canHandle(elfLoadHelper)) { + return; + } + + findTocBase(elfLoadHelper, monitor); // create TOC_BASE symbol (used by relocations) + } + + @Override + public void processGotPlt(ElfLoadHelper elfLoadHelper, TaskMonitor monitor) + throws CancelledException { + + if (!canHandle(elfLoadHelper)) { + return; + } + + setEntryPointContext(elfLoadHelper, monitor); + + processOPDSection(elfLoadHelper, monitor); + + super.processGotPlt(elfLoadHelper, monitor); + + processPpc64v2PltPointerTable(elfLoadHelper, monitor); + processPpc64PltGotPointerTable(elfLoadHelper, monitor); + } + + private void findTocBase(ElfLoadHelper elfLoadHelper, TaskMonitor monitor) { + + // TODO: Verify that this works for non-V2 ABI cases (this assumes TOC based upon .got location) + + Program program = elfLoadHelper.getProgram(); + + try { + Address tocAddr = null; + + // Check for .toc section + MemoryBlock tocBlock =
program.getMemory().getBlock(".toc"); + if (tocBlock != null) { + tocAddr = tocBlock.getStart(); + } + else { + MemoryBlock gotBlock = + program.getMemory().getBlock(ElfSectionHeaderConstants.dot_got); + if (gotBlock != null) { + tocAddr = gotBlock.getStart().addNoWrap(0x8000); + } + } + + if (tocAddr != null) { + elfLoadHelper.createSymbol(tocAddr, TOC_BASE, false, false, null); + } + + } + catch (AddressOverflowException | InvalidInputException e) { + // ignore + } + } + + private void processPpc64PltGotPointerTable(ElfLoadHelper elfLoadHelper, TaskMonitor monitor) + throws CancelledException { + + ElfHeader elf = elfLoadHelper.getElfHeader(); + if (getPpc64ABIVersion(elf) == 2) { + // paint TOC_BASE value as r2 across executable blocks since r2 + // is needed to resolve call stubs + Symbol tocSymbol = SymbolUtilities.getLabelOrFunctionSymbol(elfLoadHelper.getProgram(), + TOC_BASE, err -> elfLoadHelper.getLog().error("PowerPC64_ELF", err)); + if (tocSymbol != null) { + paintTocAsR2value(tocSymbol.getAddress().getOffset(), elfLoadHelper, monitor); + } + // TODO: verify ABI detection + return; + } + + ElfDynamicTable dynamicTable = elf.getDynamicTable(); + if (dynamicTable == null || !dynamicTable.containsDynamicValue(ElfDynamicType.DT_PLTGOT) || + !dynamicTable.containsDynamicValue(ElfDynamicType.DT_PLTRELSZ) || + !dynamicTable.containsDynamicValue(ElfDynamicType.DT_PLTREL)) { + return; + } + + try { + long pltgotOffset = + elf.adjustAddressForPrelink(dynamicTable.getDynamicValue(ElfDynamicType.DT_PLTGOT)); + Address pltAddr = elfLoadHelper.getDefaultAddress(pltgotOffset); + Program program = elfLoadHelper.getProgram(); + MemoryBlock pltBlock = program.getMemory().getBlock(pltAddr); + if (pltBlock == null || pltBlock.isExecute()) { + return; + } + + int relEntrySize = (dynamicTable.getDynamicValue( + ElfDynamicType.DT_PLTREL) == ElfDynamicType.DT_RELA.value) ? 24 : 16; + + long pltEntryCount = + dynamicTable.getDynamicValue(ElfDynamicType.DT_PLTRELSZ) / relEntrySize; + + for (int i = 0; i < pltEntryCount; i++) { + monitor.checkCanceled(); + pltAddr = pltAddr.addNoWrap(24); + Symbol refSymbol = markupDescriptorEntry(pltAddr, false, elfLoadHelper); + if (refSymbol != null && refSymbol.getSymbolType() == SymbolType.FUNCTION && + refSymbol.getSource() == SourceType.DEFAULT) { + try { + // Force source type on function to prevent potential removal by clear-flow + refSymbol.setName(".pltgot." 
+ refSymbol.getName(), SourceType.IMPORTED); + } + catch (DuplicateNameException | InvalidInputException e) { + // ignore + } + } + } + } + catch (NotFoundException e) { + throw new AssertException("unexpected", e); + } + catch (AddressOverflowException e) { + elfLoadHelper.log("Failed to process PltGot entries: " + e.getMessage()); + } + } + + private void paintTocAsR2value(long tocBaseOffset, ElfLoadHelper elfLoadHelper, + TaskMonitor monitor) { + + Program program = elfLoadHelper.getProgram(); + ProgramContext programContext = program.getProgramContext(); + Register r2reg = program.getRegister("r2"); + RegisterValue tocValue = new RegisterValue(r2reg, BigInteger.valueOf(tocBaseOffset)); + + for (MemoryBlock block : program.getMemory().getBlocks()) { + if (block.isExecute()) { + try { + programContext.setRegisterValue(block.getStart(), block.getEnd(), tocValue); + } + catch (ContextChangeException e) { + String msg = "Failed to set r2 as TOC_BASE on memory block " + block.getName(); + Msg.error(this, msg + ": " + e.getMessage()); + elfLoadHelper.log(msg); + } + } + } + + } + + private void processPpc64v2PltPointerTable(ElfLoadHelper elfLoadHelper, TaskMonitor monitor) + throws CancelledException { + + ElfHeader elf = elfLoadHelper.getElfHeader(); + ElfSectionHeader pltSection = elf.getSection(ElfSectionHeaderConstants.dot_plt); + if (pltSection == null) { + return; + } + Program program = elfLoadHelper.getProgram(); + MemoryBlock pltBlock = program.getMemory().getBlock(pltSection.getNameAsString()); + if (pltBlock == null) { + return; + } + if (pltSection.isExecutable()) { + return; + } + + // set pltBlock read-only to permit decompiler simplification + pltBlock.setWrite(false); + + if (getPpc64ABIVersion(elf) != 2) { + // TODO: add support for other PLT implementations + return; + } + + // TODO: Uncertain + + Address addr = pltBlock.getStart().add(PLT_HEAD_SIZE); + try { + while (addr.compareTo(pltBlock.getEnd()) < 0) { + monitor.checkCanceled(); + if (elfLoadHelper.createData(addr, PointerDataType.dataType) == null) { + break; // stop early if failed to create a pointer + } + addr = addr.addNoWrap(PLT_ENTRY_SIZE); + } + } + catch (AddressOverflowException e) { + // ignore + } + + } + + private void processOPDSection(ElfLoadHelper elfLoadHelper, TaskMonitor monitor) + throws CancelledException { + + MemoryBlock opdBlock = elfLoadHelper.getProgram().getMemory().getBlock(".opd"); + if (opdBlock == null) { + return; + } + + monitor.setMessage("Processing Function Descriptor Symbols..."); + + Address addr = opdBlock.getStart(); + Address endAddr = opdBlock.getEnd(); + + monitor.setShowProgressValue(true); + monitor.setProgress(0); + monitor.setMaximum((endAddr.subtract(addr) + 1) / 24); + int count = 0; + + try { + while (addr.compareTo(endAddr) < 0) { + monitor.checkCanceled(); + monitor.setProgress(++count); + processOPDEntry(elfLoadHelper, addr); + addr = addr.addNoWrap(24); + } + } + catch (AddressOverflowException e) { + // ignore end of space + } + + // allow .opd section contents to be treated as constant values + opdBlock.setWrite(false); + } + + private void processOPDEntry(ElfLoadHelper elfLoadHelper, Address opdAddr) { + + Program program = elfLoadHelper.getProgram(); + SymbolTable symbolTable = program.getSymbolTable(); + + boolean isGlobal = symbolTable.isExternalEntryPoint(opdAddr); + + Symbol refSymbol = markupDescriptorEntry(opdAddr, isGlobal, elfLoadHelper); + if (refSymbol == null) { + return; + } + Address refAddr = refSymbol.getAddress(); + + // Remove OPD function if 
one was created - a function symbol in the + // OPD section should not be a function as it is a descriptor entry only. + Function f = program.getFunctionManager().getFunctionAt(opdAddr); + if (f == null) { + // no OPD function symbol - rename referenced Function to non-default name to + // help preserve it if it gets in the path of a future clear-flow command. + if (refSymbol.getSymbolType() == SymbolType.FUNCTION && + refSymbol.getSource() == SourceType.DEFAULT) { + try { + // Force source type on function to prevent potential removal by clear-flow + refSymbol.setName(".opd." + refSymbol.getName(), SourceType.IMPORTED); + } + catch (DuplicateNameException | InvalidInputException e) { + // ignore + } + } + return; // assume it was already handled + } + // eliminate function on descriptor + f.getSymbol().delete(); + + // TODO: Could we have problems by moving the symbol from the descriptor + // table to the actual function? + + Symbol[] symbols = program.getSymbolTable().getSymbols(opdAddr); + for (Symbol symbol : symbols) { + if (symbol.isDynamic()) { + continue; + } + String name = symbol.getName(); // primary should be first + symbol.delete(); + try { + elfLoadHelper.createSymbol(refAddr, name, false, false, null); + } + catch (InvalidInputException e) { + Msg.error(this, "Failed to move function descriptor symbol properly: " + name); + } + } + } + + private Symbol markupDescriptorEntry(Address entryAddr, boolean isGlobal, + ElfLoadHelper elfLoadHelper) { + Program program = elfLoadHelper.getProgram(); + + // markup function descriptor (3 elements, 24-bytes) + Data refPtr = elfLoadHelper.createData(entryAddr, PointerDataType.dataType); + Data tocPtr = elfLoadHelper.createData(entryAddr.add(program.getDefaultPointerSize()), + PointerDataType.dataType); + // TODO: uncertain what 3rd procedure descriptor element represents + elfLoadHelper.createData(entryAddr.add(2 * program.getDefaultPointerSize()), + QWordDataType.dataType); + + if (refPtr == null || tocPtr == null) { + Msg.error(this, "Failed to process PPC64 descriptor at " + entryAddr); + return null; + } + + Address refAddr = (Address) refPtr.getValue(); + if (refAddr == null || program.getMemory().getBlock(refAddr) == null) { + return null; + } + + ElfDefaultGotPltMarkup.setConstant(refPtr); + ElfDefaultGotPltMarkup.setConstant(tocPtr); + + Function function = program.getListing().getFunctionAt(refAddr); + if (function == null) { + // Check for potential pointer table (unsure a non-function would be referenced by OPD section) + Relocation reloc = program.getRelocationTable().getRelocation(refAddr); + if (reloc != null && + reloc.getType() == PowerPC64_ElfRelocationConstants.R_PPC64_RELATIVE) { + return program.getSymbolTable().getPrimarySymbol(refAddr); + } + + // Otherwise, create function at OPD referenced location + function = elfLoadHelper.createOneByteFunction(null, refAddr, isGlobal); + } + + // set r2 to TOC base for each function + Address tocAddr = (Address) tocPtr.getValue(); + if (tocAddr != null) { + Register r2reg = program.getRegister("r2"); + RegisterValue tocValue = new RegisterValue(r2reg, tocAddr.getOffsetAsBigInteger()); + try { + program.getProgramContext().setRegisterValue(refAddr, refAddr, tocValue); + } + catch (ContextChangeException e) { + throw new AssertException(e); + } + } + return function.getSymbol(); + } + + private void setPPC64v2GlobalFunctionR12Context(Program program, Address functionAddr) { + // Global entry - assume r12 contains function address + RegisterValue entryOffset = new 
RegisterValue(program.getRegister("r12"), + BigInteger.valueOf(functionAddr.getOffset())); + ProgramContext programContext = program.getProgramContext(); + try { + programContext.setRegisterValue(functionAddr, functionAddr, entryOffset); + } + catch (ContextChangeException e) { + throw new AssertException(e); + } + } + + /** + * Adjust any program context needed on symbols + * @param elfLoadHelper + * @param monitor + * @throws CancelledException + */ + private void setEntryPointContext(ElfLoadHelper elfLoadHelper, TaskMonitor monitor) + throws CancelledException { + Program program = elfLoadHelper.getProgram(); + + if (getPpc64ABIVersion(elfLoadHelper.getElfHeader()) == 2) { + + monitor.setMessage("Assuming r12 for global functions..."); + + FunctionManager functionMgr = program.getFunctionManager(); + for (Address addr : program.getSymbolTable().getExternalEntryPointIterator()) { + monitor.checkCanceled(); + if (functionMgr.getFunctionAt(addr) != null) { + // assume r12 set to function entry for all global functions + setPPC64v2GlobalFunctionR12Context(program, addr); + } + } + + // ensure that r12 context has been set on global entry function + Symbol entrySymbol = SymbolUtilities.getLabelOrFunctionSymbol( + elfLoadHelper.getProgram(), ElfLoader.ELF_ENTRY_FUNCTION_NAME, + err -> elfLoadHelper.getLog().error("PowerPC64_ELF", err)); + if (entrySymbol != null && entrySymbol.getSymbolType() == SymbolType.FUNCTION) { + setPPC64v2GlobalFunctionR12Context(program, entrySymbol.getAddress()); + } + } + } + + // upper 3-bits of ElfSymbol st_other identify local vs. global behavior and number of instructions + // at which the local function entry is offset from the global entry. Local function + // entry names will be prefixed with a '.' + private static int[] PPC64_ABIV2_GLOBAL_ENTRY_OFFSET = new int[] { 0, 0, 1, 2, 4, 8, 16, 0 }; + + @Override + public Address evaluateElfSymbol(ElfLoadHelper elfLoadHelper, ElfSymbol elfSymbol, + Address address, boolean isExternal) { + + ElfHeader elfHeader = elfLoadHelper.getElfHeader(); + + // Check for V2 ABI + if (isExternal || elfSymbol.getType() != ElfSymbol.STT_FUNC || + getPpc64ABIVersion(elfHeader) != 2) { + return address; + } + + // NOTE: I don't think the ABI supports little-endian + Language language = elfLoadHelper.getProgram().getLanguage(); + if (!canHandle(elfLoadHelper) || elfHeader.e_machine() != ElfConstants.EM_PPC64 || + language.getLanguageDescription().getSize() != 64) { + return address; + } + + // Handle V2 ABI - st_other signals local entry vs. global entry behavior and offset. + // 4-byte instructions are assumed. + + Function localFunction = null; + int localOffset = PPC64_ABIV2_GLOBAL_ENTRY_OFFSET[(elfSymbol.getOther() & 0xe0) >>> 5] * 4; + if (localOffset != 0) { + + // generate local symbol TODO: this should really be done after demangling + String name = elfSymbol.getNameAsString(); + String localName = "."
+ name; + try { + Address localFunctionAddr = address.add(localOffset); + localFunction = elfLoadHelper.createOneByteFunction(null, localFunctionAddr, false); + if (localFunction != null && + localFunction.getSymbol().getSource() == SourceType.DEFAULT) { + elfLoadHelper.createSymbol(localFunctionAddr, localName, true, false, null); + } + // TODO: global function should be a thunk to the local function - need analyzer to do this + String cmt = "local function entry for global function " + name + " at {@address " + + address + "}"; + elfLoadHelper.getProgram().getListing().setComment(localFunctionAddr, + CodeUnit.PRE_COMMENT, cmt); + } + catch (AddressOutOfBoundsException | InvalidInputException e) { + elfLoadHelper.log("Failed to generate local function symbol " + localName + " at " + + address + "+" + localOffset); + } + } + + Function f = + elfLoadHelper.createOneByteFunction(elfSymbol.getNameAsString(), address, false); + if (f != null && localFunction != null) { + f.setThunkedFunction(localFunction); + return null; // symbol creation handled + } + + return address; + } + + /** + * Get the PPC64 ABI version specified within the ELF header. + * Expected values include: + *
+ * <ul>
+ * <li>1 for original function descriptor using ABI</li>
+ * <li>2 for revised ABI without function descriptors</li>
+ * <li>0 for unspecified or not using any features affected by the differences</li>
+ * </ul>
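+ * (Editor's note, for illustration: an ELFv2 binary is expected to carry (e_flags & 3) == 2.)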
+ * @param elf ELF header + * @return ABI version + */ + public static int getPpc64ABIVersion(ElfHeader elf) { + if (elf.e_machine() != ElfConstants.EM_PPC64) { + return 0; + } + // TODO: While the e_flags should indicate the use of function descriptors, this + // may not be set reliably. The presence of the .opd section is another + // indicator but could be missing if sections have been stripped. + return elf.e_flags() & EF_PPC64_ABI; + } + +} diff --git a/src/third-party/sleigh/processors/PowerPC/src/main/java/ghidra/app/util/bin/format/elf/extend/PowerPC_ElfExtension.java b/src/third-party/sleigh/processors/PowerPC/src/main/java/ghidra/app/util/bin/format/elf/extend/PowerPC_ElfExtension.java new file mode 100644 index 00000000..f16f1cb2 --- /dev/null +++ b/src/third-party/sleigh/processors/PowerPC/src/main/java/ghidra/app/util/bin/format/elf/extend/PowerPC_ElfExtension.java @@ -0,0 +1,320 @@ +/* ### + * IP: GHIDRA + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package ghidra.app.util.bin.format.elf.extend; + +import java.math.BigInteger; + +import ghidra.app.util.bin.format.MemoryLoadable; +import ghidra.app.util.bin.format.elf.*; +import ghidra.app.util.bin.format.elf.ElfDynamicType.ElfDynamicValueType; +import ghidra.program.database.function.OverlappingFunctionException; +import ghidra.program.disassemble.Disassembler; +import ghidra.program.model.address.*; +import ghidra.program.model.lang.*; +import ghidra.program.model.listing.*; +import ghidra.program.model.mem.*; +import ghidra.program.model.symbol.SourceType; +import ghidra.util.*; +import ghidra.util.exception.*; +import ghidra.util.task.TaskMonitor; + +public class PowerPC_ElfExtension extends ElfExtension { + + // Label prefix to be applied to the blrl instruction found within the .got + // and the name of the call-fixup to be applied if it has been defined by + // the compiler spec + public static String GOT_THUNK_NAME = "get_pc_thunk_lr"; + + private static int BLRL_INSTRUCTION = 0x4e800021; + + // Elf Dynamic Type Extensions + public static final ElfDynamicType DT_PPC_GOT = new ElfDynamicType(0x70000000, "DT_PPC_GOT", + "Specify the value of _GLOBAL_OFFSET_TABLE_", ElfDynamicValueType.ADDRESS); + public static final ElfDynamicType DT_PPC_OPT = new ElfDynamicType(0x70000001, "DT_PPC_OPT", + "Specify that tls descriptors should be optimized", ElfDynamicValueType.VALUE); + + // Program header (segment) flags + private static final int PF_PPC_VLE = 0x10000000; + + // Section header flags + private static final int SHF_PPC_VLE = 0x10000000; + + @Override + public boolean canHandle(ElfHeader elf) { + return elf.e_machine() == ElfConstants.EM_PPC && elf.is32Bit(); + } + + @Override + public boolean canHandle(ElfLoadHelper elfLoadHelper) { + Language language = elfLoadHelper.getProgram().getLanguage(); + return canHandle(elfLoadHelper.getElfHeader()) && + "PowerPC".equals(language.getProcessor().toString()) && + language.getLanguageDescription().getSize() == 32; + } + + @Override + public String 
getDataTypeSuffix() { + return "_PPC"; + } + + @Override + public void processElf(ElfLoadHelper elfLoadHelper, TaskMonitor monitor) + throws CancelledException { + + if (!canHandle(elfLoadHelper)) { + return; + } + + processPpcVleSections(elfLoadHelper, monitor); + } + + @Override + public void processGotPlt(ElfLoadHelper elfLoadHelper, TaskMonitor monitor) + throws CancelledException { + + processDynamicPpcGotEntry(elfLoadHelper); + + super.processGotPlt(elfLoadHelper, monitor); + + // check for blrl instruction at end of got sections + markupGotBLRL(elfLoadHelper, monitor); + } + + private void processDynamicPpcGotEntry(ElfLoadHelper elfLoadHelper) { + + ElfHeader elfHeader = elfLoadHelper.getElfHeader(); + + // Presence of DT_PPC_GOT signals old ABI + ElfDynamicTable dynamicTable = elfHeader.getDynamicTable(); + if (dynamicTable == null || !dynamicTable.containsDynamicValue(DT_PPC_GOT)) { + return; + } + + try { + Address gotAddr = + elfLoadHelper.getDefaultAddress(dynamicTable.getDynamicValue(DT_PPC_GOT)); + + Program program = elfLoadHelper.getProgram(); + Memory memory = program.getMemory(); + try { + // Update first got entry normally updated by link editor to refer to dynamic table + int dynamicOffset = + memory.getInt(gotAddr) + (int) elfLoadHelper.getImageBaseWordAdjustmentOffset(); + memory.setInt(gotAddr, dynamicOffset); + } + catch (MemoryAccessException e) { + elfLoadHelper.log(e); + } + } + catch (NotFoundException e) { + throw new AssertException(e); + } + } + + /** + * Determine if program's cspec has defined the get_pc_thunk_lr call-fixup + * @param program + * @return true if get_pc_thunk_lr call-fixup is defined + */ + private boolean gotThunkCallFixupExists(Program program) { + for (String fixupName : program.getCompilerSpec().getPcodeInjectLibrary().getCallFixupNames()) { + if (GOT_THUNK_NAME.equals(fixupName)) { + return true; + } + } + return false; + } + + /** + * Identify presence of blrl instruction within .got section with execute permission. + * The instruction will be disassembled and transformed into a get_pc_thunk_lr function + * with an applied call-fixup. 
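+ * For illustration (editor's note, not from the original source): a caller reaches the blrl
+ * through a "bl", which loads LR with the caller's return address; "blrl" then branches back
+ * through LR while re-setting LR to the address just past itself, handing the caller an
+ * address inside the .got. This PC-discovery behavior is what the get_pc_thunk_lr
+ * call-fixup is meant to model.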
+ * @param elfLoadHelper + * @param monitor + * @throws CancelledException + */ + private void markupGotBLRL(ElfLoadHelper elfLoadHelper, TaskMonitor monitor) + throws CancelledException { + + Program program = elfLoadHelper.getProgram(); + Memory memory = program.getMemory(); + Listing listing = program.getListing(); + + boolean applyCallFixup = gotThunkCallFixupExists(program); + + Disassembler disassembler = Disassembler.getDisassembler(program, monitor, null); + + MemoryBlock[] blocks = memory.getBlocks(); + + for (MemoryBlock block : blocks) { + monitor.checkCanceled(); + + MemoryBlock gotBlock = block; + + if (!gotBlock.getName().startsWith(ElfSectionHeaderConstants.dot_got) || + !gotBlock.isExecute()) { + continue; + } + + Address blrlAddr = findBLRL(gotBlock, memory.isBigEndian()); + if (blrlAddr == null) { + continue; + } + + listing.clearCodeUnits(blrlAddr, gotBlock.getEnd(), false); + + Address blrlEndAddr = blrlAddr.add(3); + AddressSet range = new AddressSet(blrlAddr, blrlEndAddr); + + disassembler.disassemble(blrlAddr, range); + + try { + Instruction blrlInstr = listing.getInstructionAt(blrlAddr); + if (blrlInstr == null) { + elfLoadHelper.log( + "Failed to generate blrl instruction within " + gotBlock.getName()); + continue; + } + + blrlInstr.setFlowOverride(FlowOverride.RETURN); + + Function f = listing.createFunction(GOT_THUNK_NAME + gotBlock.getName(), blrlAddr, + range, SourceType.IMPORTED); + if (applyCallFixup) { + f.setCallFixup(GOT_THUNK_NAME); + } + + } + catch (InvalidInputException | OverlappingFunctionException e) { + // should not happen + } + + } + } + + /** + * Check for trailing BLRL instruction at end of GOT block. + * Searching from the end of the .got any non-zero entry will + * cause the search to end within that .got + * @param block + * @param bigEndian + * @return address of BLRL bytes or null if not found + */ + private Address findBLRL(MemoryBlock block, boolean bigEndian) { + + DataConverter conv = + bigEndian ? BigEndianDataConverter.INSTANCE : LittleEndianDataConverter.INSTANCE; + + Address start = block.getStart(); + Address addr = block.getEnd(); + byte[] bytes = new byte[4]; + + addr = addr.getNewAddress(addr.getOffset() & ~0x3); + try { + while (addr.compareTo(start) > 0) { + if (block.getBytes(addr, bytes) == 4) { + int val = conv.getInt(bytes); + if (val == BLRL_INSTRUCTION) { + return addr; + } + if (val != 0) { + return null; + } + } + addr = addr.subtractNoWrap(4); + } + } + catch (MemoryAccessException | AddressOverflowException e) { + // ignore + } + return null; + } + + /** + * Identify PowerPC VLE sections and set the 'vle' context bit to enable + * proper code disassembly. 
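+ * (Editor's note: VLE regions are identified by the SHF_PPC_VLE section flag or the
+ * PF_PPC_VLE segment flag, both 0x10000000, as tested below.)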
+ * @param elfLoadHelper Elf load helper object + * @param monitor task monitor + * @throws CancelledException + */ + private void processPpcVleSections(ElfLoadHelper elfLoadHelper, TaskMonitor monitor) + throws CancelledException { + + // TODO: Check language ID for VLE ":VLE" + + Program program = elfLoadHelper.getProgram(); + LanguageID langID = program.getLanguageID(); + if (langID.toString().indexOf(":VLE") < 0) { + return; // non VLE variant + } + + Register vleContextReg = program.getRegister("vle"); + if (vleContextReg == null || !vleContextReg.isProcessorContext()) { + elfLoadHelper.log("ERROR: failed to locate 'vle' context register field"); + return; + } + + monitor.setMessage("Checking for VLE sections..."); + + RegisterValue enableVLE = new RegisterValue(vleContextReg, BigInteger.ONE); + + ElfHeader elf = elfLoadHelper.getElfHeader(); + if (elf.e_shnum() != 0) { + // Rely on section headers if present + for (ElfSectionHeader section : elf.getSections( + ElfSectionHeaderConstants.SHT_PROGBITS)) { + monitor.checkCanceled(); + if ((section.getFlags() & SHF_PPC_VLE) == 0) { + continue; + } + enableVLE(section, enableVLE, elfLoadHelper); + } + } + else { + for (ElfProgramHeader segment : elf.getProgramHeaders( + ElfProgramHeaderConstants.PT_LOAD)) { + monitor.checkCanceled(); + if ((segment.getFlags() & PF_PPC_VLE) == 0) { + continue; + } + enableVLE(segment, enableVLE, elfLoadHelper); + } + } + } + + private void enableVLE(MemoryLoadable header, RegisterValue enableVLE, + ElfLoadHelper elfLoadHelper) { + Address loadAddress = elfLoadHelper.findLoadAddress(header, 0); + if (loadAddress == null) { + elfLoadHelper.log("Failed to locate VLE load section/segment"); + return; + } + Program program = elfLoadHelper.getProgram(); + MemoryBlock block = program.getMemory().getBlock(loadAddress); + if (block != null) { + elfLoadHelper.log("Marked block " + block.getName() + " as VLE"); + try { + program.getProgramContext().setRegisterValue(block.getStart(), block.getEnd(), + enableVLE); + } + catch (ContextChangeException e) { + elfLoadHelper.log( + "ERROR: failed to set 'vle' context due to conflict: " + e.getMessage()); + } + } + } + +} diff --git a/src/third-party/sleigh/processors/PowerPC/src/main/java/ghidra/app/util/bin/format/elf/relocation/PowerPC64_ElfRelocationConstants.java b/src/third-party/sleigh/processors/PowerPC/src/main/java/ghidra/app/util/bin/format/elf/relocation/PowerPC64_ElfRelocationConstants.java new file mode 100644 index 00000000..5e80f900 --- /dev/null +++ b/src/third-party/sleigh/processors/PowerPC/src/main/java/ghidra/app/util/bin/format/elf/relocation/PowerPC64_ElfRelocationConstants.java @@ -0,0 +1,135 @@ +/* ### + * IP: GHIDRA + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package ghidra.app.util.bin.format.elf.relocation; + +public class PowerPC64_ElfRelocationConstants { + + public static final int R_PPC64_NONE = 0; + public static final int R_PPC64_ADDR32 = 1; // word32* S + A + public static final int R_PPC64_ADDR24 = 2; // low24* (S + A) >> 2 + public static final int R_PPC64_ADDR16 = 3; // half16* S + A + public static final int R_PPC64_ADDR16_LO = 4; // half16 #lo(S + A) + public static final int R_PPC64_ADDR16_HI = 5; // half16 #hi(S + A) + public static final int R_PPC64_ADDR16_HA = 6; // half16 #ha(S + A) + public static final int R_PPC64_ADDR14 = 7; // low14* (S + A) >> 2 + public static final int R_PPC64_ADDR14_BRTAKEN = 8; // low14* (S + A) >> 2 + public static final int R_PPC64_ADDR14_BRNTAKEN = 9; // low14* (S + A) >> 2 + public static final int R_PPC64_REL24 = 10; // low24* (S + A - P) >> 2 + public static final int R_PPC64_REL14 = 11; // low14* (S + A - P) >> 2 + public static final int R_PPC64_REL14_BRTAKEN = 12; // low14* (S + A - P) >> 2 + public static final int R_PPC64_REL14_BRNTAKEN = 13; // low14* (S + A - P) >> 2 + public static final int R_PPC64_GOT16 = 14; // half16* G + public static final int R_PPC64_GOT16_LO = 15; // half16 #lo(G) + public static final int R_PPC64_GOT16_HI = 16; // half16 #hi(G) + public static final int R_PPC64_GOT16_HA = 17; // half16 #ha(G) + public static final int R_PPC64_COPY = 19; + public static final int R_PPC64_GLOB_DAT = 20; // doubleword64 S + A + public static final int R_PPC64_JMP_SLOT = 21; // none see below + public static final int R_PPC64_RELATIVE = 22; // doubleword64 B + A + public static final int R_PPC64_UADDR32 = 24; // word32* S + A + public static final int R_PPC64_UADDR16 = 25; // half16* S + A + public static final int R_PPC64_REL32 = 26; // word32* S + A - P + public static final int R_PPC64_PLT32 = 27; // word32* L + public static final int R_PPC64_PLTREL32 = 28; // word32* L - P + public static final int R_PPC64_PLT16_LO = 29; // half16 #lo(L) + public static final int R_PPC64_PLT16_HI = 30; // half16 #hi(L) + public static final int R_PPC64_PLT16_HA = 31; // half16 #ha(L) + public static final int R_PPC64_SECTOFF = 33; // half16* R + A + public static final int R_PPC64_SECTOFF_LO = 34; // half16 #lo(R + A) + public static final int R_PPC64_SECTOFF_HI = 35; // half16 #hi(R + A) + public static final int R_PPC64_SECTOFF_HA = 36; // half16 #ha(R + A) + public static final int R_PPC64_ADDR30 = 37; // word30 (S + A - P) >> 2 + public static final int R_PPC64_ADDR64 = 38; // doubleword64 S + A + public static final int R_PPC64_ADDR16_HIGHER = 39; // half16 #higher(S + A) + public static final int R_PPC64_ADDR16_HIGHERA = 40; // half16 #highera(S + A) + public static final int R_PPC64_ADDR16_HIGHEST = 41; // half16 #highest(S + A) + public static final int R_PPC64_ADDR16_HIGHESTA = 42; // half16 #highesta(S + A) + public static final int R_PPC64_UADDR64 = 43; // doubleword64 S + A + public static final int R_PPC64_REL64 = 44; // doubleword64 S + A - P + public static final int R_PPC64_PLT64 = 45; // doubleword64 L + public static final int R_PPC64_PLTREL64 = 46; // doubleword64 L - P + public static final int R_PPC64_TOC16 = 47; // half16* S + A - .TOC. + public static final int R_PPC64_TOC16_LO = 48; // half16 #lo(S + A - .TOC.) + public static final int R_PPC64_TOC16_HI = 49; // half16 #hi(S + A - .TOC.) + public static final int R_PPC64_TOC16_HA = 50; // half16 #ha(S + A - .TOC.) + public static final int R_PPC64_TOC = 51; // doubleword64 .TOC. 
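+ // Editor's worked example of the half16 operators above (illustrative value, not taken
+ // from the ABI text): for V = 0x12348001, #lo(V) = 0x8001, #hi(V) = 0x1234 and
+ // #ha(V) = 0x1235; #ha adds 0x8000 before shifting so that the sign-extended #lo applied
+ // by a later addi reconstructs V exactly: (0x1235 << 16) + (short) 0x8001 == 0x12348001.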
+ public static final int R_PPC64_PLTGOT16 = 52; // half16* M + public static final int R_PPC64_PLTGOT16_LO = 53; // half16 #lo(M) + public static final int R_PPC64_PLTGOT16_HI = 54; // half16 #hi(M) + public static final int R_PPC64_PLTGOT16_HA = 55; // half16 #ha(M) + public static final int R_PPC64_ADDR16_DS = 56; // half16ds* (S + A) >> 2 + public static final int R_PPC64_ADDR16_LO_DS = 57; // half16ds #lo(S + A) >> 2 + public static final int R_PPC64_GOT16_DS = 58; // half16ds* G >> 2 + public static final int R_PPC64_GOT16_LO_DS = 59; // half16ds #lo(G) >> 2 + public static final int R_PPC64_PLT16_LO_DS = 60; // half16ds #lo(L) >> 2 + public static final int R_PPC64_SECTOFF_DS = 61; // half16ds* (R + A) >> 2 + public static final int R_PPC64_SECTOFF_LO_DS = 62; // half16ds #lo(R + A) >> 2 + public static final int R_PPC64_TOC16_DS = 63; // half16ds* (S + A - .TOC.) >> 2 + public static final int R_PPC64_TOC16_LO_DS = 64; // half16ds #lo(S + A - .TOC.) >> 2 + public static final int R_PPC64_PLTGOT16_DS = 65; // half16ds* M >> 2 + public static final int R_PPC64_PLTGOT16_LO_DS = 66; // half16ds #lo(M) >> 2 + public static final int R_PPC64_TLS = 67; + public static final int R_PPC64_DTPMOD64 = 68; // doubleword64 @dtpmod + public static final int R_PPC64_TPREL16 = 69; // half16* @tprel + public static final int R_PPC64_TPREL16_LO = 70; // half16 #lo(@tprel) + public static final int R_PPC64_TPREL16_HI = 71; // half16 #hi(@tprel) + public static final int R_PPC64_TPREL16_HA = 72; // half16 #ha(@tprel) + public static final int R_PPC64_TPREL64 = 73; // doubleword64 @tprel + public static final int R_PPC64_DTPREL16 = 74; // half16* @dtprel + public static final int R_PPC64_DTPREL16_LO = 75; // half16 #lo(@dtprel) + public static final int R_PPC64_DTPREL16_HI = 76; // half16 #hi(@dtprel) + public static final int R_PPC64_DTPREL16_HA = 77; // half16 #ha(@dtprel) + public static final int R_PPC64_DTPREL64 = 78; // doubleword64 @dtprel + public static final int R_PPC64_GOT_TLSGD16 = 79; // half16* @got@tlsgd + public static final int R_PPC64_GOT_TLSGD16_LO = 80; // half16 #lo(@got@tlsgd) + public static final int R_PPC64_GOT_TLSGD16_HI = 81; // half16 #hi(@got@tlsgd) + public static final int R_PPC64_GOT_TLSGD16_HA = 82; // half16 #ha(@got@tlsgd) + public static final int R_PPC64_GOT_TLSLD16 = 83; // half16* @got@tlsld + public static final int R_PPC64_GOT_TLSLD16_LO = 84; // half16 #lo(@got@tlsld) + public static final int R_PPC64_GOT_TLSLD16_HI = 85; // half16 #hi(@got@tlsld) + public static final int R_PPC64_GOT_TLSLD16_HA = 86; // half16 #ha(@got@tlsld) + public static final int R_PPC64_GOT_TPREL16_DS = 87; // half16ds* @got@tprel + public static final int R_PPC64_GOT_TPREL16_LO_DS = 88; // half16ds #lo(@got@tprel) + public static final int R_PPC64_GOT_TPREL16_HI = 89; // half16 #hi(@got@tprel) + public static final int R_PPC64_GOT_TPREL16_HA = 90; // half16 #ha(@got@tprel) + public static final int R_PPC64_GOT_DTPREL16_DS = 91; // half16ds* @got@dtprel + public static final int R_PPC64_GOT_DTPREL16_LO_DS = 92;// half16ds #lo(@got@dtprel) + public static final int R_PPC64_GOT_DTPREL16_HI = 93; // half16 #hi(@got@dtprel) + public static final int R_PPC64_GOT_DTPREL16_HA = 94; // half16 #ha(@got@dtprel) + public static final int R_PPC64_TPREL16_DS = 95; // half16ds* @tprel + public static final int R_PPC64_TPREL16_LO_DS = 96; // half16ds #lo(@tprel) + public static final int R_PPC64_TPREL16_HIGHER = 97; // half16 #higher(@tprel) + public static final int R_PPC64_TPREL16_HIGHERA = 98; // 
half16 #highera(@tprel) + public static final int R_PPC64_TPREL16_HIGHEST = 99; // half16 #highest(@tprel) + public static final int R_PPC64_TPREL16_HIGHESTA = 100; // half16 #highesta(@tprel) + public static final int R_PPC64_DTPREL16_DS = 101; // half16ds* @dtprel + public static final int R_PPC64_DTPREL16_LO_DS = 102; // half16ds #lo(@dtprel) + public static final int R_PPC64_DTPREL16_HIGHER = 103; // half16 #higher(@dtprel) + public static final int R_PPC64_DTPREL16_HIGHERA = 104; // half16 #highera(@dtprel) + public static final int R_PPC64_DTPREL16_HIGHEST = 105; // half16 #highest(@dtprel) + public static final int R_PPC64_DTPREL16_HIGHESTA = 106; // half16 #highesta(@dtprel) + + // Masks for manipulating Power PC relocation targets + public static final int PPC64_WORD32 = 0xFFFFFFFF; + public static final int PPC64_WORD30 = 0xFFFFFFFC; + public static final int PPC64_LOW24 = 0x03FFFFFC; + public static final int PPC64_LOW14 = 0x0020FFFC; + public static final int PPC64_HALF16 = 0xFFFF; + + private PowerPC64_ElfRelocationConstants() { + // no construct + } +} diff --git a/src/third-party/sleigh/processors/PowerPC/src/main/java/ghidra/app/util/bin/format/elf/relocation/PowerPC64_ElfRelocationHandler.java b/src/third-party/sleigh/processors/PowerPC/src/main/java/ghidra/app/util/bin/format/elf/relocation/PowerPC64_ElfRelocationHandler.java new file mode 100644 index 00000000..aff18018 --- /dev/null +++ b/src/third-party/sleigh/processors/PowerPC/src/main/java/ghidra/app/util/bin/format/elf/relocation/PowerPC64_ElfRelocationHandler.java @@ -0,0 +1,277 @@ +/* ### + * IP: GHIDRA + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package ghidra.app.util.bin.format.elf.relocation; + +import ghidra.app.util.bin.format.elf.*; +import ghidra.app.util.bin.format.elf.extend.PowerPC64_ElfExtension; +import ghidra.app.util.importer.MessageLog; +import ghidra.program.model.address.Address; +import ghidra.program.model.lang.Language; +import ghidra.program.model.listing.Program; +import ghidra.program.model.mem.*; +import ghidra.program.model.symbol.Symbol; +import ghidra.program.model.symbol.SymbolUtilities; +import ghidra.util.*; +import ghidra.util.exception.NotFoundException; + +public class PowerPC64_ElfRelocationHandler extends ElfRelocationHandler { + + @Override + public boolean canRelocate(ElfHeader elf) { + return elf.e_machine() == ElfConstants.EM_PPC64 && elf.is64Bit(); + } + + @Override + public void relocate(ElfRelocationContext elfRelocationContext, ElfRelocation relocation, + Address relocationAddress) throws MemoryAccessException, NotFoundException { + + ElfHeader elf = elfRelocationContext.getElfHeader(); + if (elf.e_machine() != ElfConstants.EM_PPC64 || !elf.is64Bit()) { + return; + } + + Program program = elfRelocationContext.getProgram(); + Memory memory = program.getMemory(); + + int type = relocation.getType(); + if (type == PowerPC64_ElfRelocationConstants.R_PPC64_NONE) { + return; + } + int symbolIndex = relocation.getSymbolIndex(); + + Language language = elfRelocationContext.getProgram().getLanguage(); + if (!"PowerPC".equals(language.getProcessor().toString()) || + language.getLanguageDescription().getSize() != 64) { + markAsError(program, relocationAddress, Long.toString(type), null, + "Unsupported language for 64-bit PowerPC relocation", + elfRelocationContext.getLog()); + } + + // NOTE: Based upon glibc source it appears that PowerPC only uses RELA relocations + long addend = relocation.getAddend(); + + long offset = relocationAddress.getOffset(); + + ElfSymbol sym = elfRelocationContext.getSymbol(symbolIndex); + String symbolName = sym.getNameAsString(); + Address symbolAddr = elfRelocationContext.getSymbolAddress(sym); + long symbolValue = elfRelocationContext.getSymbolValue(sym); + + int oldValue = memory.getInt(relocationAddress); + int newValue = 0; + + // IMPORTANT NOTE: + // Handling of Object modules (*.o) is currently problematic since relocations + // which are fixing-up function references can refer to the TOC or OPD tables + // since function call stubs are not added until a full-link is performed. + // This can result in the code improperly flowing into these function + // linkage tables. Relocation R_PPC64_REL24 has been changed to attempt + // a work-around for local function call made via .opd entries. Care must + // be taken not to do this for relocation types used within call stub code. 
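+ // Editor's sketch of the instruction pair such TOC relocations typically patch (an assumed
+ // medium-model code sequence, not taken from any particular binary):
+ //     addis r4, r2, sym@toc@ha    <- R_PPC64_TOC16_HA fills the high half
+ //     ld    r4, sym@toc@l(r4)     <- R_PPC64_TOC16_LO_DS fills the DS offset field
+ // Both fixups therefore require the absolute TOC_BASE value resolved below.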
+ + // Obtain TOC base used by certain relocations + long toc = 0; + switch (type) { + case PowerPC64_ElfRelocationConstants.R_PPC64_TOC16_LO: + case PowerPC64_ElfRelocationConstants.R_PPC64_TOC16_HI: + case PowerPC64_ElfRelocationConstants.R_PPC64_TOC16_HA: + case PowerPC64_ElfRelocationConstants.R_PPC64_TOC16_LO_DS: + case PowerPC64_ElfRelocationConstants.R_PPC64_TOC: + + MessageLog log = elfRelocationContext.getLog(); + Symbol tocBaseSym = SymbolUtilities.getLabelOrFunctionSymbol(program, + PowerPC64_ElfExtension.TOC_BASE, err -> log.error("PPC_ELF", err)); + if (tocBaseSym == null) { + markAsError(program, relocationAddress, type, symbolName, + "TOC_BASE unknown", log); + return; + } + toc = tocBaseSym.getAddress().getOffset(); + break; + default: + } + + switch (type) { + case PowerPC64_ElfRelocationConstants.R_PPC64_COPY: + markAsWarning(program, relocationAddress, "R_PPC64_COPY", symbolName, + symbolIndex, "Runtime copy not supported", elfRelocationContext.getLog()); + break; + case PowerPC64_ElfRelocationConstants.R_PPC64_ADDR32: + newValue = (int) (symbolValue + addend); + memory.setInt(relocationAddress, newValue); + break; + case PowerPC64_ElfRelocationConstants.R_PPC64_ADDR24: + newValue = (int) ((symbolValue + addend) >> 2); + newValue = + (oldValue & ~PowerPC64_ElfRelocationConstants.PPC64_LOW24) | (newValue << 2); + memory.setInt(relocationAddress, newValue); + break; + case PowerPC64_ElfRelocationConstants.R_PPC64_ADDR16: + newValue = (int) (symbolValue + addend); + memory.setShort(relocationAddress, (short) newValue); + break; + case PowerPC64_ElfRelocationConstants.R_PPC64_ADDR16_LO: + newValue = (int) (symbolValue + addend); + memory.setShort(relocationAddress, (short) newValue); + break; + case PowerPC64_ElfRelocationConstants.R_PPC64_TOC16_LO: + newValue = (int) (symbolValue + addend - toc); + memory.setShort(relocationAddress, (short) newValue); + break; + case PowerPC64_ElfRelocationConstants.R_PPC64_TOC16_LO_DS: + newValue = (int) ((symbolValue + addend - toc) >> 2); + newValue = ((oldValue >>> 16) & 0x3) | (newValue << 2); + memory.setShort(relocationAddress, (short) newValue); + break; + case PowerPC64_ElfRelocationConstants.R_PPC64_ADDR16_HI: + newValue = (int) (symbolValue + addend); + newValue = ((newValue >> 16) & 0xFFFF); + memory.setShort(relocationAddress, (short) newValue); + break; + case PowerPC64_ElfRelocationConstants.R_PPC64_TOC16_HI: + newValue = (int) (symbolValue + addend - toc); + newValue = ((newValue >> 16) & 0xFFFF); + memory.setShort(relocationAddress, (short) newValue); + break; + case PowerPC64_ElfRelocationConstants.R_PPC64_ADDR16_HA: + newValue = (int) (symbolValue + addend); + newValue = ((newValue >> 16) + (((newValue & 0x8000) != 0) ? 1 : 0)); + memory.setShort(relocationAddress, (short) newValue); + break; + case PowerPC64_ElfRelocationConstants.R_PPC64_TOC16_HA: + newValue = (int) (symbolValue + addend - toc); + newValue = ((newValue >> 16) + (((newValue & 0x8000) != 0) ? 
1 : 0)); + memory.setShort(relocationAddress, (short) newValue); + break; + case PowerPC64_ElfRelocationConstants.R_PPC64_ADDR14: + case PowerPC64_ElfRelocationConstants.R_PPC64_ADDR14_BRTAKEN: + case PowerPC64_ElfRelocationConstants.R_PPC64_ADDR14_BRNTAKEN: + newValue = (int) ((symbolValue + addend) >> 2); + newValue = (oldValue & ~PowerPC64_ElfRelocationConstants.PPC64_LOW14) | + ((newValue << 2) & PowerPC64_ElfRelocationConstants.PPC64_LOW24); + memory.setInt(relocationAddress, newValue); + break; + case PowerPC64_ElfRelocationConstants.R_PPC64_REL24: + + // attempt to handle Object module case where referenced symbol resides within .opd + symbolValue = fixupOPDSymbolValue(elfRelocationContext, sym); + + newValue = (int) ((symbolValue + addend - offset) >> 2); + newValue = ((newValue << 2) & PowerPC64_ElfRelocationConstants.PPC64_LOW24); + newValue = (oldValue & ~PowerPC64_ElfRelocationConstants.PPC64_LOW24) | newValue; + memory.setInt(relocationAddress, newValue); + break; + case PowerPC64_ElfRelocationConstants.R_PPC64_RELATIVE: + long value64 = elfRelocationContext.getImageBaseWordAdjustmentOffset() + addend; + memory.setLong(relocationAddress, value64); + break; + case PowerPC64_ElfRelocationConstants.R_PPC64_REL32: + newValue = (int) (symbolValue + addend - offset); + memory.setInt(relocationAddress, newValue); + break; + case PowerPC64_ElfRelocationConstants.R_PPC64_REL14: + case PowerPC64_ElfRelocationConstants.R_PPC64_REL14_BRTAKEN: + case PowerPC64_ElfRelocationConstants.R_PPC64_REL14_BRNTAKEN: + newValue = (int) (symbolValue + addend - offset) >> 2; + newValue = (oldValue & ~PowerPC64_ElfRelocationConstants.PPC64_LOW14) | + ((newValue << 2) & PowerPC64_ElfRelocationConstants.PPC64_LOW14); + memory.setInt(relocationAddress, newValue); + break; + case PowerPC64_ElfRelocationConstants.R_PPC64_JMP_SLOT: + // TODO: do we need option to allow function descriptor + // use - or not? The EF_PPC64_ABI in e_flags is not reliable. + Address functionDescriptorAddr = relocationAddress.getNewAddress(symbolValue); + MemoryBlock block = memory.getBlock(functionDescriptorAddr); + if (block == null) { + throw new MemoryAccessException( + "Function descriptor not found at: " + functionDescriptorAddr); + } + if (MemoryBlock.EXTERNAL_BLOCK_NAME.equals(block.getName())) { + // If symbol is in EXTERNAL block, we don't have descriptor entry; + // just fill-in first slot with EXTERNAL address + memory.setLong(relocationAddress, symbolValue); + } + else { + // Copy function descriptor data + byte[] bytes = new byte[24]; // TODO: can descriptor size vary ? 
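+ // Editor's note: an ELFv1 function descriptor is assumed here to span three doublewords,
+ // { entry point, TOC base, environment pointer }, hence the 24-byte copy.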
+ memory.getBytes(functionDescriptorAddr, bytes); + memory.setBytes(relocationAddress, bytes); + } + break; + case PowerPC64_ElfRelocationConstants.R_PPC64_UADDR32: + newValue = (int) (symbolValue + addend); + memory.setInt(relocationAddress, newValue); + break; + case PowerPC64_ElfRelocationConstants.R_PPC64_UADDR16: + newValue = (int) (symbolValue + addend); + memory.setShort(relocationAddress, (short) newValue); + break; + case PowerPC64_ElfRelocationConstants.R_PPC64_UADDR64: + case PowerPC64_ElfRelocationConstants.R_PPC64_ADDR64: + case PowerPC64_ElfRelocationConstants.R_PPC64_GLOB_DAT: + if (addend != 0 && isUnsupportedExternalRelocation(program, relocationAddress, + symbolAddr, symbolName, addend, elfRelocationContext.getLog())) { + addend = 0; // prefer bad fixup for EXTERNAL over really-bad fixup + } + value64 = symbolValue + addend; + memory.setLong(relocationAddress, value64); + break; + case PowerPC64_ElfRelocationConstants.R_PPC64_TOC: + memory.setLong(relocationAddress, toc); + break; + default: + markAsUnhandled(program, relocationAddress, type, symbolIndex, symbolName, + elfRelocationContext.getLog()); + break; + } + + } + + /** + * This method generates a symbol value with possible substitution for those + * symbols residing within the .opd to refer to the real function instead. + * Care must be taken not to invoke this method for relocations which may be + * applied to call stubs. It is also important that relocations have already + * been applied to the .opd section since we will be using its data for + * locating the real function. + * @param elfRelocationContext + * @param sym + * @return symbol value + * @throws MemoryAccessException + */ + private long fixupOPDSymbolValue(ElfRelocationContext elfRelocationContext, ElfSymbol sym) + throws MemoryAccessException { + Address addr = elfRelocationContext.getSymbolAddress(sym); + if (addr == null) { + return 0; + } + Program program = elfRelocationContext.getProgram(); + MemoryBlock block = program.getMemory().getBlock(addr); + if (block == null || !".opd".equals(block.getName())) { + return addr.getOffset(); + } + // .opd symbols will get moved to the real function by the extension (see processFunctionDescriptors) + // Call stubs should always use the .opd symbol value and not the function address; this + // distinction can only be made using the relocation type. + byte[] bytes = new byte[8]; + block.getBytes(addr, bytes); + boolean bigEndian = elfRelocationContext.getElfHeader().isBigEndian(); + DataConverter dataConverter = + bigEndian ? BigEndianDataConverter.INSTANCE : LittleEndianDataConverter.INSTANCE; + return dataConverter.getLong(bytes); + } + +} diff --git a/src/third-party/sleigh/processors/PowerPC/src/main/java/ghidra/app/util/bin/format/elf/relocation/PowerPC_ElfRelocationConstants.java b/src/third-party/sleigh/processors/PowerPC/src/main/java/ghidra/app/util/bin/format/elf/relocation/PowerPC_ElfRelocationConstants.java new file mode 100644 index 00000000..efdaedf5 --- /dev/null +++ b/src/third-party/sleigh/processors/PowerPC/src/main/java/ghidra/app/util/bin/format/elf/relocation/PowerPC_ElfRelocationConstants.java @@ -0,0 +1,71 @@ +/* ### + * IP: GHIDRA + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package ghidra.app.util.bin.format.elf.relocation; + +public class PowerPC_ElfRelocationConstants { + + public static final int R_PPC_NONE = 0; + public static final int R_PPC_ADDR32 = 1; // word32 S + A + public static final int R_PPC_ADDR24 = 2; // low24 (S + A) >> 2 + public static final int R_PPC_ADDR16 = 3; // half16 S + A + public static final int R_PPC_ADDR16_LO = 4; // half16 #lo(S + A) + public static final int R_PPC_ADDR16_HI = 5; // half16 #hi(S + A) + public static final int R_PPC_ADDR16_HA = 6; // half16 #ha(S + A) + public static final int R_PPC_ADDR14 = 7; // low14 (S + A) >> 2 + public static final int R_PPC_ADDR14_BRTAKEN = 8; // low14 (S + A) >> 2 + public static final int R_PPC_ADDR14_BRNTAKEN = 9; // low14 (S + A) >> 2 + public static final int R_PPC_REL24 = 10; // low24 (S + A - P) >> 2 + public static final int R_PPC_REL14 = 11; // low14 (S + A - P) >> 2 + public static final int R_PPC_REL14_BRTAKEN = 12; // low14 (S + A - P) >> + // 2 + public static final int R_PPC_REL14_BRNTAKEN = 13; // low14 (S + A - P) >> + // 2 + public static final int R_PPC_GOT16 = 14; // half16 G + A + public static final int R_PPC_GOT16_LO = 15; // half16 #lo(G + A) + public static final int R_PPC_GOT16_HI = 16; // half16 #hi(G + A) + public static final int R_PPC_GOT16_HA = 17; // half16 #ha(G + A) + public static final int R_PPC_PLTREL24 = 18; // low24 (L + A - P) >> 2 + public static final int R_PPC_COPY = 19; // none none + public static final int R_PPC_GLOB_DAT = 20; // word32 S + A + public static final int R_PPC_JMP_SLOT = 21; // Old ABI: word32 S + A, New ABI: generate branch instruction + public static final int R_PPC_RELATIVE = 22; // word32 B + A + public static final int R_PPC_LOCAL24PC = 23; // none + public static final int R_PPC_UADDR32 = 24; // word32 S + A + public static final int R_PPC_UADDR16 = 25; // half16 S + A + public static final int R_PPC_REL32 = 26; // word32 S + A - P + public static final int R_PPC_PLT32 = 27; // word32 L + A + public static final int R_PPC_PLTREL32 = 28; // word32 L + A - P + public static final int R_PPC_PLT16_LO = 29; // half16 #lo(L + A) + public static final int R_PPC_PLT16_HI = 30; // half16 #hi(L + A) + public static final int R_PPC_PLT16_HA = 31; // half16 #ha(L + A) + public static final int R_PPC_SDAREL16 = 32; // half16 S + A - _SDA_BASE_ + public static final int R_PPC_SECTOFF = 33; // half16 R + A + public static final int R_PPC_SECTOFF_LO = 34; // half16 #lo(R + A) + public static final int R_PPC_SECTOFF_HI = 35; // half16 #hi(R + A) + public static final int R_PPC_SECTOFF_HA = 36; // half16 #ha(R + A) + public static final int R_PPC_ADDR30 = 37; // word30 (S + A - P) >> 2 + + // Masks for manipulating Power PC relocation targets + public static final int PPC_WORD32 = 0xFFFFFFFF; + public static final int PPC_WORD30 = 0xFFFFFFFC; + public static final int PPC_LOW24 = 0x03FFFFFC; + public static final int PPC_LOW14 = 0x0020FFFC; + public static final int PPC_HALF16 = 0xFFFF; + + private PowerPC_ElfRelocationConstants() { + // no construct + } +} diff --git
a/src/third-party/sleigh/processors/PowerPC/src/main/java/ghidra/app/util/bin/format/elf/relocation/PowerPC_ElfRelocationHandler.java b/src/third-party/sleigh/processors/PowerPC/src/main/java/ghidra/app/util/bin/format/elf/relocation/PowerPC_ElfRelocationHandler.java new file mode 100644 index 00000000..178b04d0 --- /dev/null +++ b/src/third-party/sleigh/processors/PowerPC/src/main/java/ghidra/app/util/bin/format/elf/relocation/PowerPC_ElfRelocationHandler.java @@ -0,0 +1,215 @@ +/* ### + * IP: GHIDRA + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package ghidra.app.util.bin.format.elf.relocation; + +import ghidra.app.util.bin.format.elf.*; +import ghidra.app.util.bin.format.elf.extend.PowerPC_ElfExtension; +import ghidra.program.model.address.Address; +import ghidra.program.model.lang.Language; +import ghidra.program.model.listing.Program; +import ghidra.program.model.mem.Memory; +import ghidra.program.model.mem.MemoryAccessException; +import ghidra.util.exception.NotFoundException; + +public class PowerPC_ElfRelocationHandler extends ElfRelocationHandler { + + @Override + public boolean canRelocate(ElfHeader elf) { + return elf.e_machine() == ElfConstants.EM_PPC && elf.is32Bit(); + } + + @Override + public void relocate(ElfRelocationContext elfRelocationContext, ElfRelocation relocation, + Address relocationAddress) throws MemoryAccessException, NotFoundException { + + ElfHeader elf = elfRelocationContext.getElfHeader(); + if (elf.e_machine() != ElfConstants.EM_PPC || !elf.is32Bit()) { + return; + } + + Program program = elfRelocationContext.getProgram(); + Memory memory = program.getMemory(); + + int type = relocation.getType(); + if (type == PowerPC_ElfRelocationConstants.R_PPC_NONE) { + return; + } + int symbolIndex = relocation.getSymbolIndex(); + + Language language = elfRelocationContext.getProgram().getLanguage(); + if (!"PowerPC".equals(language.getProcessor().toString()) || + language.getLanguageDescription().getSize() != 32) { + markAsError(program, relocationAddress, Long.toString(type), null, + "Unsupported language for 32-bit PowerPC relocation", + elfRelocationContext.getLog()); + } + + // NOTE: Based upon glibc source it appears that PowerPC only uses RELA relocations + int addend = (int) relocation.getAddend(); + + int offset = (int) relocationAddress.getOffset(); + + ElfSymbol sym = elfRelocationContext.getSymbol(symbolIndex); + +// if (sym.isLocal() && sym.getSectionHeaderIndex() != ElfSectionHeaderConstants.SHN_UNDEF) { +// +// // see glibc - sysdeps/powerpc/powerpc32/dl-machine.h elf_machine_rela +// +// // TODO: Unclear if this logic is complete. Need to find example where this is necessary. 
+// +// // Relocation addend already includes original symbol value but needs to account +// // for any image base adjustment +// symbolValue = (int) elfRelocationContext.getImageBaseWordAdjustmentOffset(); +// } +// else { + Address symbolAddr = (elfRelocationContext.getSymbolAddress(sym)); + int symbolValue = (int) elfRelocationContext.getSymbolValue(sym); +// } + String symbolName = sym.getNameAsString(); + + int oldValue = memory.getInt(relocationAddress); + int newValue = 0; + + switch (type) { + case PowerPC_ElfRelocationConstants.R_PPC_COPY: + markAsWarning(program, relocationAddress, "R_PPC_COPY", symbolName, + symbolIndex, "Runtime copy not supported", elfRelocationContext.getLog()); + break; + case PowerPC_ElfRelocationConstants.R_PPC_ADDR32: + case PowerPC_ElfRelocationConstants.R_PPC_UADDR32: + case PowerPC_ElfRelocationConstants.R_PPC_GLOB_DAT: + if (addend != 0 && isUnsupportedExternalRelocation(program, relocationAddress, + symbolAddr, symbolName, addend, elfRelocationContext.getLog())) { + addend = 0; // prefer bad fixup for EXTERNAL over really-bad fixup + } + newValue = symbolValue + addend; + memory.setInt(relocationAddress, newValue); + break; + case PowerPC_ElfRelocationConstants.R_PPC_ADDR24: + newValue = (symbolValue + addend) >> 2; + newValue = (oldValue & ~PowerPC_ElfRelocationConstants.PPC_LOW24) | (newValue << 2); + memory.setInt(relocationAddress, newValue); + break; + case PowerPC_ElfRelocationConstants.R_PPC_ADDR16: + case PowerPC_ElfRelocationConstants.R_PPC_UADDR16: + case PowerPC_ElfRelocationConstants.R_PPC_ADDR16_LO: + newValue = symbolValue + addend; + memory.setShort(relocationAddress, (short) newValue); + break; + case PowerPC_ElfRelocationConstants.R_PPC_ADDR16_HI: + newValue = (symbolValue + addend) >> 16; + memory.setShort(relocationAddress, (short) newValue); + break; + /** + * + R_POWERPC_ADDR16_HA: ((Symbol + Addend + 0x8000) >> 16) & 0xffff + static inline void addr16_ha(unsigned char* view, Address value) + { This::addr16_hi(view, value + 0x8000); } + + static inline void + addr16_hi(unsigned char* view, Address value) + { This::template rela<16,16>(view, 16, 0xffff, value + 0x8000, CHECK_NONE); } + + rela(unsigned char* view, + unsigned int right_shift, + typename elfcpp::Valtype_base::Valtype dst_mask, + Address value, + Overflow_check overflow) + { + typedef typename elfcpp::Swap::Valtype Valtype; + Valtype* wv = reinterpret_cast(view); + Valtype val = elfcpp::Swap::readval(wv); // original bytes + + Valtype reloc = value >> 16; + val &= ~0xffff; + reloc &= dst_mask; + elfcpp::Swap::writeval(wv, val | reloc); // write instr btes + return overflowed(value >> 16, overflow); + } + + + */ + case PowerPC_ElfRelocationConstants.R_PPC_ADDR16_HA: + newValue = (symbolValue + addend + 0x8000) >> 16; + memory.setShort(relocationAddress, (short) newValue); + break; + case PowerPC_ElfRelocationConstants.R_PPC_ADDR14: + case PowerPC_ElfRelocationConstants.R_PPC_ADDR14_BRTAKEN: + case PowerPC_ElfRelocationConstants.R_PPC_ADDR14_BRNTAKEN: + newValue = (symbolValue + addend) >> 2; + newValue = (oldValue & ~PowerPC_ElfRelocationConstants.PPC_LOW14) | + ((newValue << 2) & PowerPC_ElfRelocationConstants.PPC_LOW24); + memory.setInt(relocationAddress, newValue); + break; + case PowerPC_ElfRelocationConstants.R_PPC_REL24: + newValue = (symbolValue + addend - offset) >> 2; + newValue = ((newValue << 2) & PowerPC_ElfRelocationConstants.PPC_LOW24); + newValue = (oldValue & ~PowerPC_ElfRelocationConstants.PPC_LOW24) | newValue; + memory.setInt(relocationAddress, 
newValue); + break; + case PowerPC_ElfRelocationConstants.R_PPC_RELATIVE: + newValue = (int) elfRelocationContext.getImageBaseWordAdjustmentOffset() + addend; + memory.setInt(relocationAddress, newValue); + break; + case PowerPC_ElfRelocationConstants.R_PPC_REL32: + newValue = (symbolValue + addend - offset); + memory.setInt(relocationAddress, newValue); + break; + case PowerPC_ElfRelocationConstants.R_PPC_REL14: + case PowerPC_ElfRelocationConstants.R_PPC_REL14_BRTAKEN: + case PowerPC_ElfRelocationConstants.R_PPC_REL14_BRNTAKEN: + newValue = (symbolValue + addend - offset) >> 2; + newValue = (oldValue & ~PowerPC_ElfRelocationConstants.PPC_LOW14) | + ((newValue << 2) & PowerPC_ElfRelocationConstants.PPC_LOW14); + memory.setInt(relocationAddress, newValue); + break; + case PowerPC_ElfRelocationConstants.R_PPC_JMP_SLOT: + int value = symbolValue + addend; + ElfDynamicTable dynamicTable = elf.getDynamicTable(); + if (dynamicTable != null && + dynamicTable.containsDynamicValue(PowerPC_ElfExtension.DT_PPC_GOT)) { + // Old ABI - presence of dynamic entry DT_PPC_GOT used as indicator + memory.setInt(relocationAddress, value); + break; + } + int displacement = value - offset; + if ((displacement << 6 >> 6) == displacement) { + // inject branch relative instruction + newValue = 0x48000000 | (displacement & 0x3fffffc); + memory.setInt(relocationAddress, newValue); + } + else if ((value > 0 && value <= 0x1fffffc) || (value < 0 && value >= 0xfe000000)) { + // inject branch absolute instruction + newValue = 0x48000002 | (value & 0x3fffffc); + memory.setInt(relocationAddress, newValue); + } + else { + // TODO: Handle this case if needed - hopefully the EXTERNAL block is + // not too far away since a fabricated GOT would be in the same block + // and we may only have room in the plt for two instructions. + markAsUnhandled(program, relocationAddress, type, symbolIndex, symbolName, + elfRelocationContext.getLog()); + } + break; + default: + markAsUnhandled(program, relocationAddress, type, symbolIndex, symbolName, + elfRelocationContext.getLog()); + break; + } + + } + +} diff --git a/src/third-party/sleigh/processors/PowerPC/src/main/java/ghidra/program/emulation/PPCEmulateInstructionStateModifier.java b/src/third-party/sleigh/processors/PowerPC/src/main/java/ghidra/program/emulation/PPCEmulateInstructionStateModifier.java new file mode 100644 index 00000000..4b6a7a3c --- /dev/null +++ b/src/third-party/sleigh/processors/PowerPC/src/main/java/ghidra/program/emulation/PPCEmulateInstructionStateModifier.java @@ -0,0 +1,111 @@ +/* ### + * IP: GHIDRA + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package ghidra.program.emulation; + +import java.math.BigInteger; + +import ghidra.pcode.emulate.Emulate; +import ghidra.pcode.emulate.EmulateInstructionStateModifier; +import ghidra.pcode.emulate.callother.CountLeadingZerosOpBehavior; +import ghidra.pcode.emulate.callother.OpBehaviorOther; +import ghidra.pcode.memstate.MemoryState; +import ghidra.pcodeCPort.error.LowlevelError; +import ghidra.program.model.pcode.Varnode; + +public class PPCEmulateInstructionStateModifier extends EmulateInstructionStateModifier { + + public PPCEmulateInstructionStateModifier(Emulate emu) { + super(emu); + + registerPcodeOpBehavior("countLeadingZeros", new CountLeadingZerosOpBehavior()); + registerPcodeOpBehavior("vectorPermute", new vectorPermuteOpBehavior()); + + } + + private class vectorPermuteOpBehavior implements OpBehaviorOther { + + @Override + public void evaluate(Emulate emu, Varnode out, Varnode[] inputs) { + + if (out == null) { + throw new LowlevelError("CALLOTHER: Vector permute op missing required output"); + } + + if (inputs.length != 4) { + throw new LowlevelError( + "CALLOTHER: Vector permute op requires three non-constant varnode input"); + } + for (int i = 1; i < 4; i++) { + if (inputs[i].getSize() == 0 || inputs[i].isConstant()) { + throw new LowlevelError( + "CALLOTHER: Vector permute op requires three non-constant varnode input"); + + } + } + + Varnode in1 = inputs[1]; + Varnode in2 = inputs[2]; + Varnode in3 = inputs[3]; + if ((in1.getSize() != 16) || (in2.getSize() != 16) || (in3.getSize() != 16) || + (out.getSize() != 16)) { + throw new LowlevelError( + "CALLOTHER: Vector permute op inputs/output must be 16bytes long"); + } + + MemoryState memoryState = emu.getMemoryState(); + + // Combine two 16-byte inputs to form single 32-byte input + BigInteger src = memoryState.getBigInteger(in1, false); + src = src.shiftLeft(128); + src = src.or(memoryState.getBigInteger(in2, false)); + byte[] srcin = getUnsignedValueArray(src.toByteArray(), 32); + + // Get 16-byte permute input + byte[] pin = memoryState.getBigInteger(in3, false).toByteArray(); + byte[] permute = getUnsignedValueArray(pin, 16); + + // Generate 16-byte output + byte[] outarray = new byte[16]; + for (int i = 0; i < 16; i++) { + outarray[i] = srcin[(permute[i] & 0x1f)]; + } + + memoryState.setValue(out, new BigInteger(outarray)); + } + } + + /** + * Generate an unsigned value array from variable length srcBytes extending or truncating + * bytes as needed to ensure a returned length of byteLength. The MSB is located + * at byte index 0, therefore adjustments may be needed to ensure that the LSB retains + * its position in the least-significant byte. A short srcBytes array will result in + * zero-filled most-significant bytes within the result. 
+ * @param srcBytes unsigned source value array + * @param byteLength desired result value length in bytes + * @return a value byte array of the specified byteLength + */ + private static byte[] getUnsignedValueArray(byte[] srcBytes, int byteLength) { + if (srcBytes.length == byteLength) { + return srcBytes; + } + byte[] result = new byte[byteLength]; + int srcStartIndex = Math.max(0, srcBytes.length - byteLength); // discard excessive most-significant bytes + int copyCount = Math.min(byteLength, srcBytes.length); // limit copy to requested number of bytes + int destStartIndex = byteLength - copyCount; // adjust if too few bytes provided + System.arraycopy(srcBytes, srcStartIndex, result, destStartIndex, copyCount); + return result; + } +} diff --git a/src/third-party/sleigh/processors/PowerPC/src/test.processors/java/ghidra/test/processors/PPC64_O0_EmulatorTest.java b/src/third-party/sleigh/processors/PowerPC/src/test.processors/java/ghidra/test/processors/PPC64_O0_EmulatorTest.java new file mode 100644 index 00000000..8165ce7b --- /dev/null +++ b/src/third-party/sleigh/processors/PowerPC/src/test.processors/java/ghidra/test/processors/PPC64_O0_EmulatorTest.java @@ -0,0 +1,40 @@ +/* ### + * IP: GHIDRA + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package ghidra.test.processors; + +import ghidra.test.processors.support.ProcessorEmulatorTestAdapter; +import junit.framework.Test; + +public class PPC64_O0_EmulatorTest extends ProcessorEmulatorTestAdapter { + + private static final String LANGUAGE_ID = "PowerPC:BE:64:default"; + private static final String COMPILER_SPEC_ID = "default"; + + private static final String[] REG_DUMP_SET = new String[] {}; + + public PPC64_O0_EmulatorTest(String name) throws Exception { + super(name, LANGUAGE_ID, COMPILER_SPEC_ID, REG_DUMP_SET); + } + + @Override + protected String getProcessorDesignator() { + return "powerpc64_GCC_O0"; + } + + public static Test suite() { + return ProcessorEmulatorTestAdapter.buildEmulatorTestSuite(PPC64_O0_EmulatorTest.class); + } +} diff --git a/src/third-party/sleigh/processors/PowerPC/src/test.processors/java/ghidra/test/processors/PPC64_O3_EmulatorTest.java b/src/third-party/sleigh/processors/PowerPC/src/test.processors/java/ghidra/test/processors/PPC64_O3_EmulatorTest.java new file mode 100644 index 00000000..4ab136fd --- /dev/null +++ b/src/third-party/sleigh/processors/PowerPC/src/test.processors/java/ghidra/test/processors/PPC64_O3_EmulatorTest.java @@ -0,0 +1,40 @@ +/* ### + * IP: GHIDRA + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ +package ghidra.test.processors; + +import ghidra.test.processors.support.ProcessorEmulatorTestAdapter; +import junit.framework.Test; + +public class PPC64_O3_EmulatorTest extends ProcessorEmulatorTestAdapter { + + private static final String LANGUAGE_ID = "PowerPC:BE:64:default"; + private static final String COMPILER_SPEC_ID = "default"; + + private static final String[] REG_DUMP_SET = new String[] {}; + + public PPC64_O3_EmulatorTest(String name) throws Exception { + super(name, LANGUAGE_ID, COMPILER_SPEC_ID, REG_DUMP_SET); + } + + @Override + protected String getProcessorDesignator() { + return "powerpc64_GCC_O3"; + } + + public static Test suite() { + return ProcessorEmulatorTestAdapter.buildEmulatorTestSuite(PPC64_O3_EmulatorTest.class); + } +} diff --git a/src/third-party/sleigh/processors/PowerPC/src/test.processors/java/ghidra/test/processors/PPCA2Alt_O0_EmulatorTest.java b/src/third-party/sleigh/processors/PowerPC/src/test.processors/java/ghidra/test/processors/PPCA2Alt_O0_EmulatorTest.java new file mode 100644 index 00000000..8844daee --- /dev/null +++ b/src/third-party/sleigh/processors/PowerPC/src/test.processors/java/ghidra/test/processors/PPCA2Alt_O0_EmulatorTest.java @@ -0,0 +1,40 @@ +/* ### + * IP: GHIDRA + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package ghidra.test.processors; + +import ghidra.test.processors.support.ProcessorEmulatorTestAdapter; +import junit.framework.Test; + +public class PPCA2Alt_O0_EmulatorTest extends ProcessorEmulatorTestAdapter { + + private static final String LANGUAGE_ID = "PowerPC:BE:64:A2ALT-32addr"; + private static final String COMPILER_SPEC_ID = "default"; + + private static final String[] REG_DUMP_SET = new String[] {}; + + public PPCA2Alt_O0_EmulatorTest(String name) throws Exception { + super(name, LANGUAGE_ID, COMPILER_SPEC_ID, REG_DUMP_SET); + } + + @Override + protected String getProcessorDesignator() { + return "ppcA2Alt_GCC_O0"; + } + + public static Test suite() { + return ProcessorEmulatorTestAdapter.buildEmulatorTestSuite(PPCA2Alt_O0_EmulatorTest.class); + } +} diff --git a/src/third-party/sleigh/processors/PowerPC/src/test.processors/java/ghidra/test/processors/PPCA2Alt_O3_EmulatorTest.java b/src/third-party/sleigh/processors/PowerPC/src/test.processors/java/ghidra/test/processors/PPCA2Alt_O3_EmulatorTest.java new file mode 100644 index 00000000..2aebeb2b --- /dev/null +++ b/src/third-party/sleigh/processors/PowerPC/src/test.processors/java/ghidra/test/processors/PPCA2Alt_O3_EmulatorTest.java @@ -0,0 +1,40 @@ +/* ### + * IP: GHIDRA + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package ghidra.test.processors; + +import ghidra.test.processors.support.ProcessorEmulatorTestAdapter; +import junit.framework.Test; + +public class PPCA2Alt_O3_EmulatorTest extends ProcessorEmulatorTestAdapter { + + private static final String LANGUAGE_ID = "PowerPC:BE:64:A2ALT-32addr"; + private static final String COMPILER_SPEC_ID = "default"; + + private static final String[] REG_DUMP_SET = new String[] {}; + + public PPCA2Alt_O3_EmulatorTest(String name) throws Exception { + super(name, LANGUAGE_ID, COMPILER_SPEC_ID, REG_DUMP_SET); + } + + @Override + protected String getProcessorDesignator() { + return "ppcA2Alt_GCC_O3"; + } + + public static Test suite() { + return ProcessorEmulatorTestAdapter.buildEmulatorTestSuite(PPCA2Alt_O3_EmulatorTest.class); + } +} diff --git a/src/third-party/sleigh/processors/PowerPC/src/test.processors/java/ghidra/test/processors/PPCA2_O0_EmulatorTest.java b/src/third-party/sleigh/processors/PowerPC/src/test.processors/java/ghidra/test/processors/PPCA2_O0_EmulatorTest.java new file mode 100644 index 00000000..18084d51 --- /dev/null +++ b/src/third-party/sleigh/processors/PowerPC/src/test.processors/java/ghidra/test/processors/PPCA2_O0_EmulatorTest.java @@ -0,0 +1,40 @@ +/* ### + * IP: GHIDRA + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package ghidra.test.processors; + +import ghidra.test.processors.support.ProcessorEmulatorTestAdapter; +import junit.framework.Test; + +public class PPCA2_O0_EmulatorTest extends ProcessorEmulatorTestAdapter { + + private static final String LANGUAGE_ID = "PowerPC:BE:64:A2-32addr"; + private static final String COMPILER_SPEC_ID = "default"; + + private static final String[] REG_DUMP_SET = new String[] {}; + + public PPCA2_O0_EmulatorTest(String name) throws Exception { + super(name, LANGUAGE_ID, COMPILER_SPEC_ID, REG_DUMP_SET); + } + + @Override + protected String getProcessorDesignator() { + return "ppcA2_GCC_O0"; + } + + public static Test suite() { + return ProcessorEmulatorTestAdapter.buildEmulatorTestSuite(PPCA2_O0_EmulatorTest.class); + } +} diff --git a/src/third-party/sleigh/processors/PowerPC/src/test.processors/java/ghidra/test/processors/PPCA2_O3_EmulatorTest.java b/src/third-party/sleigh/processors/PowerPC/src/test.processors/java/ghidra/test/processors/PPCA2_O3_EmulatorTest.java new file mode 100644 index 00000000..0b340c7b --- /dev/null +++ b/src/third-party/sleigh/processors/PowerPC/src/test.processors/java/ghidra/test/processors/PPCA2_O3_EmulatorTest.java @@ -0,0 +1,40 @@ +/* ### + * IP: GHIDRA + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package ghidra.test.processors; + +import ghidra.test.processors.support.ProcessorEmulatorTestAdapter; +import junit.framework.Test; + +public class PPCA2_O3_EmulatorTest extends ProcessorEmulatorTestAdapter { + + private static final String LANGUAGE_ID = "PowerPC:BE:64:A2-32addr"; + private static final String COMPILER_SPEC_ID = "default"; + + private static final String[] REG_DUMP_SET = new String[] {}; + + public PPCA2_O3_EmulatorTest(String name) throws Exception { + super(name, LANGUAGE_ID, COMPILER_SPEC_ID, REG_DUMP_SET); + } + + @Override + protected String getProcessorDesignator() { + return "ppcA2_GCC_O3"; + } + + public static Test suite() { + return ProcessorEmulatorTestAdapter.buildEmulatorTestSuite(PPCA2_O3_EmulatorTest.class); + } +} diff --git a/src/third-party/sleigh/processors/PowerPC/src/test.processors/java/ghidra/test/processors/PPCP8Alt_O0_EmulatorTest.java b/src/third-party/sleigh/processors/PowerPC/src/test.processors/java/ghidra/test/processors/PPCP8Alt_O0_EmulatorTest.java new file mode 100644 index 00000000..4d92e30d --- /dev/null +++ b/src/third-party/sleigh/processors/PowerPC/src/test.processors/java/ghidra/test/processors/PPCP8Alt_O0_EmulatorTest.java @@ -0,0 +1,40 @@ +/* ### + * IP: GHIDRA + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ +package ghidra.test.processors; + +import ghidra.test.processors.support.ProcessorEmulatorTestAdapter; +import junit.framework.Test; + +public class PPCP8Alt_O0_EmulatorTest extends ProcessorEmulatorTestAdapter { + + private static final String LANGUAGE_ID = "PowerPC:BE:64:A2ALT-32addr"; + private static final String COMPILER_SPEC_ID = "default"; + + private static final String[] REG_DUMP_SET = new String[] {}; + + public PPCP8Alt_O0_EmulatorTest(String name) throws Exception { + super(name, LANGUAGE_ID, COMPILER_SPEC_ID, REG_DUMP_SET); + } + + @Override + protected String getProcessorDesignator() { + return "ppcP8Alt_GCC_O0"; + } + + public static Test suite() { + return ProcessorEmulatorTestAdapter.buildEmulatorTestSuite(PPCP8Alt_O0_EmulatorTest.class); + } +} diff --git a/src/third-party/sleigh/processors/PowerPC/src/test.processors/java/ghidra/test/processors/PPCP8Alt_O3_EmulatorTest.java b/src/third-party/sleigh/processors/PowerPC/src/test.processors/java/ghidra/test/processors/PPCP8Alt_O3_EmulatorTest.java new file mode 100644 index 00000000..3f9030b8 --- /dev/null +++ b/src/third-party/sleigh/processors/PowerPC/src/test.processors/java/ghidra/test/processors/PPCP8Alt_O3_EmulatorTest.java @@ -0,0 +1,40 @@ +/* ### + * IP: GHIDRA + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package ghidra.test.processors; + +import ghidra.test.processors.support.ProcessorEmulatorTestAdapter; +import junit.framework.Test; + +public class PPCP8Alt_O3_EmulatorTest extends ProcessorEmulatorTestAdapter { + + private static final String LANGUAGE_ID = "PowerPC:BE:64:A2ALT-32addr"; + private static final String COMPILER_SPEC_ID = "default"; + + private static final String[] REG_DUMP_SET = new String[] {}; + + public PPCP8Alt_O3_EmulatorTest(String name) throws Exception { + super(name, LANGUAGE_ID, COMPILER_SPEC_ID, REG_DUMP_SET); + } + + @Override + protected String getProcessorDesignator() { + return "ppcP8Alt_GCC_O3"; + } + + public static Test suite() { + return ProcessorEmulatorTestAdapter.buildEmulatorTestSuite(PPCP8Alt_O3_EmulatorTest.class); + } +} diff --git a/src/third-party/sleigh/processors/PowerPC/src/test.processors/java/ghidra/test/processors/PPCP9Alt_O0_EmulatorTest.java b/src/third-party/sleigh/processors/PowerPC/src/test.processors/java/ghidra/test/processors/PPCP9Alt_O0_EmulatorTest.java new file mode 100644 index 00000000..877c33aa --- /dev/null +++ b/src/third-party/sleigh/processors/PowerPC/src/test.processors/java/ghidra/test/processors/PPCP9Alt_O0_EmulatorTest.java @@ -0,0 +1,40 @@ +/* ### + * IP: GHIDRA + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package ghidra.test.processors; + +import ghidra.test.processors.support.ProcessorEmulatorTestAdapter; +import junit.framework.Test; + +public class PPCP9Alt_O0_EmulatorTest extends ProcessorEmulatorTestAdapter { + + private static final String LANGUAGE_ID = "PowerPC:BE:64:A2ALT-32addr"; + private static final String COMPILER_SPEC_ID = "default"; + + private static final String[] REG_DUMP_SET = new String[] {}; + + public PPCP9Alt_O0_EmulatorTest(String name) throws Exception { + super(name, LANGUAGE_ID, COMPILER_SPEC_ID, REG_DUMP_SET); + } + + @Override + protected String getProcessorDesignator() { + return "ppcP9Alt_GCC_O0"; + } + + public static Test suite() { + return ProcessorEmulatorTestAdapter.buildEmulatorTestSuite(PPCP9Alt_O0_EmulatorTest.class); + } +} diff --git a/src/third-party/sleigh/processors/PowerPC/src/test.processors/java/ghidra/test/processors/PPCP9Alt_O3_EmulatorTest.java b/src/third-party/sleigh/processors/PowerPC/src/test.processors/java/ghidra/test/processors/PPCP9Alt_O3_EmulatorTest.java new file mode 100644 index 00000000..18b71394 --- /dev/null +++ b/src/third-party/sleigh/processors/PowerPC/src/test.processors/java/ghidra/test/processors/PPCP9Alt_O3_EmulatorTest.java @@ -0,0 +1,40 @@ +/* ### + * IP: GHIDRA + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package ghidra.test.processors; + +import ghidra.test.processors.support.ProcessorEmulatorTestAdapter; +import junit.framework.Test; + +public class PPCP9Alt_O3_EmulatorTest extends ProcessorEmulatorTestAdapter { + + private static final String LANGUAGE_ID = "PowerPC:BE:64:A2ALT-32addr"; + private static final String COMPILER_SPEC_ID = "default"; + + private static final String[] REG_DUMP_SET = new String[] {}; + + public PPCP9Alt_O3_EmulatorTest(String name) throws Exception { + super(name, LANGUAGE_ID, COMPILER_SPEC_ID, REG_DUMP_SET); + } + + @Override + protected String getProcessorDesignator() { + return "ppcP9Alt_GCC_O3"; + } + + public static Test suite() { + return ProcessorEmulatorTestAdapter.buildEmulatorTestSuite(PPCP9Alt_O3_EmulatorTest.class); + } +} diff --git a/src/third-party/sleigh/processors/PowerPC/src/test.processors/java/ghidra/test/processors/PPC_O0_EmulatorTest.java b/src/third-party/sleigh/processors/PowerPC/src/test.processors/java/ghidra/test/processors/PPC_O0_EmulatorTest.java new file mode 100644 index 00000000..8268bf84 --- /dev/null +++ b/src/third-party/sleigh/processors/PowerPC/src/test.processors/java/ghidra/test/processors/PPC_O0_EmulatorTest.java @@ -0,0 +1,40 @@ +/* ### + * IP: GHIDRA + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package ghidra.test.processors; + +import ghidra.test.processors.support.ProcessorEmulatorTestAdapter; +import junit.framework.Test; + +public class PPC_O0_EmulatorTest extends ProcessorEmulatorTestAdapter { + + private static final String LANGUAGE_ID = "PowerPC:BE:32:default"; + private static final String COMPILER_SPEC_ID = "default"; + + private static final String[] REG_DUMP_SET = new String[] {}; + + public PPC_O0_EmulatorTest(String name) throws Exception { + super(name, LANGUAGE_ID, COMPILER_SPEC_ID, REG_DUMP_SET); + } + + @Override + protected String getProcessorDesignator() { + return "powerpc32_GCC_O0"; + } + + public static Test suite() { + return ProcessorEmulatorTestAdapter.buildEmulatorTestSuite(PPC_O0_EmulatorTest.class); + } +} diff --git a/src/third-party/sleigh/processors/PowerPC/src/test.processors/java/ghidra/test/processors/PPC_O3_EmulatorTest.java b/src/third-party/sleigh/processors/PowerPC/src/test.processors/java/ghidra/test/processors/PPC_O3_EmulatorTest.java new file mode 100644 index 00000000..e6c5784f --- /dev/null +++ b/src/third-party/sleigh/processors/PowerPC/src/test.processors/java/ghidra/test/processors/PPC_O3_EmulatorTest.java @@ -0,0 +1,40 @@ +/* ### + * IP: GHIDRA + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package ghidra.test.processors;
+
+import ghidra.test.processors.support.ProcessorEmulatorTestAdapter;
+import junit.framework.Test;
+
+public class PPC_O3_EmulatorTest extends ProcessorEmulatorTestAdapter {
+
+	private static final String LANGUAGE_ID = "PowerPC:BE:32:default";
+	private static final String COMPILER_SPEC_ID = "default";
+
+	private static final String[] REG_DUMP_SET = new String[] {};
+
+	public PPC_O3_EmulatorTest(String name) throws Exception {
+		super(name, LANGUAGE_ID, COMPILER_SPEC_ID, REG_DUMP_SET);
+	}
+
+	@Override
+	protected String getProcessorDesignator() {
+		return "powerpc32_GCC_O3";
+	}
+
+	public static Test suite() {
+		return ProcessorEmulatorTestAdapter.buildEmulatorTestSuite(PPC_O3_EmulatorTest.class);
+	}
+}
diff --git a/tests/CMakeLists.txt b/tests/CMakeLists.txt
index c13490e2..3ec83c33 100644
--- a/tests/CMakeLists.txt
+++ b/tests/CMakeLists.txt
@@ -8,11 +8,12 @@ if(PROJECT_IS_TOP_LEVEL)
   find_package(maat REQUIRED)
   enable_testing()
 endif()
-
+set(CMAKE_BUILD_TYPE Debug)
 # Unit tests
 add_executable(unit-tests
     unit-tests/test_all.cpp
     unit-tests/test_archEVM.cpp
+    unit-tests/test_archPPC64.cpp
     unit-tests/test_archX64.cpp
     unit-tests/test_archX86.cpp
     unit-tests/test_event.cpp
diff --git a/tests/unit-tests/test_all.cpp b/tests/unit-tests/test_all.cpp
index 4d85bea5..57501e5c 100644
--- a/tests/unit-tests/test_all.cpp
+++ b/tests/unit-tests/test_all.cpp
@@ -18,6 +18,7 @@ void test_solver();
 void test_loader();
 void test_serialization();
 void test_archEVM();
+void test_archPPC64();
 
 
 int main(int argc, char ** argv)
@@ -52,6 +53,7 @@ int main(int argc, char ** argv)
         test_archX86();
         test_archX64();
         test_archEVM();
+        test_archPPC64();
         test_solver();
         test_loader();
         test_serialization();
@@ -92,6 +94,8 @@ int main(int argc, char ** argv)
             test_loader();
         else if( !strcmp(argv[i], "serial"))
             test_serialization();
+        else if( !strcmp(argv[i], "PPC64"))
+            test_archPPC64();
         /* else if( !strcmp(argv[i], "ARM64"))
             test_archARM64();
diff --git a/tests/unit-tests/test_archPPC64.cpp b/tests/unit-tests/test_archPPC64.cpp
new file mode 100644
index 00000000..85bab246
--- /dev/null
+++ b/tests/unit-tests/test_archPPC64.cpp
@@ -0,0 +1,881 @@
+/*
+Commonwealth of Australia represented by the Department of Defence
+
+Produced by Nathan Do, Student Intern at DSTG (Defence Science and Technology Group)
+*/
+
+#include "maat/arch.hpp"
+#include "maat/varcontext.hpp"
+#include "maat/engine.hpp"
+#include "maat/exception.hpp"
+#include <cassert>
+#include <iostream>
+#include <string>
+#include <sstream>
+
+using std::cout;
+using std::endl;
+using std::string;
+
+namespace test
+{
+namespace archPPC64
+{
+    using namespace maat;
+
+    // Helper: fail the test with 'msg' when 'val' is false, otherwise count one passed check
+    unsigned int _assert(bool val, const string& msg){
+        if( !val){
+            cout << "\nFail: " << msg << std::endl;
+            throw test_exception();
+        }
+        return 1;
+    }
+
+    // Basic register write check
+    unsigned int simple_move()
+    {
+        string code;
+        MaatEngine sym = MaatEngine(Arch::Type::PPC64);
+        // map some memory to hold the code
+        sym.mem->map(0x1000,0x2000);
+        code = string("\x38\x80\x00\x03",4); // li r4, 0x03
+        sym.mem->write_buffer(0x1000, (uint8_t*)code.c_str(), code.size());
+        sym.run_from(0x1000,1);
+        unsigned int ret_value = 0;
+        ret_value += _assert(sym.cpu.ctx().get(PPC64::R4).as_uint() == 0x03, "R4 is not equal to 0x3");
+
+        return ret_value;
+    }
+
+    unsigned int simple_branch()
+    {
+        string code;
+        unsigned int ret_value = 0;
+        MaatEngine sym = MaatEngine(Arch::Type::PPC64);
+        sym.mem->map(0x1000,0x2000);
+        code = string("\x39\x20\x00\x05",4); // li r9, 0x05
+        sym.mem->write_buffer(0x1000, (uint8_t*)code.c_str(), code.size()); // code.size() = 4
+        sym.run_from(0x1000,1);
+        ret_value += _assert(sym.cpu.ctx().get(PPC64::R9).as_uint()==0x05,"R9 is not equal to 0x05");
+
+        code = string("\x38\xa0\x00\x0a",4); // li r5, 0x0a
+        sym.mem->write_buffer(0x1004,(uint8_t*)code.c_str(), code.size());
+        sym.run_from(0x1000,2);
+        ret_value += _assert(sym.cpu.ctx().get(PPC64::R5).as_uint()==0x0a,"R5 is not equal to 0x0A");
+
+        code = string("\x7c\x05\x48\x00",4); // cmpw r5,r9
+        sym.mem->write_buffer(0x1008,(uint8_t*)code.c_str(), code.size());
+        code = string("\x48\x00\x00\x08",4); // b +8 (skip one instruction)
+        sym.mem->write_buffer(0x100c,(uint8_t*)code.c_str(), code.size());
+        sym.run_from(0x1000,4);
+        ret_value += _assert(sym.cpu.ctx().get(PPC64::PC).as_uint()==0x1014,"PC is not 0x1014: the branch did not skip one instruction");
+
+        code = string("\x38\x80\x00\x01",4); // li r4, 0x01
+        sym.mem->write_buffer(0x1010,(uint8_t*)code.c_str(), code.size());
+        code = string("\x38\xa0\x00\x03",4); // li r5, 0x03
+        sym.mem->write_buffer(0x1014,(uint8_t*)code.c_str(), code.size());
+        sym.run_from(0x1000,5);
+        ret_value += _assert(sym.cpu.ctx().get(PPC64::R4).as_uint()!=0x01,"R4 was set to 0x01: the branched-over instruction was executed");
+
+        return ret_value;
+    }
+
+    unsigned int disass_cmpw()
+    {
+        string code;
+        unsigned int ret_value = 0;
+        MaatEngine sym = MaatEngine(Arch::Type::PPC64);
+        sym.mem->map(0x1000,0x2000);
+        sym.cpu.ctx().set(PPC64::R5, exprcst(64, 0x1111));
+        sym.cpu.ctx().set(PPC64::R9, exprcst(64, 0xf00001111));
+
+        code = string("\x7c\x05\x48\x00",4); // cmpw r5,r9
+        sym.mem->write_buffer(0x1000,(uint8_t*)code.c_str(), code.size());
+        sym.run_from(0x1000,1);
+
+        // cmpw compares only the low 32 bits, so r5 and r9 compare equal here
+        ret_value += _assert(sym.cpu.ctx().get(PPC64::CR0).as_uint() == 0x2, "1: ArchPPC64: failed to disassemble and/or execute cmpw");
+
+        return ret_value;
+    }
+
+    unsigned int simple_addition()
+    {
+        string code;
+        unsigned int ret_value = 0;
+        MaatEngine sym = MaatEngine(Arch::Type::PPC64);
+        sym.mem->map(0x1000,0x2000);
+
+        code = string("\x39\x20\x00\x05",4); // li r9, 0x05
+        sym.mem->write_buffer(0x1000, (uint8_t*)code.c_str(), code.size()); // code.size() = 4
+        sym.run_from(0x1000,1);
+        ret_value += _assert(sym.cpu.ctx().get(PPC64::R9).as_uint()==0x05,"R9 is not equal to 0x05");
+
+        code = string("\x38\xa0\x00\x0a",4); // li r5, 0x0a
+        sym.mem->write_buffer(0x1004,(uint8_t*)code.c_str(), code.size());
+        sym.run_from(0x1000,2);
+        ret_value += _assert(sym.cpu.ctx().get(PPC64::R5).as_uint()==0x0a,"R5 is not equal to 0x0A");
+
+        code = string("\x7c\x65\x4a\x14",4); // add r3, r5, r9
+        sym.mem->write_buffer(0x1008,(uint8_t*)code.c_str(), code.size());
+        sym.run_from(0x1000,3);
+        ret_value += _assert(sym.cpu.ctx().get(PPC64::R3).as_uint()==0x0f,"R3 is not equal to 0x0F");
+
+        return ret_value;
+    }
+
+    unsigned int addition_16bits()
+    {
+        string code;
+        unsigned int ret_value = 0;
+        MaatEngine sym = MaatEngine(Arch::Type::PPC64);
+        sym.mem->map(0x1000,0x2000);
+
+        code = string("\x39\x20\x00\x00",4); // li r9, 0x00
+        sym.mem->write_buffer(0x1000, (uint8_t*)code.c_str(), code.size()); // code.size() = 4
+        sym.run_from(0x1000,1);
+        ret_value += _assert(sym.cpu.ctx().get(PPC64::R9).as_uint()==0x0,"R9 is not equal to 0x0");
+
+        code = string("\x61\x29\xea\x60",4); // ori r9, r9, 0xea60
+        sym.mem->write_buffer(0x1004, (uint8_t*)code.c_str(), code.size()); // code.size() = 4
+        sym.run_from(0x1000,2);
+        ret_value += _assert(sym.cpu.ctx().get(PPC64::R9).as_uint()==0xea60,"R9 is not equal to 0xEA60");
+
+        code = string("\x38\xa0\x00\x00",4); // li r5, 0x00
+        sym.mem->write_buffer(0x1008,(uint8_t*)code.c_str(), code.size());
+        sym.run_from(0x1000,3);
+        ret_value += _assert(sym.cpu.ctx().get(PPC64::R5).as_uint()==0x0,"R5 is not equal to 0x0");
+
+        code = string("\x60\xa5\xea\x60",4); // ori r5, r5, 0xea60
+        sym.mem->write_buffer(0x100c,(uint8_t*)code.c_str(), code.size());
+        sym.run_from(0x1000,4);
+        ret_value += _assert(sym.cpu.ctx().get(PPC64::R5).as_uint()==0xea60,"R5 is not equal to 0xEA60");
+
+        code = string("\x7c\x65\x4a\x15",4); // addc r3, r5, r9
+        sym.mem->write_buffer(0x1010,(uint8_t*)code.c_str(), code.size());
+        sym.run_from(0x1000,5);
+        ret_value += _assert(sym.cpu.ctx().get(PPC64::R3).as_uint()==0x1D4C0,"R3 is not equal to 0x1D4C0"); // 0xea60 + 0xea60
+
+        return ret_value;
+    }
+
+    unsigned int addition_Test_Flags()
+    {
+        string code;
+        unsigned int ret_value = 0;
+        MaatEngine sym = MaatEngine(Arch::Type::PPC64);
+        sym.mem->map(0x1000,0x2000);
+
+        sym.cpu.ctx().set(PPC64::R5, exprcst(64,0xffffffffabcd1234));
+        sym.cpu.ctx().set(PPC64::R4, exprcst(64,0x7fffffffabcd1234));
+
+        code = string("\x7c\x64\x2c\x15",4); // addco. r3, r4, r5
+        sym.mem->write_buffer(0x1000,(uint8_t*)code.c_str(), code.size());
+
+        sym.run_from(0x1000,1);
+        ret_value += _assert(sym.cpu.ctx().get(PPC64::R3).as_uint() == 0x7fffffff579a2468, "1. addco. flags test failed: wrong result in R3");
+        ret_value += _assert(sym.cpu.ctx().get(PPC64::R5).as_uint() == 0xffffffffabcd1234, "2. addco. flags test failed: R5 was modified");
+        ret_value += _assert(sym.cpu.ctx().get(PPC64::R4).as_uint() == 0x7fffffffabcd1234, "3. addco. flags test failed: R4 was modified");
+        ret_value += _assert(sym.cpu.ctx().get(PPC64::CR0).as_uint() == 0x04, "4. addco. flags test failed: result not flagged positive in CR0");
+        ret_value += _assert(sym.cpu.ctx().get(PPC64::XER_SO).as_uint() == 0x0, "5. addco. flags test failed: summary overflow flag set");
+        ret_value += _assert(sym.cpu.ctx().get(PPC64::XER_OV).as_uint() == 0x0, "6. addco. flags test failed: overflow flag set");
+        ret_value += _assert(sym.cpu.ctx().get(PPC64::XER_CA).as_uint() == 0x1, "7. addco. flags test failed: carry flag not set");
+
+        sym.cpu.ctx().set(PPC64::R5, exprcst(64,0x7fffffffffffffff));
+        sym.cpu.ctx().set(PPC64::R4, exprcst(64,0x1));
+
+        sym.run_from(0x1000,1);
+        ret_value += _assert(sym.cpu.ctx().get(PPC64::R3).as_uint() == 0x8000000000000000, "8. addco. flags test failed: wrong result in R3");
+        ret_value += _assert(sym.cpu.ctx().get(PPC64::R5).as_uint() == 0x7fffffffffffffff, "9. addco. flags test failed: R5 was modified");
+        ret_value += _assert(sym.cpu.ctx().get(PPC64::R4).as_uint() == 0x1, "10. addco. flags test failed: R4 was modified");
+        ret_value += _assert(sym.cpu.ctx().get(PPC64::CR0).as_uint() == 9, "11. addco. flags test failed: result not flagged negative or SO bit not set in CR0");
+        ret_value += _assert(sym.cpu.ctx().get(PPC64::XER_SO).as_uint() == 0x1, "12. addco. flags test failed: summary overflow flag not set");
+        ret_value += _assert(sym.cpu.ctx().get(PPC64::XER_OV).as_uint() == 0x1, "13. addco. flags test failed: overflow flag not set");
+        ret_value += _assert(sym.cpu.ctx().get(PPC64::XER_CA).as_uint() == 0x0, "14. addco. flags test failed: carry flag set");
+
+        sym.cpu.ctx().set(PPC64::R5, exprcst(64,0x8000000000000000));
+        sym.cpu.ctx().set(PPC64::R4, exprcst(64,0x8000000000000000));
+
+        sym.run_from(0x1000,1);
+        ret_value += _assert(sym.cpu.ctx().get(PPC64::R3).as_uint() == 0, "15. addco. flags test failed: wrong result in R3");
+        ret_value += _assert(sym.cpu.ctx().get(PPC64::R5).as_uint() == 0x8000000000000000, "16. addco. flags test failed: R5 was modified");
+        ret_value += _assert(sym.cpu.ctx().get(PPC64::R4).as_uint() == 0x8000000000000000, "17. addco. flags test failed: R4 was modified");
+        ret_value += _assert(sym.cpu.ctx().get(PPC64::CR0).as_uint() == 3, "18. addco. flags test failed: result not flagged zero or SO bit not set in CR0");
+        ret_value += _assert(sym.cpu.ctx().get(PPC64::XER_SO).as_uint() == 0x1, "19. addco. flags test failed: summary overflow flag not set");
+        ret_value += _assert(sym.cpu.ctx().get(PPC64::XER_OV).as_uint() == 0x1, "20. addco. flags test failed: overflow flag not set");
+        ret_value += _assert(sym.cpu.ctx().get(PPC64::XER_CA).as_uint() == 0x1, "21. addco. flags test failed: carry flag not set");
+
+        return ret_value;
+    }
+
+    unsigned int compare_default()
+    {
+        string code;
+        unsigned int ret_value = 0;
+        MaatEngine sym = MaatEngine(Arch::Type::PPC64);
+        sym.mem->map(0x1000,0x2000);
+
+        code = string("\x39\x20\x00\x05",4); // li r9, 0x05
+        sym.mem->write_buffer(0x1000, (uint8_t*)code.c_str(), code.size());
+        sym.run_from(0x1000,1);
+        ret_value += _assert(sym.cpu.ctx().get(PPC64::R9).as_uint()==0x05,"R9 is not equal to 0x05");
+
+        code = string("\x38\xa0\x00\x0a",4); // li r5, 0x0a
+        sym.mem->write_buffer(0x1004,(uint8_t*)code.c_str(), code.size());
+        sym.run_from(0x1000,2);
+        ret_value += _assert(sym.cpu.ctx().get(PPC64::R5).as_uint()==0x0a,"R5 is not equal to 0x0A");
+
+        code = string("\x7c\x05\x48\x00",4); // cmpw r5,r9
+        sym.mem->write_buffer(0x1008,(uint8_t*)code.c_str(), code.size());
+        sym.run_from(0x1000,3);
+        ret_value += _assert(sym.cpu.ctx().get(PPC64::CR0).as_uint()==0x04, "CR0 is not 0x04 (GT): r5 should compare greater than r9");
+        ret_value += _assert(sym.cpu.ctx().get(PPC64::CR).as_uint()==0x40000000, "CR0 field is not set at the right position within CR"); // GT bit of CR0 within the 32-bit CR
+
+        return ret_value;
+    }
+
+    unsigned int storeword_loadword()
+    {
+        string code;
+        unsigned int ret_value = 0;
+        MaatEngine sym = MaatEngine(Arch::Type::PPC64);
+        sym.mem->map(0x1000,0xffffff);
+        sym.mem->map(0x0,0);
+        sym.cpu.ctx().set(PPC64::R3, exprcst(64,0xDEADBEEF));
+        sym.cpu.ctx().set(PPC64::R4, exprcst(64,0x110000));
+
+        code = string("\x90\x64\x00\x00",4); // stw r3,0(r4)
+        sym.mem->write_buffer(0x1000, (uint8_t*)code.c_str(), code.size());
+        code = string("\x80\xa4\x00\x00",4); // lwz r5,0(r4)
+        sym.mem->write_buffer(0x1004, (uint8_t*)code.c_str(), code.size());
+
+        sym.run_from(0x1000,2);
+        ret_value += _assert((uint32_t)sym.mem->read(0x110000, 4).as_uint() == 0xDEADBEEF, "1: Failed to disassemble store and load instructions.");
+        ret_value += _assert(sym.cpu.ctx().get(PPC64::R5).as_uint() == 0xDEADBEEF, "2: Failed to disassemble store and load instructions.");
+
+        sym.cpu.ctx().set(PPC64::R3, exprcst(64,0x12345678DEADBEEF));
+        sym.cpu.ctx().set(PPC64::R4, exprcst(64,0x110000));
+        sym.cpu.ctx().set(PPC64::R5, exprcst(64,0));
+
+        sym.run_from(0x1000,2);
+        ret_value += _assert(sym.mem->read(0x110000, 4).as_uint() == 0xDEADBEEF, "3: Failed to disassemble store and load instructions.");
+        ret_value += _assert(sym.cpu.ctx().get(PPC64::R5).as_uint() == 0xDEADBEEF, "4: Failed to disassemble store and load instructions.");
+
+        sym.cpu.ctx().set(PPC64::R3, exprcst(64,0x12345678));
+        sym.cpu.ctx().set(PPC64::R5, exprcst(64,0));
+
+        sym.cpu.ctx().set(PPC64::R3, exprcst(64,0xBADC0FFEE0DDF00D));
+        code = string("\xf8\x64\x00\x00",4); // std r3,0(r4)
+        sym.mem->write_buffer(0x1000, (uint8_t*)code.c_str(), code.size());
+        code = string("\xE8\xA4\x00\x00",4); // ld r5,0(r4)
+        sym.mem->write_buffer(0x1004, (uint8_t*)code.c_str(), code.size());
+        sym.run_from(0x1000,2);
+
+        ret_value += _assert(sym.mem->read(0x110000, 8).as_uint() == 0xBADC0FFEE0DDF00D, "5: Failed to disassemble store and load instructions.");
+        ret_value += _assert(sym.cpu.ctx().get(PPC64::R5).as_uint() == 0xBADC0FFEE0DDF00D, "6: Failed to disassemble store and load instructions.");
+
+        return ret_value;
+    }
+
+    unsigned int mullw_disass()
+    {
+        // mullw = Multiply Low Word
+        string code;
+        unsigned int ret_value = 0;
+        MaatEngine sym = MaatEngine(Arch::Type::PPC64);
+        sym.mem->map(0x1000,0xffffff);
+        sym.mem->map(0x0,0);
+
+        sym.cpu.ctx().set(PPC64::R5, exprcst(64,0x12345678));
+        sym.cpu.ctx().set(PPC64::R4, exprcst(64,0x100000001));
+        sym.cpu.ctx().set(PPC64::R3, exprcst(64,0));
+        code = string("\x7c\x64\x29\xd6",4); // mullw r3, r4, r5
+        sym.mem->write_buffer(0x1000, (uint8_t*)code.c_str(), code.size());
+
+        sym.run_from(0x1000,1);
+        ret_value += _assert(sym.cpu.ctx().get(PPC64::R3).as_uint() == 0x12345678, "1: Failed to disassemble multiply low word.");
+
+        return ret_value;
+    }
+
+    unsigned int compare_CR3()
+    {
+        string code;
+        unsigned int ret_value = 0;
+        MaatEngine sym = MaatEngine(Arch::Type::PPC64);
+        sym.mem->map(0x1000,0x2000);
+
+        code = string("\x39\x20\x00\x05",4); // li r9, 0x05
+        sym.mem->write_buffer(0x1000, (uint8_t*)code.c_str(), code.size());
+        sym.run_from(0x1000,1);
+        ret_value += _assert(sym.cpu.ctx().get(PPC64::R9).as_uint()==0x05,"R9 is not equal to 0x05");
+
+        code = string("\x38\xa0\x00\x05",4); // li r5, 0x05
+        sym.mem->write_buffer(0x1004,(uint8_t*)code.c_str(), code.size());
+        sym.run_from(0x1000,2);
+        ret_value += _assert(sym.cpu.ctx().get(PPC64::R5).as_uint()==0x05,"R5 is not equal to 0x05");
+
+        code = string("\x7d\x85\x48\x00",4); // cmpw cr3, r5,r9
+        sym.mem->write_buffer(0x1008,(uint8_t*)code.c_str(), code.size());
+        sym.run_from(0x1000,3);
+        ret_value += _assert(sym.cpu.ctx().get(PPC64::CR3).as_uint()==0x02, "CR3 is not 0x02 (EQ): r5 should compare equal to r9");
+        ret_value += _assert(sym.cpu.ctx().get(PPC64::CR).as_uint()==0x20000, "CR3 field is not set at the right position within CR"); // EQ bit of CR3 within the 32-bit CR
+
+        return ret_value;
+    }
+
+    unsigned int bge_branch()
+    {
+        string code;
+        unsigned int ret_value = 0;
+        MaatEngine sym = MaatEngine(Arch::Type::PPC64);
+        sym.mem->map(0x1000,0x2000);
+
+        code = string("\x39\x20\x00\x05",4); // li r9, 0x05
+        sym.mem->write_buffer(0x1000, (uint8_t*)code.c_str(), code.size());
+        sym.run_from(0x1000,1);
+        ret_value += _assert(sym.cpu.ctx().get(PPC64::R9).as_uint()==0x05,"R9 is not equal to 0x05");
+
+        code = string("\x38\xa0\x00\x0a",4); // li r5, 0x0a
+        sym.mem->write_buffer(0x1004,(uint8_t*)code.c_str(), code.size());
+        sym.run_from(0x1000,2);
+        ret_value += _assert(sym.cpu.ctx().get(PPC64::R5).as_uint()==0x0a,"R5 is not equal to 0x0A");
+
+        code = string("\x7c\x05\x48\x00",4); // cmpw r5,r9
+        sym.mem->write_buffer(0x1008,(uint8_t*)code.c_str(), code.size());
+        sym.run_from(0x1000,3);
+
+        code = string("\x40\x80\x00\x08",4); // bge 8
+        sym.mem->write_buffer(0x100c,(uint8_t*)code.c_str(), code.size());
+        sym.run_from(0x1000,4);
+
+        code = string("\x38\x80\x00\x02",4); // li r4, 0x02
+        sym.mem->write_buffer(0x1014,(uint8_t*)code.c_str(), code.size());
+        sym.run_from(0x1000,5);
+        //TODO ADD RET
+
+        return ret_value;
+    }
+
+    // UNFINISHED UNIT TEST
+    unsigned int for_loop()
+    {
+        string code;
+        unsigned int ret_value = 0;
+        MaatEngine sym = MaatEngine(Arch::Type::PPC64);
+        sym.mem->map(0x1000,0x2000);
+        sym.mem->map(0x0,0x1000);
+
+        unsigned int test_reg_val = 0;
+
+        code = string("\x39\x20\x00\x00",4); // li r9, 0x0
+        sym.mem->write_buffer(0x1000, (uint8_t*)code.c_str(), code.size());
+        sym.run_from(0x1000,1);
+        ret_value += _assert(sym.cpu.ctx().get(PPC64::R9).as_uint()==0x00,"R9 is not equal to 0x0");
+
+        code = string("\x91\x3f\x00\x0c",4); // stw r9,12(r31)
+        sym.mem->write_buffer(0x1004, (uint8_t*)code.c_str(), code.size());
+        sym.run_from(0x1000,2);
+
+        cout<<"before b:"<write_buffer(0x1008, (uint8_t*)code.c_str(), code.size());
+        sym.run_from(0x1000,3);
+        cout<<"after b:"<write_buffer(0x100c, (uint8_t*)code.c_str(), code.size());
+        sym.run_from(0x1000,4);
+        cout<<"what is in r9 offset 8: "<write_buffer(0x1010, (uint8_t*)code.c_str(), code.size()); // code.size() = 4
+        sym.run_from(0x1000,5);
+        //test
+        test_reg_val;
+        // ret_value += _assert(sym.cpu.ctx().get(PPC64::R9).as_uint()==(test_reg_val+1),"R9 didn't get plus 1");
+
+        code = string("\x91\x3f\x00\x0c",4); // stw r9,8(r31)
+        sym.mem->write_buffer(0x1014, (uint8_t*)code.c_str(), code.size()); // code.size() = 4
+        sym.run_from(0x1000,6);
+
+        code = string("\x81\x3f\x00\x0c",4); // lwz r9,12(r31)
+        sym.mem->write_buffer(0x1018, (uint8_t*)code.c_str(), code.size()); // code.size() = 4
+        sym.run_from(0x1000,7);
+        cout<<"what is in r9 offset 12: "<write_buffer(0x101c, (uint8_t*)code.c_str(), code.size()); // code.size() = 4
+        sym.run_from(0x1000,8);
+        cout<<"what is in r9 offset 12: "<write_buffer(0x1020, (uint8_t*)code.c_str(), code.size()); // code.size() = 4
+        sym.run_from(0x1000,9);
+        // cout<<"what is in r9 offset 12: "<write_buffer(0x1024, (uint8_t*)code.c_str(), code.size()); // code.size() = 4
+        sym.run_from(0x1000,10);
+        cout<<"what is in r9 offset 12: "<write_buffer(0x1028, (uint8_t*)code.c_str(), code.size()); // code.size() = 4
+        sym.run_from(0x1000,11);
+        cout<<"what is inside cmp register: "<write_buffer(0x102c, (uint8_t*)code.c_str(), code.size()); // code.size() = 4
+        sym.run_from(0x1000,12);
+
+        return ret_value;
+    }
+
+    unsigned int disass_bne()
+    {
+        unsigned int ret_value = 0;
+        MaatEngine sym = MaatEngine(Arch::Type::PPC64);
+        sym.mem->map(0x1000,0x2000);
+        sym.mem->map(0x0,0x1000);
+        string code;
+
+        code = string("\x40\x82\x00\x20", 4); // bne 0x20
+        sym.mem->write_buffer(0x1000, (uint8_t*)code.c_str(), code.size());
+        sym.mem->write_buffer(0x1020, (uint8_t*)string("\x38\x40\x10\x00", 4).c_str(), 4); // li r2,0x1000
+        sym.mem->write_buffer(0x1004, (uint8_t*)string("\x38\x40\x10\x00", 4).c_str(), 4); // li r2,0x1000
+
+        sym.cpu.ctx().set(PPC64::CR0, exprcst(8,8));
+        sym.run_from(0x1000, 1);
+        ret_value += _assert( sym.cpu.ctx().get(PPC64::PC).as_uint() == 0x1020, "1: ArchPPC64: failed to disassemble and/or execute BNE");
+
+        sym.cpu.ctx().set(PPC64::CR0, exprcst(8,4));
+        sym.run_from(0x1000, 1);
+        ret_value += _assert( sym.cpu.ctx().get(PPC64::PC).as_uint() == 0x1020, "2: ArchPPC64: failed to disassemble and/or execute BNE");
+
+        sym.cpu.ctx().set(PPC64::CR0, exprcst(8,1));
+        sym.run_from(0x1000, 1);
+        ret_value += _assert( sym.cpu.ctx().get(PPC64::PC).as_uint() == 0x1020, "3: ArchPPC64: failed to disassemble and/or execute BNE");
+
+        sym.cpu.ctx().set(PPC64::CR0, exprcst(8,2));
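+        // CR0 is modeled here as a 4-bit flag field: LT=0b1000, GT=0b0100, EQ=0b0010, SO=0b0001.
+        // With only EQ set, bne must fall through to the next instruction at 0x1004.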
+ sym.run_from(0x1000, 1); + ret_value += _assert( sym.cpu.ctx().get(PPC64::PC).as_uint() == 0x1004, "4: ArchPPC64: failed to disassembly and/or execute BNE"); + + return ret_value; + } + + unsigned int disass_ble() + { + unsigned int ret_value = 0; + MaatEngine sym = MaatEngine(Arch::Type::PPC64); + sym.mem->map(0x1000,0x2000); + sym.mem->map(0x0,0x1000); + string code; + + code = string("\x40\x81\x00\x20", 4); // ble 0x20 + sym.mem->write_buffer(0x1000, (uint8_t*)code.c_str(), code.size()); + sym.mem->write_buffer(0x1020, (uint8_t*)string("\x38\x40\x10\x00", 4).c_str(), 4); // li r2,0x1000 + sym.mem->write_buffer(0x1004, (uint8_t*)string("\x38\x40\x10\x00", 4).c_str(), 4); // li r2,0x1000 + + sym.cpu.ctx().set(PPC64::CR0, exprcst(8,2)); + sym.run_from(0x1000, 1); + ret_value += _assert( sym.cpu.ctx().get(PPC64::PC).as_uint() == 0x1020, "1: ArchPPC64: failed to disassembly and/or execute BLE"); + + sym.cpu.ctx().set(PPC64::CR0, exprcst(8,10)); + sym.run_from(0x1000, 1); + ret_value += _assert( sym.cpu.ctx().get(PPC64::PC).as_uint() == 0x1020, "2: ArchPPC64: failed to disassembly and/or execute BLE"); + + sym.cpu.ctx().set(PPC64::CR0, exprcst(8,8)); + sym.run_from(0x1000, 1); + ret_value += _assert( sym.cpu.ctx().get(PPC64::PC).as_uint() == 0x1020, "3: ArchPPC64: failed to disassembly and/or execute BLE"); + + sym.cpu.ctx().set(PPC64::CR0, exprcst(8,4)); + sym.run_from(0x1000, 1); + ret_value += _assert( sym.cpu.ctx().get(PPC64::PC).as_uint() == 0x1004, "4: ArchPPC64: failed to disassembly and/or execute BLE"); + + + return ret_value; + } + + unsigned int disass_blt() + { + unsigned int ret_value = 0; + MaatEngine sym = MaatEngine(Arch::Type::PPC64); + sym.mem->map(0x1000,0x2000); + sym.mem->map(0x0,0x1000); + string code; + + code = string("\x41\x80\x00\x20", 4); // blt 0x20 + sym.mem->write_buffer(0x1000, (uint8_t*)code.c_str(), code.size()); + sym.mem->write_buffer(0x1020, (uint8_t*)string("\x38\x40\x10\x00", 4).c_str(), 4); // li r2,0x1000 + sym.mem->write_buffer(0x1004, (uint8_t*)string("\x38\x40\x10\x00", 4).c_str(), 4); // li r2,0x1000 + + sym.cpu.ctx().set(PPC64::CR0, exprcst(8,8)); + sym.run_from(0x1000, 1); + ret_value += _assert( sym.cpu.ctx().get(PPC64::PC).as_uint() == 0x1020, "1: ArchPPC64: failed to disassembly and/or execute BLE"); + + sym.cpu.ctx().set(PPC64::CR0, exprcst(8,2)); + sym.run_from(0x1000, 1); + ret_value += _assert( sym.cpu.ctx().get(PPC64::PC).as_uint() == 0x1004, "2: ArchPPC64: failed to disassembly and/or execute BLE"); + + return ret_value; + } + + unsigned int disass_bge() + { + unsigned int ret_value = 0; + MaatEngine sym = MaatEngine(Arch::Type::PPC64); + sym.mem->map(0x1000,0x2000); + sym.mem->map(0x0,0x1000); + string code; + + code = string("\x40\x80\x00\x20", 4); // bge 0x12 + sym.mem->write_buffer(0x1000, (uint8_t*)code.c_str(), code.size()); + sym.mem->write_buffer(0x1020, (uint8_t*)string("\x38\x40\x10\x00", 4).c_str(), 4); // li r2,0x1000 + sym.mem->write_buffer(0x1004, (uint8_t*)string("\x38\x40\x10\x00", 4).c_str(), 4); // li r2,0x1000 + + sym.cpu.ctx().set(PPC64::CR0, exprcst(8,6)); + sym.run_from(0x1000, 1); + ret_value += _assert( sym.cpu.ctx().get(PPC64::PC).as_uint() == 0x1020, "1: ArchPPC64: failed to disassembly and/or execute BGE"); + + sym.cpu.ctx().set(PPC64::CR0, exprcst(8,4)); + sym.run_from(0x1000, 1); + ret_value += _assert( sym.cpu.ctx().get(PPC64::PC).as_uint() == 0x1020, "2: ArchPPC64: failed to disassembly and/or execute BGE"); + + sym.cpu.ctx().set(PPC64::CR0, exprcst(8,2)); + sym.run_from(0x1000, 1); + ret_value += _assert( 
sym.cpu.ctx().get(PPC64::PC).as_uint() == 0x1020, "3: ArchPPC64: failed to disassembly and/or execute BGE"); + + sym.cpu.ctx().set(PPC64::CR0, exprcst(8,9)); + sym.run_from(0x1000, 1); + ret_value += _assert( sym.cpu.ctx().get(PPC64::PC).as_uint() == 0x1004, "4: ArchPPC64: failed to disassembly and/or execute BGE"); + + sym.cpu.ctx().set(PPC64::CR0, exprcst(8,8)); + sym.run_from(0x1000, 1); + ret_value += _assert( sym.cpu.ctx().get(PPC64::PC).as_uint() == 0x1004, "5: ArchPPC64: failed to disassembly and/or execute BGE"); + + return ret_value; + } + + unsigned int disass_bgt() + { + unsigned int ret_value = 0; + MaatEngine sym = MaatEngine(Arch::Type::PPC64); + sym.mem->map(0x1000,0x2000); + sym.mem->map(0x0,0x1000); + string code; + + code = string("\x41\x81\x00\x20", 4); // bgt 0x20 + sym.mem->write_buffer(0x1000, (uint8_t*)code.c_str(), code.size()); + sym.mem->write_buffer(0x1020, (uint8_t*)string("\x38\x40\x10\x00", 4).c_str(), 4); // li r2,0x1000 + sym.mem->write_buffer(0x1004, (uint8_t*)string("\x38\x40\x10\x00", 4).c_str(), 4); // li r2,0x1000 + + sym.cpu.ctx().set(PPC64::CR0, exprcst(8,6)); + sym.run_from(0x1000, 1); + ret_value += _assert( sym.cpu.ctx().get(PPC64::PC).as_uint() == 0x1020, "1: ArchPPC64: failed to disassembly and/or execute BGT"); + + sym.cpu.ctx().set(PPC64::CR0, exprcst(8,4)); + sym.run_from(0x1000, 1); + ret_value += _assert( sym.cpu.ctx().get(PPC64::PC).as_uint() == 0x1020, "2: ArchPPC64: failed to disassembly and/or execute BGT"); + + sym.cpu.ctx().set(PPC64::CR0, exprcst(8,9)); + sym.run_from(0x1000, 1); + ret_value += _assert( sym.cpu.ctx().get(PPC64::PC).as_uint() == 0x1004, "3: ArchPPC64: failed to disassembly and/or execute BGT"); + + sym.cpu.ctx().set(PPC64::CR0, exprcst(8,8)); + sym.run_from(0x1000, 1); + ret_value += _assert( sym.cpu.ctx().get(PPC64::PC).as_uint() == 0x1004, "4: ArchPPC64: failed to disassembly and/or execute BGT"); + + sym.cpu.ctx().set(PPC64::CR0, exprcst(8,2)); + sym.run_from(0x1000, 1); + ret_value += _assert( sym.cpu.ctx().get(PPC64::PC).as_uint() == 0x1004, "5: ArchPPC64: failed to disassembly and/or execute BGT"); + + return ret_value; + } + + unsigned int disass_cntlzw() + { + unsigned int ret_value = 0; + MaatEngine sym = MaatEngine(Arch::Type::PPC64); + sym.mem->map(0x1000,0x2000); + sym.mem->map(0x0,0x1000); + string code; + + sym.cpu.ctx().set(PPC64::R3, exprcst(64,0x1234)); + sym.cpu.ctx().set(PPC64::R5, exprcst(64,0x5)); + code = string("\x7c\x65\x00\x34", 4); // cntlzw r5,r3 + sym.mem->write_buffer(0x1000, (uint8_t*)code.c_str(), code.size()); + sym.run_from(0x1000,1); + ret_value += _assert( sym.cpu.ctx().get(PPC64::R5).as_uint() == 19, "1: ArchPPC64: R5 not equal to 19"); + + + sym.cpu.ctx().set(PPC64::R8, exprcst(64,0x674321)); + sym.cpu.ctx().set(PPC64::R10, exprcst(64,0x5)); + code = string("\x7d\x0a\x00\x34", 4); // cntlzw r10,r8 + sym.mem->write_buffer(0x1000, (uint8_t*)code.c_str(), code.size()); + sym.run_from(0x1000,1); + ret_value += _assert( sym.cpu.ctx().get(PPC64::R10).as_uint() == 9, "2: ArchPPC64: R10 not equal to 9"); + + sym.cpu.ctx().set(PPC64::R8, exprcst(64,0x0FFFFFFF00619920)); + sym.cpu.ctx().set(PPC64::R10, exprcst(64,0x5)); + code = string("\x7d\x0a\x00\x34", 4); // cntlzw r10,r8 + sym.mem->write_buffer(0x1000, (uint8_t*)code.c_str(), code.size()); + sym.run_from(0x1000,1); + ret_value += _assert( sym.cpu.ctx().get(PPC64::R10).as_uint() == 9, "2: ArchPPC64: R10 not equal to 9"); + + sym.cpu.ctx().set(PPC64::R8, exprcst(64,0x1)); + sym.cpu.ctx().set(PPC64::CR0, exprcst(8,0x0)); + code = 
string("\x7d\x0a\x00\x35", 4); // cntlzw. r10,r8 + sym.mem->write_buffer(0x1000, (uint8_t*)code.c_str(), code.size()); + sym.run_from(0x1000,1); + ret_value += _assert( sym.cpu.ctx().get(PPC64::R10).as_uint() == 31, "3: ArchPPC64: failed to disassembly and/or execute cntlzw"); + ret_value += _assert( sym.cpu.ctx().get(PPC64::CR0).as_uint() == 4, "3: ArchPPC64: failed to disassembly and/or execute cntlzw"); + + return ret_value; + } + + unsigned int disass_subf() + { + unsigned int ret_value = 0; + MaatEngine sym = MaatEngine(Arch::Type::PPC64); + sym.mem->map(0x1000,0x2000); + sym.mem->map(0x0,0x1000); + string code; + + sym.cpu.ctx().set(PPC64::R3, exprcst(64,10000)); + sym.cpu.ctx().set(PPC64::R4, exprcst(64,5000)); + code = string("\x7c\x44\x18\x50", 4); // subf r2, r4, r3 + sym.mem->write_buffer(0x1000, (uint8_t*)code.c_str(), code.size()); + sym.run_from(0x1000,1); + ret_value += _assert( sym.cpu.ctx().get(PPC64::R2).as_uint() == 5000, "1: ArchPPC64: failed to disassembly and/or execute subf"); + + code = string("\x7c\x44\x18\x51", 4); // subf. r2, r4, r3 + sym.mem->write_buffer(0x1000, (uint8_t*)code.c_str(), code.size()); + sym.run_from(0x1000,1); + ret_value += _assert( sym.cpu.ctx().get(PPC64::R2).as_uint() == 5000, "2: ArchPPC64: failed to disassembly and/or execute subf."); + ret_value += _assert( sym.cpu.ctx().get(PPC64::CR0).as_uint() == 4, "3: ArchPPC64: R3 is not greater than R4"); + + return ret_value; + } + + unsigned int disass_mulli() + { + unsigned int ret_value = 0; + MaatEngine sym = MaatEngine(Arch::Type::PPC64); + sym.mem->map(0x1000,0x2000); + sym.mem->map(0x0,0x1000); + string code; + + sym.cpu.ctx().set(PPC64::R4, exprcst(64,0x3000)); + code = string("\x1c\xc4\x00\x0a", 4); // mulli r6, r4, 10 + sym.mem->write_buffer(0x1000, (uint8_t*)code.c_str(), code.size()); + sym.run_from(0x1000,1); + ret_value += _assert( sym.cpu.ctx().get(PPC64::R6).as_uint() == 0x1e000, "ArchPPC64: failed to disassembly and/or execute mulli"); + + return ret_value; + } + + unsigned int disass_mtspr() + { + unsigned int ret_value = 0; + MaatEngine sym = MaatEngine(Arch::Type::PPC64); + sym.mem->map(0x1000,0x2000); + sym.mem->map(0x0,0x1000); + string code; + + sym.cpu.ctx().set(PPC64::R5, exprcst(64,0x50)); + code = string("\x7c\xa8\x03\xa6", 4); // mtspr LR,r5 + sym.mem->write_buffer(0x1000, (uint8_t*)code.c_str(), code.size()); + sym.mem->write_buffer(0x1004, (uint8_t*)string("\x38\x40\x10\x00", 4).c_str(), 4); // li r2,0x1000 + sym.run_from(0x1000,2); + ret_value += _assert( sym.cpu.ctx().get(PPC64::LR).as_uint() == 0x50, "1: ArchPPC64: failed to disassembly and/or execute mtspr"); + ret_value += _assert( sym.cpu.ctx().get(PPC64::R2).as_uint() == 0x1000, "2: ArchPPC64: failed to disassembly and/or execute mtspr"); + + sym.cpu.ctx().set(PPC64::R0, exprcst(64,0x50)); + code = string("\x7c\x08\x03\xa6", 4); // mtspr LR,r0 + sym.mem->write_buffer(0x1000, (uint8_t*)code.c_str(), code.size()); + sym.mem->write_buffer(0x1004, (uint8_t*)string("\x38\x40\x11\x11", 4).c_str(), 4); // li r2,0x1111 + sym.run_from(0x1000,1); + ret_value += _assert( sym.cpu.ctx().get(PPC64::LR).as_uint() == 0x50, "3: ArchPPC64: failed to disassembly and/or execute mtspr"); + + return ret_value; + } + + unsigned int test_r0() + { + unsigned int ret_value = 0; + MaatEngine sym = MaatEngine(Arch::Type::PPC64); + sym.mem->map(0x1000,0x2000); + sym.mem->map(0x0,0x1000); + string code; + + sym.cpu.ctx().set(PPC64::R7, exprcst(64,0x1234)); + sym.cpu.ctx().set(PPC64::R0, exprcst(64,0x1234)); + code = 
string("\x7c\xa7\x02\x14", 4); // add r5, r7, r0 + sym.mem->write_buffer(0x1000, (uint8_t*)code.c_str(), code.size()); + sym.mem->write_buffer(0x1004, (uint8_t*)string("\x38\x40\x10\x00", 4).c_str(), 4); // li r2,0x1000 + sym.run_from(0x1000,2); + ret_value += _assert( sym.cpu.ctx().get(PPC64::R5).as_uint() == 0x2468, "1: ArchPPC64: failed to disassembly and/or execute test_r0"); + ret_value += _assert( sym.cpu.ctx().get(PPC64::R2).as_uint() == 0x1000, "1: ArchPPC64: failed to disassembly and/or execute test_r0"); + + code = string("\x38\x60\x01\x00", 4); // addi r3, r0, 256 + sym.mem->write_buffer(0x1000, (uint8_t*)code.c_str(), code.size()); + sym.run_from(0x1000,2); + ret_value += _assert( sym.cpu.ctx().get(PPC64::R0).as_uint() == 0x1234,"2: ArchPPC64: failed to disassembly and/or execute test_r0"); + ret_value += _assert( sym.cpu.ctx().get(PPC64::R3).as_uint() == 0x100, "2: ArchPPC64: failed to disassembly and/or execute test_r0"); + ret_value += _assert( sym.cpu.ctx().get(PPC64::R2).as_uint() == 0x1000, "2: ArchPPC64: failed to disassembly and/or execute test_r0"); + + return ret_value; + } + + unsigned int disass_bl() + { + unsigned int ret_value = 0; + MaatEngine sym = MaatEngine(Arch::Type::PPC64); + sym.mem->map(0x1000,0x2000); + sym.mem->map(0x0,0x1000); + string code; + + code = string("\x48\x00\x00\x21", 4); // bl 0x20 + sym.mem->write_buffer(0x1000, (uint8_t*)code.c_str(), code.size()); + sym.mem->write_buffer(0x1020, (uint8_t*)string("\x38\x40\x10\x00", 4).c_str(), 4); // li r2,0x1000 + sym.run_from(0x1000,2); + ret_value += _assert( sym.cpu.ctx().get(PPC64::LR).as_uint() == 0x1004, "1: ArchPPC64: failed to disassemble and/or execute BL"); + ret_value += _assert( sym.cpu.ctx().get(PPC64::PC).as_uint() == 0x1024, "2: ArchPPC64: failed to disassemble and/or execute BL"); + ret_value += _assert( sym.cpu.ctx().get(PPC64::R2).as_uint() == 0x1000, "3: ArchPPC64: failed to disassemble and/or execute BL"); + + return ret_value; + } + + unsigned int disass_bctr() + { + unsigned int ret_value = 0; + MaatEngine sym = MaatEngine(Arch::Type::PPC64); + sym.mem->map(0x1000,0x2000); + sym.mem->map(0x0,0x1000); + string code; + + code = string("\x4e\x80\x04\x20", 4); // bctr + sym.mem->write_buffer(0x1000, (uint8_t*)code.c_str(), code.size()); + sym.cpu.ctx().set(PPC64::CTR, exprcst(32,0x1500)); + sym.mem->write_buffer(0x1500, (uint8_t*)string("\x38\x40\x10\x00", 4).c_str(), 4); // li r2,0x1000 + sym.run_from(0x1000,2); + ret_value += _assert( sym.cpu.ctx().get(PPC64::R2).as_uint() == 0x1000,"1: ArchPPC64: failed to disassembly and/or execute BCTR"); + ret_value += _assert( sym.cpu.ctx().get(PPC64::PC).as_uint() == 0x1504,"2: ArchPPC64: failed to disassembly and/or execute BCTR"); + ret_value += _assert( sym.cpu.ctx().get(PPC64::CTR).as_uint() == 0x1500,"3: ArchPPC64: failed to disassembly and/or execute BCTR"); + + return ret_value; + } + + unsigned int disass_bctrl() + { + unsigned int ret_value = 0; + MaatEngine sym = MaatEngine(Arch::Type::PPC64); + sym.mem->map(0x1000,0x2000); + sym.mem->map(0x0,0x1000); + string code; + + code = string("\x4e\x80\x04\x21", 4); // bctrl + sym.mem->write_buffer(0x1200, (uint8_t*)code.c_str(), code.size()); + sym.cpu.ctx().set(PPC64::CTR, exprcst(64,0x1500)); + sym.mem->write_buffer(0x1500, (uint8_t*)string("\x38\x40\x10\x00", 4).c_str(), 4); // li r2,0x1000 + sym.run_from(0x1200,1); + ret_value += _assert( sym.cpu.ctx().get(PPC64::PC).as_uint() == 0x1500,"1: ArchPPC64: failed to disassembly and/or execute BCTRL"); + + sym.run_from(0x1200,2); + ret_value += 
+
+    unsigned int disass_lbz()
+    {
+        unsigned int ret_value = 0;
+        MaatEngine sym = MaatEngine(Arch::Type::PPC64, maat::env::OS::LINUX);
+        sym.mem->map(0x1000,0x2000);
+        string code;
+
+        sym.cpu.ctx().set(PPC64::R10, exprcst(64,0x1500));
+        sym.cpu.ctx().set(PPC64::R9, exprcst(64,0x1234));
+        sym.mem->write(0x1234, exprcst(64,0x1abc2def12345678));
+
+        code = string("\x89\x49\x00\x00", 4); // lbz r10,0x0(r9)
+        sym.mem->write_buffer(0x1000, (uint8_t*)code.c_str(), code.size());
+
+        sym.run_from(0x1000,1);
+        ret_value += _assert( sym.cpu.ctx().get(PPC64::R10).as_uint() == 0x1a, "1: ArchPPC64: failed to disassemble and/or execute lbz");
+        ret_value += _assert( sym.cpu.ctx().get(PPC64::R9).as_uint() == 0x1234, "2: ArchPPC64: failed to disassemble and/or execute lbz");
+        ret_value += _assert( sym.mem->read(0x1234,4).as_uint() == 0x1abc2def, "3: ArchPPC64: failed to disassemble and/or execute lbz");
+
+        return ret_value;
+    }
+
+    unsigned int disass_extsw()
+    {
+        unsigned int ret_value = 0;
+        MaatEngine sym = MaatEngine(Arch::Type::PPC64, maat::env::OS::LINUX);
+        sym.mem->map(0x1000,0x2000);
+        string code;
+
+        sym.cpu.ctx().set(PPC64::R9, exprcst(64,0x12345678abcdef12));
+        code = string("\x7d\x29\x07\xb4", 4); // extsw r9,r9
+        sym.mem->write_buffer(0x1000, (uint8_t*)code.c_str(), code.size());
+
+        sym.run_from(0x1000,1);
+        ret_value += _assert( sym.cpu.ctx().get(PPC64::R9).as_uint() == 0xffffffffabcdef12, "1: ArchPPC64: failed to disassemble and/or execute extsw");
+
+        return ret_value;
+    }
+
+} // namespace archPPC64
+} // namespace test
+
+using namespace test::archPPC64;
+
+void test_archPPC64() {
+    unsigned int total = 0;
+    string green = "\033[1;32m";
+    string def = "\033[0m";
+    string bold = "\033[1m";
+
+    // Start testing
+    std::cout << bold << "[" << green << "+"
+              << def << bold << "]" << def << std::left << std::setw(34)
+              << " Testing Arch PPC64 support... " << std::flush;
+
+    /*
+        Each test below builds its own MaatEngine, which keeps the cases
+        independent but slow. The suite could run faster by passing a
+        reference to a single, already existing MaatEngine, as done in
+        other test suites.
+    */
+    total += simple_move();
+    total += simple_branch();
+    total += disass_cmpw();
+    total += simple_addition();
+    total += addition_16bits();
+    total += addition_Test_Flags();
+    total += storeword_loadword();
+    total += mullw_disass();
+    total += compare_default();
+    total += compare_CR3();
+    total += bge_branch();
+    total += disass_bne();
+    total += disass_ble();
+    total += disass_blt();
+    total += disass_bge();
+    total += disass_bgt();
+    total += disass_cntlzw();
+    total += disass_subf();
+    total += disass_mulli();
+    total += disass_mtspr();
+    total += test_r0();
+    total += disass_bl();
+    total += disass_bctr();
+    total += disass_bctrl();
+    total += disass_lbz();
+    total += disass_extsw();
+
+    std::cout << "\t" << total << "/" << total << green << "\t\tOK" << def << std::endl;
+}
\ No newline at end of file