*
* You should have received a copy of the GNU Lesser General Public
* License along with this library; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston MA 02110-1301 USA
*/
#include "exec.h"
#include "host-utils.h"
#include "softfloat.h"
-
-#include "op_helper.h"
+#include "helper.h"
void helper_tb_flush (void)
{
- tlb_flush(env, 1);
-}
-
-void cpu_dump_EA (target_ulong EA);
-void helper_print_mem_EA (target_ulong EA)
-{
- cpu_dump_EA(EA);
+ tb_flush(env);
}
/*****************************************************************************/
int i;
res = 0;
- for (i = 0; i < 7; i++) {
+ for (i = 0; i < 8; i++) {
opa = op1 >> (i * 8);
opb = op2 >> (i * 8);
if (opa >= opb)
/* F floating (VAX) */
static always_inline uint64_t float32_to_f (float32 fa)
{
- uint32_t a;
uint64_t r, exp, mant, sig;
+ CPU_FloatU a;
- a = *(uint32_t*)(&fa);
- sig = ((uint64_t)a & 0x80000000) << 32;
- exp = (a >> 23) & 0xff;
- mant = ((uint64_t)a & 0x007fffff) << 29;
+ a.f = fa;
+ sig = ((uint64_t)a.l & 0x80000000) << 32;
+ exp = (a.l >> 23) & 0xff;
+ mant = ((uint64_t)a.l & 0x007fffff) << 29;
if (exp == 255) {
/* NaN or infinity */
static always_inline float32 f_to_float32 (uint64_t a)
{
- uint32_t r, exp, mant_sig;
+ uint32_t exp, mant_sig;
+ CPU_FloatU r;
exp = ((a >> 55) & 0x80) | ((a >> 52) & 0x7f);
mant_sig = ((a >> 32) & 0x80000000) | ((a >> 29) & 0x007fffff);
if (exp < 3) {
/* Underflow */
- r = 0;
+ r.l = 0;
} else {
- r = ((exp - 2) << 23) | mant_sig;
+ r.l = ((exp - 2) << 23) | mant_sig;
}
- return *(float32*)(&a);
+ return r.f;
}
uint32_t helper_f_to_memory (uint64_t a)
/* G floating (VAX) */
static always_inline uint64_t float64_to_g (float64 fa)
{
- uint64_t a, r, exp, mant, sig;
+ uint64_t r, exp, mant, sig;
+ CPU_DoubleU a;
- a = *(uint64_t*)(&fa);
- sig = a & 0x8000000000000000ull;
- exp = (a >> 52) & 0x7ff;
- mant = a & 0x000fffffffffffffull;
+ a.d = fa;
+ sig = a.ll & 0x8000000000000000ull;
+ exp = (a.ll >> 52) & 0x7ff;
+ mant = a.ll & 0x000fffffffffffffull;
if (exp == 2047) {
/* NaN or infinity */
static always_inline float64 g_to_float64 (uint64_t a)
{
- uint64_t r, exp, mant_sig;
+ uint64_t exp, mant_sig;
+ CPU_DoubleU r;
exp = (a >> 52) & 0x7ff;
mant_sig = a & 0x800fffffffffffffull;
if (exp < 3) {
/* Underflow */
- r = 0;
+ r.ll = 0;
} else {
- r = ((exp - 2) << 52) | mant_sig;
+ r.ll = ((exp - 2) << 52) | mant_sig;
}
- return *(float64*)(&a);
+ return r.d;
}
uint64_t helper_g_to_memory (uint64_t a)
/* S floating (single) */
static always_inline uint64_t float32_to_s (float32 fa)
{
- uint32_t a;
+ CPU_FloatU a;
uint64_t r;
- a = *(uint32_t*)(&fa);
+ a.f = fa;
- r = (((uint64_t)(a & 0xc0000000)) << 32) | (((uint64_t)(a & 0x3fffffff)) << 29);
- if (((a & 0x7f800000) != 0x7f800000) && (!(a & 0x40000000)))
+ r = (((uint64_t)(a.l & 0xc0000000)) << 32) | (((uint64_t)(a.l & 0x3fffffff)) << 29);
+ if (((a.l & 0x7f800000) != 0x7f800000) && (!(a.l & 0x40000000)))
r |= 0x7ll << 59;
return r;
}
static always_inline float32 s_to_float32 (uint64_t a)
{
- uint32_t r = ((a >> 32) & 0xc0000000) | ((a >> 29) & 0x3fffffff);
- return *(float32*)(&r);
+ CPU_FloatU r;
+ r.l = ((a >> 32) & 0xc0000000) | ((a >> 29) & 0x3fffffff);
+ return r.f;
}
uint32_t helper_s_to_memory (uint64_t a)
static always_inline float64 t_to_float64 (uint64_t a)
{
/* Memory format is the same as float64 */
- return *(float64*)(&a);
+ CPU_DoubleU r;
+ r.ll = a;
+ return r.d;
}
static always_inline uint64_t float64_to_t (float64 fa)
{
/* Memory format is the same as float64 */
- return *(uint64*)(&fa);
+ CPU_DoubleU r;
+ r.d = fa;
+ return r.ll;
}
uint64_t helper_addt (uint64_t a, uint64_t b)
return __helper_cvtql(a, 1, 1);
}
+/* PALcode support special instructions */
#if !defined (CONFIG_USER_ONLY)
-void helper_mfpr (int iprn)
+void helper_hw_rei (void)
{
- uint64_t val;
+ env->pc = env->ipr[IPR_EXC_ADDR] & ~3;
+ env->ipr[IPR_EXC_ADDR] = env->ipr[IPR_EXC_ADDR] & 1;
+ /* XXX: re-enable interrupts and memory mapping */
+}
- if (cpu_alpha_mfpr(env, iprn, &val) == 0)
- T0 = val;
+/* HW_RET: leave PALcode, jumping to the target address 'a'.  The low
+ * two bits are stripped from the new PC; bit 0 of 'a' is latched into
+ * IPR_EXC_ADDR (presumably a PAL-mode indicator bit — TODO confirm
+ * against the IPR definitions). */
+void helper_hw_ret (uint64_t a)
+{
+ env->pc = a & ~3;
+ env->ipr[IPR_EXC_ADDR] = a & 1;
+ /* XXX: re-enable interrupts and memory mapping */
+}
+
+/* Read internal processor register 'iprn'.  On success (cpu_alpha_mfpr
+ * returns 0) the register contents are returned; on failure the caller's
+ * previous value 'val' is passed back through unchanged. */
+uint64_t helper_mfpr (int iprn, uint64_t val)
+{
+ uint64_t tmp;
+
+ if (cpu_alpha_mfpr(env, iprn, &tmp) == 0)
+ val = tmp;
+
+ return val;
+}
+
+/* Write 'val' to internal processor register 'iprn'.  Any error status
+ * from cpu_alpha_mtpr is deliberately ignored here (no hflags output
+ * requested either — the last argument is NULL). */
+void helper_mtpr (int iprn, uint64_t val)
+{
+ cpu_alpha_mtpr(env, iprn, val, NULL);
+}
-void helper_mtpr (int iprn)
+void helper_set_alt_mode (void)
{
- cpu_alpha_mtpr(env, iprn, T0, NULL);
+ env->saved_mode = env->ps & 0xC;
+ env->ps = (env->ps & ~0xC) | (env->ipr[IPR_ALT_MODE] & 0xC);
}
+
+/* Restore the processor-status mode bits (mask 0xC of env->ps) from
+ * env->saved_mode, undoing a previous alternate-mode switch. */
+void helper_restore_mode (void)
+{
+ env->ps = (env->ps & ~0xC) | env->saved_mode;
+}
+
#endif
/*****************************************************************************/
* Hopefully, we emulate the PALcode, then we should never see
* HW_LD / HW_ST instructions.
*/
-void helper_ld_phys_to_virt (void)
+uint64_t helper_ld_virt_to_phys (uint64_t virtaddr)
{
uint64_t tlb_addr, physaddr;
int index, mmu_idx;
void *retaddr;
mmu_idx = cpu_mmu_index(env);
- index = (T0 >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
+ index = (virtaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
redo:
tlb_addr = env->tlb_table[mmu_idx][index].addr_read;
- if ((T0 & TARGET_PAGE_MASK) ==
+ if ((virtaddr & TARGET_PAGE_MASK) ==
(tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
- physaddr = T0 + env->tlb_table[mmu_idx][index].addend;
+ physaddr = virtaddr + env->tlb_table[mmu_idx][index].addend;
} else {
/* the page is not in the TLB : fill it */
retaddr = GETPC();
- tlb_fill(T0, 0, mmu_idx, retaddr);
+ tlb_fill(virtaddr, 0, mmu_idx, retaddr);
goto redo;
}
- T0 = physaddr;
+ return physaddr;
}
-void helper_st_phys_to_virt (void)
+uint64_t helper_st_virt_to_phys (uint64_t virtaddr)
{
uint64_t tlb_addr, physaddr;
int index, mmu_idx;
void *retaddr;
mmu_idx = cpu_mmu_index(env);
- index = (T0 >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
+ index = (virtaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
redo:
tlb_addr = env->tlb_table[mmu_idx][index].addr_write;
- if ((T0 & TARGET_PAGE_MASK) ==
+ if ((virtaddr & TARGET_PAGE_MASK) ==
(tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
- physaddr = T0 + env->tlb_table[mmu_idx][index].addend;
+ physaddr = virtaddr + env->tlb_table[mmu_idx][index].addend;
} else {
/* the page is not in the TLB : fill it */
retaddr = GETPC();
- tlb_fill(T0, 1, mmu_idx, retaddr);
+ tlb_fill(virtaddr, 1, mmu_idx, retaddr);
goto redo;
}
- T0 = physaddr;
+ return physaddr;
+}
+
+/* 32-bit raw (no-MMU-translation) load: forwards to the ldl_raw access
+ * macro with address t1; t0 is presumably the destination register
+ * value slot — TODO confirm against the two-operand macro definition. */
+void helper_ldl_raw(uint64_t t0, uint64_t t1)
+{
+ ldl_raw(t1, t0);
+}
+
+/* 64-bit raw load: forwards to the ldq_raw access macro with address
+ * t1 (same calling convention as helper_ldl_raw). */
+void helper_ldq_raw(uint64_t t0, uint64_t t1)
+{
+ ldq_raw(t1, t0);
+}
+
+/* Load-locked longword (LDL_L): record the locked address in env->lock,
+ * then perform the 32-bit load.  A later store-conditional succeeds
+ * only if env->lock still holds this address. */
+void helper_ldl_l_raw(uint64_t t0, uint64_t t1)
+{
+ env->lock = t1;
+ ldl_raw(t1, t0);
+}
+
+/* Load-locked quadword (LDQ_L): record the locked address in env->lock,
+ * then perform the 64-bit load.  Must use ldq_raw — the longword
+ * ldl_raw here was a copy-paste slip from helper_ldl_l_raw and would
+ * only load 32 bits. */
+void helper_ldq_l_raw(uint64_t t0, uint64_t t1)
+{
+ env->lock = t1;
+ ldq_raw(t1, t0);
+}
+
+/* 32-bit load via the ldl_kernel access macro (kernel MMU mode variant
+ * of the raw helpers above). */
+void helper_ldl_kernel(uint64_t t0, uint64_t t1)
+{
+ ldl_kernel(t1, t0);
+}
+
+/* 64-bit load via the ldq_kernel access macro. */
+void helper_ldq_kernel(uint64_t t0, uint64_t t1)
+{
+ ldq_kernel(t1, t0);
+}
+
+/* 32-bit load via the ldl_data access macro (data-access MMU mode). */
+void helper_ldl_data(uint64_t t0, uint64_t t1)
+{
+ ldl_data(t1, t0);
+}
+
+/* 64-bit load via the ldq_data access macro. */
+void helper_ldq_data(uint64_t t0, uint64_t t1)
+{
+ ldq_data(t1, t0);
+}
+
+/* Store the low 32 bits of t0 at address t1 (raw access, no MMU
+ * translation). */
+void helper_stl_raw(uint64_t t0, uint64_t t1)
+{
+ stl_raw(t1, t0);
+}
+
+/* Store the 64-bit value t0 at address t1 (raw access). */
+void helper_stq_raw(uint64_t t0, uint64_t t1)
+{
+ stq_raw(t1, t0);
+}
+
+/* Store-conditional longword (STL_C): the store is performed, and 0
+ * returned, only when t1 still matches the address latched in env->lock
+ * by a preceding load-locked; otherwise the store is skipped and 1 is
+ * returned.  env->lock is then reset to 1 in either case (presumably a
+ * never-matching sentinel that invalidates the reservation — TODO
+ * confirm). */
+uint64_t helper_stl_c_raw(uint64_t t0, uint64_t t1)
+{
+ uint64_t ret;
+
+ if (t1 == env->lock) {
+ stl_raw(t1, t0);
+ ret = 0;
+ } else
+ ret = 1;
+
+ env->lock = 1;
+
+ return ret;
+}
+
+/* Store-conditional quadword (STQ_C): 64-bit counterpart of
+ * helper_stl_c_raw — store succeeds (returns 0) only while env->lock
+ * still holds address t1; returns 1 otherwise.  The reservation is
+ * cleared afterwards by resetting env->lock to 1. */
+uint64_t helper_stq_c_raw(uint64_t t0, uint64_t t1)
+{
+ uint64_t ret;
+
+ if (t1 == env->lock) {
+ stq_raw(t1, t0);
+ ret = 0;
+ } else
+ ret = 1;
+
+ env->lock = 1;
+
+ return ret;
+}
#define MMUSUFFIX _mmu