/*
 *  MIPS emulation helpers for qemu.
 *
 *  Copyright (c) 2004-2005 Jocelyn Mayer
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
#include <stdlib.h>

#include "exec.h"
#include "host-utils.h"
/*****************************************************************************/
/* Exceptions processing helpers */
28 void do_raise_exception_err (uint32_t exception, int error_code)
31 if (logfile && exception < 0x100)
32 fprintf(logfile, "%s: %d %d\n", __func__, exception, error_code);
34 env->exception_index = exception;
35 env->error_code = error_code;
/* Raise an exception with a zero error code. */
void do_raise_exception (uint32_t exception)
{
    do_raise_exception_err(exception, 0);
}
45 void do_interrupt_restart (void)
47 if (!(env->CP0_Status & (1 << CP0St_EXL)) &&
48 !(env->CP0_Status & (1 << CP0St_ERL)) &&
49 !(env->hflags & MIPS_HFLAG_DM) &&
50 (env->CP0_Status & (1 << CP0St_IE)) &&
51 (env->CP0_Status & env->CP0_Cause & CP0Ca_IP_mask)) {
52 env->CP0_Cause &= ~(0x1f << CP0Ca_EC);
53 do_raise_exception(EXCP_EXT_INTERRUPT);
57 void do_restore_state (void *pc_ptr)
60 unsigned long pc = (unsigned long) pc_ptr;
64 cpu_restore_state (tb, env, pc, NULL);
#if defined(TARGET_MIPS64)
#if TARGET_LONG_BITS > HOST_LONG_BITS
/* 64-bit shift/rotate helpers used when the host word size is smaller than
   the target's.  Those might call libgcc functions.  */
void do_dsll (void)
{
    T0 = T0 << T1;
}

void do_dsll32 (void)
{
    T0 = T0 << (T1 + 32);
}

void do_dsra (void)
{
    T0 = (int64_t)T0 >> T1;
}

void do_dsra32 (void)
{
    T0 = (int64_t)T0 >> (T1 + 32);
}

void do_dsrl (void)
{
    T0 = T0 >> T1;
}

void do_dsrl32 (void)
{
    T0 = T0 >> (T1 + 32);
}

void do_drotr (void)
{
    target_ulong tmp;

    if (T1) {
        tmp = T0 << (0x40 - T1);
        T0 = (T0 >> T1) | tmp;
    }
}

void do_drotr32 (void)
{
    target_ulong tmp;

    tmp = T0 << (0x40 - (32 + T1));
    T0 = (T0 >> (32 + T1)) | tmp;
}

void do_dsllv (void)
{
    T0 = T1 << (T0 & 0x3F);
}

void do_dsrav (void)
{
    T0 = (int64_t)T1 >> (T0 & 0x3F);
}

void do_dsrlv (void)
{
    T0 = T1 >> (T0 & 0x3F);
}

void do_drotrv (void)
{
    target_ulong tmp;

    T0 &= 0x3F;
    if (T0) {
        tmp = T1 << (0x40 - T0);
        T0 = (T1 >> T0) | tmp;
    } else
        T0 = T1;
}
#endif /* TARGET_LONG_BITS > HOST_LONG_BITS */
/* NOTE(review): a few original lines between the two #endif markers were
   lost in extraction — compare with the upstream file if behavior differs. */
#endif /* TARGET_MIPS64 */
170 /* 64 bits arithmetic for 32 bits hosts */
171 static always_inline uint64_t get_HILO (void)
173 return ((uint64_t)(env->HI[env->current_tc][0]) << 32) | (uint32_t)env->LO[env->current_tc][0];
176 static always_inline void set_HILO (uint64_t HILO)
178 env->LO[env->current_tc][0] = (int32_t)HILO;
179 env->HI[env->current_tc][0] = (int32_t)(HILO >> 32);
182 static always_inline void set_HIT0_LO (uint64_t HILO)
184 env->LO[env->current_tc][0] = (int32_t)(HILO & 0xFFFFFFFF);
185 T0 = env->HI[env->current_tc][0] = (int32_t)(HILO >> 32);
188 static always_inline void set_HI_LOT0 (uint64_t HILO)
190 T0 = env->LO[env->current_tc][0] = (int32_t)(HILO & 0xFFFFFFFF);
191 env->HI[env->current_tc][0] = (int32_t)(HILO >> 32);
#if TARGET_LONG_BITS > HOST_LONG_BITS
/* Multiply-accumulate helpers operating on the 64-bit HI:LO pair. */
void do_madd (void)
{
    int64_t tmp;

    tmp = ((int64_t)(int32_t)T0 * (int64_t)(int32_t)T1);
    set_HILO((int64_t)get_HILO() + tmp);
}

void do_maddu (void)
{
    uint64_t tmp;

    tmp = ((uint64_t)(uint32_t)T0 * (uint64_t)(uint32_t)T1);
    set_HILO(get_HILO() + tmp);
}

void do_msub (void)
{
    int64_t tmp;

    tmp = ((int64_t)(int32_t)T0 * (int64_t)(int32_t)T1);
    set_HILO((int64_t)get_HILO() - tmp);
}

void do_msubu (void)
{
    uint64_t tmp;

    tmp = ((uint64_t)(uint32_t)T0 * (uint64_t)(uint32_t)T1);
    set_HILO(get_HILO() - tmp);
}
#endif /* TARGET_LONG_BITS > HOST_LONG_BITS */
228 /* Multiplication variants of the vr54xx. */
231 set_HI_LOT0(0 - ((int64_t)(int32_t)T0 * (int64_t)(int32_t)T1));
236 set_HI_LOT0(0 - ((uint64_t)(uint32_t)T0 * (uint64_t)(uint32_t)T1));
241 set_HI_LOT0(((int64_t)get_HILO()) + ((int64_t)(int32_t)T0 * (int64_t)(int32_t)T1));
244 void do_macchi (void)
246 set_HIT0_LO(((int64_t)get_HILO()) + ((int64_t)(int32_t)T0 * (int64_t)(int32_t)T1));
251 set_HI_LOT0(((uint64_t)get_HILO()) + ((uint64_t)(uint32_t)T0 * (uint64_t)(uint32_t)T1));
254 void do_macchiu (void)
256 set_HIT0_LO(((uint64_t)get_HILO()) + ((uint64_t)(uint32_t)T0 * (uint64_t)(uint32_t)T1));
261 set_HI_LOT0(((int64_t)get_HILO()) - ((int64_t)(int32_t)T0 * (int64_t)(int32_t)T1));
264 void do_msachi (void)
266 set_HIT0_LO(((int64_t)get_HILO()) - ((int64_t)(int32_t)T0 * (int64_t)(int32_t)T1));
271 set_HI_LOT0(((uint64_t)get_HILO()) - ((uint64_t)(uint32_t)T0 * (uint64_t)(uint32_t)T1));
274 void do_msachiu (void)
276 set_HIT0_LO(((uint64_t)get_HILO()) - ((uint64_t)(uint32_t)T0 * (uint64_t)(uint32_t)T1));
281 set_HIT0_LO((int64_t)(int32_t)T0 * (int64_t)(int32_t)T1);
284 void do_mulhiu (void)
286 set_HIT0_LO((uint64_t)(uint32_t)T0 * (uint64_t)(uint32_t)T1);
289 void do_mulshi (void)
291 set_HIT0_LO(0 - ((int64_t)(int32_t)T0 * (int64_t)(int32_t)T1));
294 void do_mulshiu (void)
296 set_HIT0_LO(0 - ((uint64_t)(uint32_t)T0 * (uint64_t)(uint32_t)T1));
#if defined(TARGET_MIPS64)
/* 64x64 -> 128 bit multiplies, result in HI:LO of the current TC. */
void do_dmult (void)
{
    muls64(&(env->LO[env->current_tc][0]), &(env->HI[env->current_tc][0]), T0, T1);
}

void do_dmultu (void)
{
    mulu64(&(env->LO[env->current_tc][0]), &(env->HI[env->current_tc][0]), T0, T1);
}
#endif
311 #ifdef CONFIG_USER_ONLY
312 void do_mfc0_random (void)
314 cpu_abort(env, "mfc0 random\n");
317 void do_mfc0_count (void)
319 cpu_abort(env, "mfc0 count\n");
322 void cpu_mips_store_count(CPUState *env, uint32_t value)
324 cpu_abort(env, "mtc0 count\n");
327 void cpu_mips_store_compare(CPUState *env, uint32_t value)
329 cpu_abort(env, "mtc0 compare\n");
332 void cpu_mips_start_count(CPUState *env)
334 cpu_abort(env, "start count\n");
337 void cpu_mips_stop_count(CPUState *env)
339 cpu_abort(env, "stop count\n");
342 void cpu_mips_update_irq(CPUState *env)
344 cpu_abort(env, "mtc0 status / mtc0 cause\n");
347 void do_mtc0_status_debug(uint32_t old, uint32_t val)
349 cpu_abort(env, "mtc0 status debug\n");
352 void do_mtc0_status_irqraise_debug (void)
354 cpu_abort(env, "mtc0 status irqraise debug\n");
357 void cpu_mips_tlb_flush (CPUState *env, int flush_global)
359 cpu_abort(env, "mips_tlb_flush\n");
365 void do_mfc0_mvpcontrol (void)
367 T0 = env->mvp->CP0_MVPControl;
370 void do_mfc0_mvpconf0 (void)
372 T0 = env->mvp->CP0_MVPConf0;
375 void do_mfc0_mvpconf1 (void)
377 T0 = env->mvp->CP0_MVPConf1;
380 void do_mfc0_random (void)
382 T0 = (int32_t)cpu_mips_get_random(env);
385 void do_mfc0_tcstatus (void)
387 T0 = env->CP0_TCStatus[env->current_tc];
390 void do_mftc0_tcstatus(void)
392 int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
394 T0 = env->CP0_TCStatus[other_tc];
397 void do_mfc0_tcbind (void)
399 T0 = env->CP0_TCBind[env->current_tc];
402 void do_mftc0_tcbind(void)
404 int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
406 T0 = env->CP0_TCBind[other_tc];
409 void do_mfc0_tcrestart (void)
411 T0 = env->PC[env->current_tc];
414 void do_mftc0_tcrestart(void)
416 int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
418 T0 = env->PC[other_tc];
421 void do_mfc0_tchalt (void)
423 T0 = env->CP0_TCHalt[env->current_tc];
426 void do_mftc0_tchalt(void)
428 int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
430 T0 = env->CP0_TCHalt[other_tc];
433 void do_mfc0_tccontext (void)
435 T0 = env->CP0_TCContext[env->current_tc];
438 void do_mftc0_tccontext(void)
440 int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
442 T0 = env->CP0_TCContext[other_tc];
445 void do_mfc0_tcschedule (void)
447 T0 = env->CP0_TCSchedule[env->current_tc];
450 void do_mftc0_tcschedule(void)
452 int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
454 T0 = env->CP0_TCSchedule[other_tc];
457 void do_mfc0_tcschefback (void)
459 T0 = env->CP0_TCScheFBack[env->current_tc];
462 void do_mftc0_tcschefback(void)
464 int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
466 T0 = env->CP0_TCScheFBack[other_tc];
469 void do_mfc0_count (void)
471 T0 = (int32_t)cpu_mips_get_count(env);
474 void do_mftc0_entryhi(void)
476 int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
478 T0 = (env->CP0_EntryHi & ~0xff) | (env->CP0_TCStatus[other_tc] & 0xff);
481 void do_mftc0_status(void)
483 int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
484 uint32_t tcstatus = env->CP0_TCStatus[other_tc];
486 T0 = env->CP0_Status & ~0xf1000018;
487 T0 |= tcstatus & (0xf << CP0TCSt_TCU0);
488 T0 |= (tcstatus & (1 << CP0TCSt_TMX)) >> (CP0TCSt_TMX - CP0St_MX);
489 T0 |= (tcstatus & (0x3 << CP0TCSt_TKSU)) >> (CP0TCSt_TKSU - CP0St_KSU);
492 void do_mfc0_lladdr (void)
494 T0 = (int32_t)env->CP0_LLAddr >> 4;
497 void do_mfc0_watchlo (uint32_t sel)
499 T0 = (int32_t)env->CP0_WatchLo[sel];
502 void do_mfc0_watchhi (uint32_t sel)
504 T0 = env->CP0_WatchHi[sel];
507 void do_mfc0_debug (void)
510 if (env->hflags & MIPS_HFLAG_DM)
514 void do_mftc0_debug(void)
516 int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
518 /* XXX: Might be wrong, check with EJTAG spec. */
519 T0 = (env->CP0_Debug & ~((1 << CP0DB_SSt) | (1 << CP0DB_Halt))) |
520 (env->CP0_Debug_tcstatus[other_tc] &
521 ((1 << CP0DB_SSt) | (1 << CP0DB_Halt)));
#if defined(TARGET_MIPS64)
/* 64-bit CP0 reads (no sign truncation to 32 bits). */
void do_dmfc0_tcrestart (void)
{
    T0 = env->PC[env->current_tc];
}

void do_dmfc0_tchalt (void)
{
    T0 = env->CP0_TCHalt[env->current_tc];
}

void do_dmfc0_tccontext (void)
{
    T0 = env->CP0_TCContext[env->current_tc];
}

void do_dmfc0_tcschedule (void)
{
    T0 = env->CP0_TCSchedule[env->current_tc];
}

void do_dmfc0_tcschefback (void)
{
    T0 = env->CP0_TCScheFBack[env->current_tc];
}

void do_dmfc0_lladdr (void)
{
    T0 = env->CP0_LLAddr >> 4;
}

void do_dmfc0_watchlo (uint32_t sel)
{
    T0 = env->CP0_WatchLo[sel];
}
#endif /* TARGET_MIPS64 */
561 void do_mtc0_index (void)
564 unsigned int tmp = env->tlb->nb_tlb;
570 env->CP0_Index = (env->CP0_Index & 0x80000000) | (T0 & (num - 1));
573 void do_mtc0_mvpcontrol (void)
578 if (env->CP0_VPEConf0 & (1 << CP0VPEC0_MVP))
579 mask |= (1 << CP0MVPCo_CPA) | (1 << CP0MVPCo_VPC) |
581 if (env->mvp->CP0_MVPControl & (1 << CP0MVPCo_VPC))
582 mask |= (1 << CP0MVPCo_STLB);
583 newval = (env->mvp->CP0_MVPControl & ~mask) | (T0 & mask);
585 // TODO: Enable/disable shared TLB, enable/disable VPEs.
587 env->mvp->CP0_MVPControl = newval;
590 void do_mtc0_vpecontrol (void)
595 mask = (1 << CP0VPECo_YSI) | (1 << CP0VPECo_GSI) |
596 (1 << CP0VPECo_TE) | (0xff << CP0VPECo_TargTC);
597 newval = (env->CP0_VPEControl & ~mask) | (T0 & mask);
599 /* Yield scheduler intercept not implemented. */
600 /* Gating storage scheduler intercept not implemented. */
602 // TODO: Enable/disable TCs.
604 env->CP0_VPEControl = newval;
607 void do_mtc0_vpeconf0 (void)
612 if (env->CP0_VPEConf0 & (1 << CP0VPEC0_MVP)) {
613 if (env->CP0_VPEConf0 & (1 << CP0VPEC0_VPA))
614 mask |= (0xff << CP0VPEC0_XTC);
615 mask |= (1 << CP0VPEC0_MVP) | (1 << CP0VPEC0_VPA);
617 newval = (env->CP0_VPEConf0 & ~mask) | (T0 & mask);
619 // TODO: TC exclusive handling due to ERL/EXL.
621 env->CP0_VPEConf0 = newval;
624 void do_mtc0_vpeconf1 (void)
629 if (env->mvp->CP0_MVPControl & (1 << CP0MVPCo_VPC))
630 mask |= (0xff << CP0VPEC1_NCX) | (0xff << CP0VPEC1_NCP2) |
631 (0xff << CP0VPEC1_NCP1);
632 newval = (env->CP0_VPEConf1 & ~mask) | (T0 & mask);
634 /* UDI not implemented. */
635 /* CP2 not implemented. */
637 // TODO: Handle FPU (CP1) binding.
639 env->CP0_VPEConf1 = newval;
642 void do_mtc0_yqmask (void)
644 /* Yield qualifier inputs not implemented. */
645 env->CP0_YQMask = 0x00000000;
648 void do_mtc0_vpeopt (void)
650 env->CP0_VPEOpt = T0 & 0x0000ffff;
653 void do_mtc0_entrylo0 (void)
655 /* Large physaddr (PABITS) not implemented */
656 /* 1k pages not implemented */
657 env->CP0_EntryLo0 = T0 & 0x3FFFFFFF;
660 void do_mtc0_tcstatus (void)
662 uint32_t mask = env->CP0_TCStatus_rw_bitmask;
665 newval = (env->CP0_TCStatus[env->current_tc] & ~mask) | (T0 & mask);
667 // TODO: Sync with CP0_Status.
669 env->CP0_TCStatus[env->current_tc] = newval;
672 void do_mttc0_tcstatus (void)
674 int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
676 // TODO: Sync with CP0_Status.
678 env->CP0_TCStatus[other_tc] = T0;
681 void do_mtc0_tcbind (void)
683 uint32_t mask = (1 << CP0TCBd_TBE);
686 if (env->mvp->CP0_MVPControl & (1 << CP0MVPCo_VPC))
687 mask |= (1 << CP0TCBd_CurVPE);
688 newval = (env->CP0_TCBind[env->current_tc] & ~mask) | (T0 & mask);
689 env->CP0_TCBind[env->current_tc] = newval;
692 void do_mttc0_tcbind (void)
694 int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
695 uint32_t mask = (1 << CP0TCBd_TBE);
698 if (env->mvp->CP0_MVPControl & (1 << CP0MVPCo_VPC))
699 mask |= (1 << CP0TCBd_CurVPE);
700 newval = (env->CP0_TCBind[other_tc] & ~mask) | (T0 & mask);
701 env->CP0_TCBind[other_tc] = newval;
704 void do_mtc0_tcrestart (void)
706 env->PC[env->current_tc] = T0;
707 env->CP0_TCStatus[env->current_tc] &= ~(1 << CP0TCSt_TDS);
708 env->CP0_LLAddr = 0ULL;
709 /* MIPS16 not implemented. */
712 void do_mttc0_tcrestart (void)
714 int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
716 env->PC[other_tc] = T0;
717 env->CP0_TCStatus[other_tc] &= ~(1 << CP0TCSt_TDS);
718 env->CP0_LLAddr = 0ULL;
719 /* MIPS16 not implemented. */
722 void do_mtc0_tchalt (void)
724 env->CP0_TCHalt[env->current_tc] = T0 & 0x1;
726 // TODO: Halt TC / Restart (if allocated+active) TC.
729 void do_mttc0_tchalt (void)
731 int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
733 // TODO: Halt TC / Restart (if allocated+active) TC.
735 env->CP0_TCHalt[other_tc] = T0;
738 void do_mtc0_tccontext (void)
740 env->CP0_TCContext[env->current_tc] = T0;
743 void do_mttc0_tccontext (void)
745 int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
747 env->CP0_TCContext[other_tc] = T0;
750 void do_mtc0_tcschedule (void)
752 env->CP0_TCSchedule[env->current_tc] = T0;
755 void do_mttc0_tcschedule (void)
757 int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
759 env->CP0_TCSchedule[other_tc] = T0;
762 void do_mtc0_tcschefback (void)
764 env->CP0_TCScheFBack[env->current_tc] = T0;
767 void do_mttc0_tcschefback (void)
769 int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
771 env->CP0_TCScheFBack[other_tc] = T0;
774 void do_mtc0_entrylo1 (void)
776 /* Large physaddr (PABITS) not implemented */
777 /* 1k pages not implemented */
778 env->CP0_EntryLo1 = T0 & 0x3FFFFFFF;
781 void do_mtc0_context (void)
783 env->CP0_Context = (env->CP0_Context & 0x007FFFFF) | (T0 & ~0x007FFFFF);
786 void do_mtc0_pagemask (void)
788 /* 1k pages not implemented */
789 env->CP0_PageMask = T0 & (0x1FFFFFFF & (TARGET_PAGE_MASK << 1));
792 void do_mtc0_pagegrain (void)
794 /* SmartMIPS not implemented */
795 /* Large physaddr (PABITS) not implemented */
796 /* 1k pages not implemented */
797 env->CP0_PageGrain = 0;
800 void do_mtc0_wired (void)
802 env->CP0_Wired = T0 % env->tlb->nb_tlb;
805 void do_mtc0_srsconf0 (void)
807 env->CP0_SRSConf0 |= T0 & env->CP0_SRSConf0_rw_bitmask;
810 void do_mtc0_srsconf1 (void)
812 env->CP0_SRSConf1 |= T0 & env->CP0_SRSConf1_rw_bitmask;
815 void do_mtc0_srsconf2 (void)
817 env->CP0_SRSConf2 |= T0 & env->CP0_SRSConf2_rw_bitmask;
820 void do_mtc0_srsconf3 (void)
822 env->CP0_SRSConf3 |= T0 & env->CP0_SRSConf3_rw_bitmask;
825 void do_mtc0_srsconf4 (void)
827 env->CP0_SRSConf4 |= T0 & env->CP0_SRSConf4_rw_bitmask;
830 void do_mtc0_hwrena (void)
832 env->CP0_HWREna = T0 & 0x0000000F;
835 void do_mtc0_count (void)
837 cpu_mips_store_count(env, T0);
840 void do_mtc0_entryhi (void)
842 target_ulong old, val;
844 /* 1k pages not implemented */
845 val = T0 & ((TARGET_PAGE_MASK << 1) | 0xFF);
846 #if defined(TARGET_MIPS64)
849 old = env->CP0_EntryHi;
850 env->CP0_EntryHi = val;
851 if (env->CP0_Config3 & (1 << CP0C3_MT)) {
852 uint32_t tcst = env->CP0_TCStatus[env->current_tc] & ~0xff;
853 env->CP0_TCStatus[env->current_tc] = tcst | (val & 0xff);
855 /* If the ASID changes, flush qemu's TLB. */
856 if ((old & 0xFF) != (val & 0xFF))
857 cpu_mips_tlb_flush(env, 1);
860 void do_mttc0_entryhi(void)
862 int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
864 env->CP0_EntryHi = (env->CP0_EntryHi & 0xff) | (T0 & ~0xff);
865 env->CP0_TCStatus[other_tc] = (env->CP0_TCStatus[other_tc] & ~0xff) | (T0 & 0xff);
868 void do_mtc0_compare (void)
870 cpu_mips_store_compare(env, T0);
873 void do_mtc0_status (void)
876 uint32_t mask = env->CP0_Status_rw_bitmask;
879 old = env->CP0_Status;
880 env->CP0_Status = (env->CP0_Status & ~mask) | val;
882 if (loglevel & CPU_LOG_EXEC)
883 do_mtc0_status_debug(old, val);
884 cpu_mips_update_irq(env);
887 void do_mttc0_status(void)
889 int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
890 uint32_t tcstatus = env->CP0_TCStatus[other_tc];
892 env->CP0_Status = T0 & ~0xf1000018;
893 tcstatus = (tcstatus & ~(0xf << CP0TCSt_TCU0)) | (T0 & (0xf << CP0St_CU0));
894 tcstatus = (tcstatus & ~(1 << CP0TCSt_TMX)) | ((T0 & (1 << CP0St_MX)) << (CP0TCSt_TMX - CP0St_MX));
895 tcstatus = (tcstatus & ~(0x3 << CP0TCSt_TKSU)) | ((T0 & (0x3 << CP0St_KSU)) << (CP0TCSt_TKSU - CP0St_KSU));
896 env->CP0_TCStatus[other_tc] = tcstatus;
899 void do_mtc0_intctl (void)
901 /* vectored interrupts not implemented, no performance counters. */
902 env->CP0_IntCtl = (env->CP0_IntCtl & ~0x000002e0) | (T0 & 0x000002e0);
905 void do_mtc0_srsctl (void)
907 uint32_t mask = (0xf << CP0SRSCtl_ESS) | (0xf << CP0SRSCtl_PSS);
908 env->CP0_SRSCtl = (env->CP0_SRSCtl & ~mask) | (T0 & mask);
911 void do_mtc0_cause (void)
913 uint32_t mask = 0x00C00300;
914 uint32_t old = env->CP0_Cause;
916 if (env->insn_flags & ISA_MIPS32R2)
917 mask |= 1 << CP0Ca_DC;
919 env->CP0_Cause = (env->CP0_Cause & ~mask) | (T0 & mask);
921 if ((old ^ env->CP0_Cause) & (1 << CP0Ca_DC)) {
922 if (env->CP0_Cause & (1 << CP0Ca_DC))
923 cpu_mips_stop_count(env);
925 cpu_mips_start_count(env);
928 /* Handle the software interrupt as an hardware one, as they
930 if (T0 & CP0Ca_IP_mask) {
931 cpu_mips_update_irq(env);
935 void do_mtc0_ebase (void)
937 /* vectored interrupts not implemented */
938 /* Multi-CPU not implemented */
939 env->CP0_EBase = 0x80000000 | (T0 & 0x3FFFF000);
942 void do_mtc0_config0 (void)
944 env->CP0_Config0 = (env->CP0_Config0 & 0x81FFFFF8) | (T0 & 0x00000007);
947 void do_mtc0_config2 (void)
949 /* tertiary/secondary caches not implemented */
950 env->CP0_Config2 = (env->CP0_Config2 & 0x8FFF0FFF);
953 void do_mtc0_watchlo (uint32_t sel)
955 /* Watch exceptions for instructions, data loads, data stores
957 env->CP0_WatchLo[sel] = (T0 & ~0x7);
960 void do_mtc0_watchhi (uint32_t sel)
962 env->CP0_WatchHi[sel] = (T0 & 0x40FF0FF8);
963 env->CP0_WatchHi[sel] &= ~(env->CP0_WatchHi[sel] & T0 & 0x7);
966 void do_mtc0_xcontext (void)
968 target_ulong mask = (1ULL << (env->SEGBITS - 7)) - 1;
969 env->CP0_XContext = (env->CP0_XContext & mask) | (T0 & ~mask);
972 void do_mtc0_framemask (void)
974 env->CP0_Framemask = T0; /* XXX */
977 void do_mtc0_debug (void)
979 env->CP0_Debug = (env->CP0_Debug & 0x8C03FC1F) | (T0 & 0x13300120);
980 if (T0 & (1 << CP0DB_DM))
981 env->hflags |= MIPS_HFLAG_DM;
983 env->hflags &= ~MIPS_HFLAG_DM;
986 void do_mttc0_debug(void)
988 int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
990 /* XXX: Might be wrong, check with EJTAG spec. */
991 env->CP0_Debug_tcstatus[other_tc] = T0 & ((1 << CP0DB_SSt) | (1 << CP0DB_Halt));
992 env->CP0_Debug = (env->CP0_Debug & ((1 << CP0DB_SSt) | (1 << CP0DB_Halt))) |
993 (T0 & ~((1 << CP0DB_SSt) | (1 << CP0DB_Halt)));
996 void do_mtc0_performance0 (void)
998 env->CP0_Performance0 = T0 & 0x000007ff;
1001 void do_mtc0_taglo (void)
1003 env->CP0_TagLo = T0 & 0xFFFFFCF6;
1006 void do_mtc0_datalo (void)
1008 env->CP0_DataLo = T0; /* XXX */
1011 void do_mtc0_taghi (void)
1013 env->CP0_TagHi = T0; /* XXX */
1016 void do_mtc0_datahi (void)
1018 env->CP0_DataHi = T0; /* XXX */
1021 void do_mtc0_status_debug(uint32_t old, uint32_t val)
1023 fprintf(logfile, "Status %08x (%08x) => %08x (%08x) Cause %08x",
1024 old, old & env->CP0_Cause & CP0Ca_IP_mask,
1025 val, val & env->CP0_Cause & CP0Ca_IP_mask,
1027 switch (env->hflags & MIPS_HFLAG_KSU) {
1028 case MIPS_HFLAG_UM: fputs(", UM\n", logfile); break;
1029 case MIPS_HFLAG_SM: fputs(", SM\n", logfile); break;
1030 case MIPS_HFLAG_KM: fputs("\n", logfile); break;
1031 default: cpu_abort(env, "Invalid MMU mode!\n"); break;
1035 void do_mtc0_status_irqraise_debug(void)
1037 fprintf(logfile, "Raise pending IRQs\n");
1039 #endif /* !CONFIG_USER_ONLY */
1041 /* MIPS MT functions */
1042 void do_mftgpr(uint32_t sel)
1044 int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1046 T0 = env->gpr[other_tc][sel];
1049 void do_mftlo(uint32_t sel)
1051 int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1053 T0 = env->LO[other_tc][sel];
1056 void do_mfthi(uint32_t sel)
1058 int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1060 T0 = env->HI[other_tc][sel];
1063 void do_mftacx(uint32_t sel)
1065 int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1067 T0 = env->ACX[other_tc][sel];
1070 void do_mftdsp(void)
1072 int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1074 T0 = env->DSPControl[other_tc];
1077 void do_mttgpr(uint32_t sel)
1079 int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1081 T0 = env->gpr[other_tc][sel];
1084 void do_mttlo(uint32_t sel)
1086 int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1088 T0 = env->LO[other_tc][sel];
1091 void do_mtthi(uint32_t sel)
1093 int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1095 T0 = env->HI[other_tc][sel];
1098 void do_mttacx(uint32_t sel)
1100 int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1102 T0 = env->ACX[other_tc][sel];
1105 void do_mttdsp(void)
1107 int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1109 T0 = env->DSPControl[other_tc];
1112 /* MIPS MT functions */
1145 // TODO: store to TC register
1151 /* No scheduling policy implemented. */
1153 if (env->CP0_VPEControl & (1 << CP0VPECo_YSI) &&
1154 env->CP0_TCStatus[env->current_tc] & (1 << CP0TCSt_DT)) {
1155 env->CP0_VPEControl &= ~(0x7 << CP0VPECo_EXCPT);
1156 env->CP0_VPEControl |= 4 << CP0VPECo_EXCPT;
1157 do_raise_exception(EXCP_THREAD);
1160 } else if (T0 == 0) {
1161 if (0 /* TODO: TC underflow */) {
1162 env->CP0_VPEControl &= ~(0x7 << CP0VPECo_EXCPT);
1163 do_raise_exception(EXCP_THREAD);
1165 // TODO: Deallocate TC
1167 } else if (T0 > 0) {
1168 /* Yield qualifier inputs not implemented. */
1169 env->CP0_VPEControl &= ~(0x7 << CP0VPECo_EXCPT);
1170 env->CP0_VPEControl |= 2 << CP0VPECo_EXCPT;
1171 do_raise_exception(EXCP_THREAD);
1173 T0 = env->CP0_YQMask;
1177 void fpu_handle_exception(void)
1179 #ifdef CONFIG_SOFTFLOAT
1180 int flags = get_float_exception_flags(&env->fpu->fp_status);
1181 unsigned int cpuflags = 0, enable, cause = 0;
1183 enable = GET_FP_ENABLE(env->fpu->fcr31);
1185 /* determine current flags */
1186 if (flags & float_flag_invalid) {
1187 cpuflags |= FP_INVALID;
1188 cause |= FP_INVALID & enable;
1190 if (flags & float_flag_divbyzero) {
1191 cpuflags |= FP_DIV0;
1192 cause |= FP_DIV0 & enable;
1194 if (flags & float_flag_overflow) {
1195 cpuflags |= FP_OVERFLOW;
1196 cause |= FP_OVERFLOW & enable;
1198 if (flags & float_flag_underflow) {
1199 cpuflags |= FP_UNDERFLOW;
1200 cause |= FP_UNDERFLOW & enable;
1202 if (flags & float_flag_inexact) {
1203 cpuflags |= FP_INEXACT;
1204 cause |= FP_INEXACT & enable;
1206 SET_FP_FLAGS(env->fpu->fcr31, cpuflags);
1207 SET_FP_CAUSE(env->fpu->fcr31, cause);
1209 SET_FP_FLAGS(env->fpu->fcr31, 0);
1210 SET_FP_CAUSE(env->fpu->fcr31, 0);
1214 #ifndef CONFIG_USER_ONLY
1215 /* TLB management */
1216 void cpu_mips_tlb_flush (CPUState *env, int flush_global)
1218 /* Flush qemu's TLB and discard all shadowed entries. */
1219 tlb_flush (env, flush_global);
1220 env->tlb->tlb_in_use = env->tlb->nb_tlb;
1223 static void r4k_mips_tlb_flush_extra (CPUState *env, int first)
1225 /* Discard entries from env->tlb[first] onwards. */
1226 while (env->tlb->tlb_in_use > first) {
1227 r4k_invalidate_tlb(env, --env->tlb->tlb_in_use, 0);
1231 static void r4k_fill_tlb (int idx)
1235 /* XXX: detect conflicting TLBs and raise a MCHECK exception when needed */
1236 tlb = &env->tlb->mmu.r4k.tlb[idx];
1237 tlb->VPN = env->CP0_EntryHi & (TARGET_PAGE_MASK << 1);
1238 #if defined(TARGET_MIPS64)
1239 tlb->VPN &= env->SEGMask;
1241 tlb->ASID = env->CP0_EntryHi & 0xFF;
1242 tlb->PageMask = env->CP0_PageMask;
1243 tlb->G = env->CP0_EntryLo0 & env->CP0_EntryLo1 & 1;
1244 tlb->V0 = (env->CP0_EntryLo0 & 2) != 0;
1245 tlb->D0 = (env->CP0_EntryLo0 & 4) != 0;
1246 tlb->C0 = (env->CP0_EntryLo0 >> 3) & 0x7;
1247 tlb->PFN[0] = (env->CP0_EntryLo0 >> 6) << 12;
1248 tlb->V1 = (env->CP0_EntryLo1 & 2) != 0;
1249 tlb->D1 = (env->CP0_EntryLo1 & 4) != 0;
1250 tlb->C1 = (env->CP0_EntryLo1 >> 3) & 0x7;
1251 tlb->PFN[1] = (env->CP0_EntryLo1 >> 6) << 12;
1254 void r4k_do_tlbwi (void)
1256 /* Discard cached TLB entries. We could avoid doing this if the
1257 tlbwi is just upgrading access permissions on the current entry;
1258 that might be a further win. */
1259 r4k_mips_tlb_flush_extra (env, env->tlb->nb_tlb);
1261 r4k_invalidate_tlb(env, env->CP0_Index % env->tlb->nb_tlb, 0);
1262 r4k_fill_tlb(env->CP0_Index % env->tlb->nb_tlb);
1265 void r4k_do_tlbwr (void)
1267 int r = cpu_mips_get_random(env);
1269 r4k_invalidate_tlb(env, r, 1);
1273 void r4k_do_tlbp (void)
1282 ASID = env->CP0_EntryHi & 0xFF;
1283 for (i = 0; i < env->tlb->nb_tlb; i++) {
1284 tlb = &env->tlb->mmu.r4k.tlb[i];
1285 /* 1k pages are not supported. */
1286 mask = tlb->PageMask | ~(TARGET_PAGE_MASK << 1);
1287 tag = env->CP0_EntryHi & ~mask;
1288 VPN = tlb->VPN & ~mask;
1289 /* Check ASID, virtual page number & size */
1290 if ((tlb->G == 1 || tlb->ASID == ASID) && VPN == tag) {
1296 if (i == env->tlb->nb_tlb) {
1297 /* No match. Discard any shadow entries, if any of them match. */
1298 for (i = env->tlb->nb_tlb; i < env->tlb->tlb_in_use; i++) {
1299 tlb = &env->tlb->mmu.r4k.tlb[i];
1300 /* 1k pages are not supported. */
1301 mask = tlb->PageMask | ~(TARGET_PAGE_MASK << 1);
1302 tag = env->CP0_EntryHi & ~mask;
1303 VPN = tlb->VPN & ~mask;
1304 /* Check ASID, virtual page number & size */
1305 if ((tlb->G == 1 || tlb->ASID == ASID) && VPN == tag) {
1306 r4k_mips_tlb_flush_extra (env, i);
1311 env->CP0_Index |= 0x80000000;
1315 void r4k_do_tlbr (void)
1320 ASID = env->CP0_EntryHi & 0xFF;
1321 tlb = &env->tlb->mmu.r4k.tlb[env->CP0_Index % env->tlb->nb_tlb];
1323 /* If this will change the current ASID, flush qemu's TLB. */
1324 if (ASID != tlb->ASID)
1325 cpu_mips_tlb_flush (env, 1);
1327 r4k_mips_tlb_flush_extra(env, env->tlb->nb_tlb);
1329 env->CP0_EntryHi = tlb->VPN | tlb->ASID;
1330 env->CP0_PageMask = tlb->PageMask;
1331 env->CP0_EntryLo0 = tlb->G | (tlb->V0 << 1) | (tlb->D0 << 2) |
1332 (tlb->C0 << 3) | (tlb->PFN[0] >> 6);
1333 env->CP0_EntryLo1 = tlb->G | (tlb->V1 << 1) | (tlb->D1 << 2) |
1334 (tlb->C1 << 3) | (tlb->PFN[1] >> 6);
1337 #endif /* !CONFIG_USER_ONLY */
1339 void dump_ldst (const unsigned char *func)
1342 fprintf(logfile, "%s => " TARGET_FMT_lx " " TARGET_FMT_lx "\n", __func__, T0, T1);
1348 fprintf(logfile, "%s " TARGET_FMT_lx " at " TARGET_FMT_lx " (" TARGET_FMT_lx ")\n", __func__,
1349 T1, T0, env->CP0_LLAddr);
1356 T0 = env->CP0_Status;
1357 env->CP0_Status = T0 & ~(1 << CP0St_IE);
1358 cpu_mips_update_irq(env);
1363 T0 = env->CP0_Status;
1364 env->CP0_Status = T0 | (1 << CP0St_IE);
1365 cpu_mips_update_irq(env);
1368 void debug_pre_eret (void)
1370 fprintf(logfile, "ERET: PC " TARGET_FMT_lx " EPC " TARGET_FMT_lx,
1371 env->PC[env->current_tc], env->CP0_EPC);
1372 if (env->CP0_Status & (1 << CP0St_ERL))
1373 fprintf(logfile, " ErrorEPC " TARGET_FMT_lx, env->CP0_ErrorEPC);
1374 if (env->hflags & MIPS_HFLAG_DM)
1375 fprintf(logfile, " DEPC " TARGET_FMT_lx, env->CP0_DEPC);
1376 fputs("\n", logfile);
1379 void debug_post_eret (void)
1381 fprintf(logfile, " => PC " TARGET_FMT_lx " EPC " TARGET_FMT_lx,
1382 env->PC[env->current_tc], env->CP0_EPC);
1383 if (env->CP0_Status & (1 << CP0St_ERL))
1384 fprintf(logfile, " ErrorEPC " TARGET_FMT_lx, env->CP0_ErrorEPC);
1385 if (env->hflags & MIPS_HFLAG_DM)
1386 fprintf(logfile, " DEPC " TARGET_FMT_lx, env->CP0_DEPC);
1387 switch (env->hflags & MIPS_HFLAG_KSU) {
1388 case MIPS_HFLAG_UM: fputs(", UM\n", logfile); break;
1389 case MIPS_HFLAG_SM: fputs(", SM\n", logfile); break;
1390 case MIPS_HFLAG_KM: fputs("\n", logfile); break;
1391 default: cpu_abort(env, "Invalid MMU mode!\n"); break;
1397 if (loglevel & CPU_LOG_EXEC)
1399 if (env->CP0_Status & (1 << CP0St_ERL)) {
1400 env->PC[env->current_tc] = env->CP0_ErrorEPC;
1401 env->CP0_Status &= ~(1 << CP0St_ERL);
1403 env->PC[env->current_tc] = env->CP0_EPC;
1404 env->CP0_Status &= ~(1 << CP0St_EXL);
1406 compute_hflags(env);
1407 if (loglevel & CPU_LOG_EXEC)
1409 env->CP0_LLAddr = 1;
1412 void do_deret (void)
1414 if (loglevel & CPU_LOG_EXEC)
1416 env->PC[env->current_tc] = env->CP0_DEPC;
1417 env->hflags &= MIPS_HFLAG_DM;
1418 compute_hflags(env);
1419 if (loglevel & CPU_LOG_EXEC)
1421 env->CP0_LLAddr = 1;
1424 void do_rdhwr_cpunum(void)
1426 if ((env->hflags & MIPS_HFLAG_CP0) ||
1427 (env->CP0_HWREna & (1 << 0)))
1428 T0 = env->CP0_EBase & 0x3ff;
1430 do_raise_exception(EXCP_RI);
1433 void do_rdhwr_synci_step(void)
1435 if ((env->hflags & MIPS_HFLAG_CP0) ||
1436 (env->CP0_HWREna & (1 << 1)))
1437 T0 = env->SYNCI_Step;
1439 do_raise_exception(EXCP_RI);
1442 void do_rdhwr_cc(void)
1444 if ((env->hflags & MIPS_HFLAG_CP0) ||
1445 (env->CP0_HWREna & (1 << 2)))
1446 T0 = env->CP0_Count;
1448 do_raise_exception(EXCP_RI);
1451 void do_rdhwr_ccres(void)
1453 if ((env->hflags & MIPS_HFLAG_CP0) ||
1454 (env->CP0_HWREna & (1 << 3)))
1457 do_raise_exception(EXCP_RI);
1460 /* Bitfield operations. */
1461 void do_ext(uint32_t pos, uint32_t size)
1463 T0 = (int32_t)((T1 >> pos) & ((size < 32) ? ((1 << size) - 1) : ~0));
1466 void do_ins(uint32_t pos, uint32_t size)
1468 target_ulong mask = ((size < 32) ? ((1 << size) - 1) : ~0) << pos;
1470 T0 = (int32_t)((T0 & ~mask) | ((T1 << pos) & mask));
1475 T0 = (int32_t)(((T1 << 8) & ~0x00FF00FF) | ((T1 >> 8) & 0x00FF00FF));
#if defined(TARGET_MIPS64)
/* 64-bit bitfield extract/insert and byte/halfword swaps. */
void do_dext(uint32_t pos, uint32_t size)
{
    T0 = (T1 >> pos) & ((size < 64) ? ((1ULL << size) - 1) : ~0ULL);
}

void do_dins(uint32_t pos, uint32_t size)
{
    target_ulong mask = ((size < 64) ? ((1ULL << size) - 1) : ~0ULL) << pos;

    T0 = (T0 & ~mask) | ((T1 << pos) & mask);
}

void do_dsbh(void)
{
    T0 = ((T1 << 8) & ~0x00FF00FF00FF00FFULL) | ((T1 >> 8) & 0x00FF00FF00FF00FFULL);
}

void do_dshd(void)
{
    T1 = ((T1 << 16) & ~0x0000FFFF0000FFFFULL) | ((T1 >> 16) & 0x0000FFFF0000FFFFULL);
    T0 = (T1 << 32) | (T1 >> 32);
}
#endif /* TARGET_MIPS64 */
1503 void do_pmon (int function)
1507 case 2: /* TODO: char inbyte(int waitflag); */
1508 if (env->gpr[env->current_tc][4] == 0)
1509 env->gpr[env->current_tc][2] = -1;
1511 case 11: /* TODO: char inbyte (void); */
1512 env->gpr[env->current_tc][2] = -1;
1516 printf("%c", (char)(env->gpr[env->current_tc][4] & 0xFF));
1522 unsigned char *fmt = (void *)(unsigned long)env->gpr[env->current_tc][4];
1532 do_raise_exception(EXCP_HLT);
1535 #if !defined(CONFIG_USER_ONLY)
1537 static void do_unaligned_access (target_ulong addr, int is_write, int is_user, void *retaddr);
1539 #define MMUSUFFIX _mmu
1540 #define ALIGNED_ONLY
1543 #include "softmmu_template.h"
1546 #include "softmmu_template.h"
1549 #include "softmmu_template.h"
1552 #include "softmmu_template.h"
1554 static void do_unaligned_access (target_ulong addr, int is_write, int is_user, void *retaddr)
1556 env->CP0_BadVAddr = addr;
1557 do_restore_state (retaddr);
1558 do_raise_exception ((is_write == 1) ? EXCP_AdES : EXCP_AdEL);
1561 void tlb_fill (target_ulong addr, int is_write, int mmu_idx, void *retaddr)
1563 TranslationBlock *tb;
1564 CPUState *saved_env;
1568 /* XXX: hack to restore env in all cases, even if not called from
1571 env = cpu_single_env;
1572 ret = cpu_mips_handle_mmu_fault(env, addr, is_write, mmu_idx, 1);
1575 /* now we have a real cpu fault */
1576 pc = (unsigned long)retaddr;
1577 tb = tb_find_pc(pc);
1579 /* the PC is inside the translated code. It means that we have
1580 a virtual CPU fault */
1581 cpu_restore_state(tb, env, pc, NULL);
1584 do_raise_exception_err(env->exception_index, env->error_code);
1589 void do_unassigned_access(target_phys_addr_t addr, int is_write, int is_exec,
1593 do_raise_exception(EXCP_IBE);
1595 do_raise_exception(EXCP_DBE);
1597 #endif /* !CONFIG_USER_ONLY */
1599 /* Complex FPU operations which may need stack space. */
/* Raw IEEE-754 encodings used by the MIPS-specific FP helpers:
   1.0 and 2.0 in single/double precision, plus the default quiet-NaN
   and signaling-NaN bit patterns returned on invalid operations. */
1601 #define FLOAT_ONE32 make_float32(0x3f8 << 20)
1602 #define FLOAT_ONE64 make_float64(0x3ffULL << 52)
1603 #define FLOAT_TWO32 make_float32(1 << 30)
1604 #define FLOAT_TWO64 make_float64(1ULL << 62)
1605 #define FLOAT_QNAN32 0x7fbfffff
1606 #define FLOAT_QNAN64 0x7ff7ffffffffffffULL
1607 #define FLOAT_SNAN32 0x7fffffff
1608 #define FLOAT_SNAN64 0x7fffffffffffffffULL
1610 /* convert MIPS rounding mode in FCR31 to IEEE library */
1611 unsigned int ieee_rm[] = {
1612 float_round_nearest_even,
1613 float_round_to_zero,
1618 #define RESTORE_ROUNDING_MODE \
1619 set_float_rounding_mode(ieee_rm[env->fpu->fcr31 & 3], &env->fpu->fp_status)
1621 void do_cfc1 (uint32_t reg)
1625 T0 = (int32_t)env->fpu->fcr0;
1628 T0 = ((env->fpu->fcr31 >> 24) & 0xfe) | ((env->fpu->fcr31 >> 23) & 0x1);
1631 T0 = env->fpu->fcr31 & 0x0003f07c;
1634 T0 = (env->fpu->fcr31 & 0x00000f83) | ((env->fpu->fcr31 >> 22) & 0x4);
1637 T0 = (int32_t)env->fpu->fcr31;
1642 void do_ctc1 (uint32_t reg)
1646 if (T0 & 0xffffff00)
1648 env->fpu->fcr31 = (env->fpu->fcr31 & 0x017fffff) | ((T0 & 0xfe) << 24) |
1652 if (T0 & 0x007c0000)
1654 env->fpu->fcr31 = (env->fpu->fcr31 & 0xfffc0f83) | (T0 & 0x0003f07c);
1657 if (T0 & 0x007c0000)
1659 env->fpu->fcr31 = (env->fpu->fcr31 & 0xfefff07c) | (T0 & 0x00000f83) |
1663 if (T0 & 0x007c0000)
1665 env->fpu->fcr31 = T0;
1670 /* set rounding mode */
1671 RESTORE_ROUNDING_MODE;
1672 set_float_exception_flags(0, &env->fpu->fp_status);
1673 if ((GET_FP_ENABLE(env->fpu->fcr31) | 0x20) & GET_FP_CAUSE(env->fpu->fcr31))
1674 do_raise_exception(EXCP_FPE);
1677 static always_inline char ieee_ex_to_mips(char xcpt)
1679 return (xcpt & float_flag_inexact) >> 5 |
1680 (xcpt & float_flag_underflow) >> 3 |
1681 (xcpt & float_flag_overflow) >> 1 |
1682 (xcpt & float_flag_divbyzero) << 1 |
1683 (xcpt & float_flag_invalid) << 4;
1686 static always_inline char mips_ex_to_ieee(char xcpt)
1688 return (xcpt & FP_INEXACT) << 5 |
1689 (xcpt & FP_UNDERFLOW) << 3 |
1690 (xcpt & FP_OVERFLOW) << 1 |
1691 (xcpt & FP_DIV0) >> 1 |
1692 (xcpt & FP_INVALID) >> 4;
1695 static always_inline void update_fcr31(void)
1697 int tmp = ieee_ex_to_mips(get_float_exception_flags(&env->fpu->fp_status));
1699 SET_FP_CAUSE(env->fpu->fcr31, tmp);
1700 if (GET_FP_ENABLE(env->fpu->fcr31) & tmp)
1701 do_raise_exception(EXCP_FPE);
1703 UPDATE_FP_FLAGS(env->fpu->fcr31, tmp);
/* Helper naming convention:
   Single precision routines have a "s" suffix, double precision a
   "d" suffix, 32bit integer "w", 64bit integer "l", paired single "ps",
   paired single lower "pl", paired single upper "pu". */
/* Declare the helper function for FP operation "name" on format "p". */
1711 #define FLOAT_OP(name, p) void do_float_##name##_##p(void)
1713 /* unary operations, modifying fp status */
/* NOTE(review): macro body partially lost in extraction; it defines
   do_float_<name>_d and do_float_<name>_s wrappers around the softfloat
   unary op, writing FDT2/FST2. */
1714 #define FLOAT_UNOP(name) \
1717 FDT2 = float64_ ## name(FDT0, &env->fpu->fp_status); \
1721 FST2 = float32_ ## name(FST0, &env->fpu->fp_status); \
/* Format-conversion helpers (cvt.*): each clears the softfloat exception
   flags, performs the conversion, and (in the full source) latches the
   result flags via update_fcr31(). The do_float_cvt* function headers
   were lost in extraction. Integer-result conversions substitute the
   fixed SNaN pattern when overflow or invalid was raised. */
1728 set_float_exception_flags(0, &env->fpu->fp_status);
1729 FDT2 = float32_to_float64(FST0, &env->fpu->fp_status);
1734 set_float_exception_flags(0, &env->fpu->fp_status);
1735 FDT2 = int32_to_float64(WT0, &env->fpu->fp_status);
1740 set_float_exception_flags(0, &env->fpu->fp_status);
1741 FDT2 = int64_to_float64(DT0, &env->fpu->fp_status);
1746 set_float_exception_flags(0, &env->fpu->fp_status);
1747 DT2 = float64_to_int64(FDT0, &env->fpu->fp_status);
1749 if (GET_FP_CAUSE(env->fpu->fcr31) & (FP_OVERFLOW | FP_INVALID))
1754 set_float_exception_flags(0, &env->fpu->fp_status);
1755 DT2 = float32_to_int64(FST0, &env->fpu->fp_status);
1757 if (GET_FP_CAUSE(env->fpu->fcr31) & (FP_OVERFLOW | FP_INVALID))
/* Paired-single conversions handle the low (FST/WT) and high (FSTH/WTH)
   halves independently. */
1763 set_float_exception_flags(0, &env->fpu->fp_status);
1764 FST2 = int32_to_float32(WT0, &env->fpu->fp_status);
1765 FSTH2 = int32_to_float32(WTH0, &env->fpu->fp_status);
1770 set_float_exception_flags(0, &env->fpu->fp_status);
1771 WT2 = float32_to_int32(FST0, &env->fpu->fp_status);
1772 WTH2 = float32_to_int32(FSTH0, &env->fpu->fp_status);
1774 if (GET_FP_CAUSE(env->fpu->fcr31) & (FP_OVERFLOW | FP_INVALID))
1779 set_float_exception_flags(0, &env->fpu->fp_status);
1780 FST2 = float64_to_float32(FDT0, &env->fpu->fp_status);
1785 set_float_exception_flags(0, &env->fpu->fp_status);
1786 FST2 = int32_to_float32(WT0, &env->fpu->fp_status);
1791 set_float_exception_flags(0, &env->fpu->fp_status);
1792 FST2 = int64_to_float32(DT0, &env->fpu->fp_status);
1797 set_float_exception_flags(0, &env->fpu->fp_status);
1803 set_float_exception_flags(0, &env->fpu->fp_status);
1809 set_float_exception_flags(0, &env->fpu->fp_status);
1810 WT2 = float32_to_int32(FST0, &env->fpu->fp_status);
1812 if (GET_FP_CAUSE(env->fpu->fcr31) & (FP_OVERFLOW | FP_INVALID))
1817 set_float_exception_flags(0, &env->fpu->fp_status);
1818 WT2 = float64_to_int32(FDT0, &env->fpu->fp_status);
1820 if (GET_FP_CAUSE(env->fpu->fcr31) & (FP_OVERFLOW | FP_INVALID))
/* round.l / round.w helpers: force round-to-nearest-even, convert, then
   restore the guest's FCR31 rounding mode via RESTORE_ROUNDING_MODE.
   Function headers were lost in extraction. */
1826 set_float_rounding_mode(float_round_nearest_even, &env->fpu->fp_status);
1827 DT2 = float64_to_int64(FDT0, &env->fpu->fp_status);
1828 RESTORE_ROUNDING_MODE;
1830 if (GET_FP_CAUSE(env->fpu->fcr31) & (FP_OVERFLOW | FP_INVALID))
1835 set_float_rounding_mode(float_round_nearest_even, &env->fpu->fp_status);
1836 DT2 = float32_to_int64(FST0, &env->fpu->fp_status);
1837 RESTORE_ROUNDING_MODE;
1839 if (GET_FP_CAUSE(env->fpu->fcr31) & (FP_OVERFLOW | FP_INVALID))
1844 set_float_rounding_mode(float_round_nearest_even, &env->fpu->fp_status);
1845 WT2 = float64_to_int32(FDT0, &env->fpu->fp_status);
1846 RESTORE_ROUNDING_MODE;
1848 if (GET_FP_CAUSE(env->fpu->fcr31) & (FP_OVERFLOW | FP_INVALID))
1853 set_float_rounding_mode(float_round_nearest_even, &env->fpu->fp_status);
1854 WT2 = float32_to_int32(FST0, &env->fpu->fp_status);
1855 RESTORE_ROUNDING_MODE;
1857 if (GET_FP_CAUSE(env->fpu->fcr31) & (FP_OVERFLOW | FP_INVALID))
/* trunc.l / trunc.w helpers: use the dedicated round-to-zero softfloat
   conversions, so no rounding-mode save/restore is needed.
   Function headers were lost in extraction. */
1863 DT2 = float64_to_int64_round_to_zero(FDT0, &env->fpu->fp_status);
1865 if (GET_FP_CAUSE(env->fpu->fcr31) & (FP_OVERFLOW | FP_INVALID))
1870 DT2 = float32_to_int64_round_to_zero(FST0, &env->fpu->fp_status);
1872 if (GET_FP_CAUSE(env->fpu->fcr31) & (FP_OVERFLOW | FP_INVALID))
1877 WT2 = float64_to_int32_round_to_zero(FDT0, &env->fpu->fp_status);
1879 if (GET_FP_CAUSE(env->fpu->fcr31) & (FP_OVERFLOW | FP_INVALID))
1884 WT2 = float32_to_int32_round_to_zero(FST0, &env->fpu->fp_status);
1886 if (GET_FP_CAUSE(env->fpu->fcr31) & (FP_OVERFLOW | FP_INVALID))
/* ceil.l / ceil.w helpers: force round-up, convert, then restore the
   guest rounding mode. Function headers were lost in extraction. */
1892 set_float_rounding_mode(float_round_up, &env->fpu->fp_status);
1893 DT2 = float64_to_int64(FDT0, &env->fpu->fp_status);
1894 RESTORE_ROUNDING_MODE;
1896 if (GET_FP_CAUSE(env->fpu->fcr31) & (FP_OVERFLOW | FP_INVALID))
1901 set_float_rounding_mode(float_round_up, &env->fpu->fp_status);
1902 DT2 = float32_to_int64(FST0, &env->fpu->fp_status);
1903 RESTORE_ROUNDING_MODE;
1905 if (GET_FP_CAUSE(env->fpu->fcr31) & (FP_OVERFLOW | FP_INVALID))
1910 set_float_rounding_mode(float_round_up, &env->fpu->fp_status);
1911 WT2 = float64_to_int32(FDT0, &env->fpu->fp_status);
1912 RESTORE_ROUNDING_MODE;
1914 if (GET_FP_CAUSE(env->fpu->fcr31) & (FP_OVERFLOW | FP_INVALID))
1919 set_float_rounding_mode(float_round_up, &env->fpu->fp_status);
1920 WT2 = float32_to_int32(FST0, &env->fpu->fp_status);
1921 RESTORE_ROUNDING_MODE;
1923 if (GET_FP_CAUSE(env->fpu->fcr31) & (FP_OVERFLOW | FP_INVALID))
/* floor.l / floor.w helpers: force round-down, convert, then restore the
   guest rounding mode. Function headers were lost in extraction. */
1929 set_float_rounding_mode(float_round_down, &env->fpu->fp_status);
1930 DT2 = float64_to_int64(FDT0, &env->fpu->fp_status);
1931 RESTORE_ROUNDING_MODE;
1933 if (GET_FP_CAUSE(env->fpu->fcr31) & (FP_OVERFLOW | FP_INVALID))
1938 set_float_rounding_mode(float_round_down, &env->fpu->fp_status);
1939 DT2 = float32_to_int64(FST0, &env->fpu->fp_status);
1940 RESTORE_ROUNDING_MODE;
1942 if (GET_FP_CAUSE(env->fpu->fcr31) & (FP_OVERFLOW | FP_INVALID))
1947 set_float_rounding_mode(float_round_down, &env->fpu->fp_status);
1948 WT2 = float64_to_int32(FDT0, &env->fpu->fp_status);
1949 RESTORE_ROUNDING_MODE;
1951 if (GET_FP_CAUSE(env->fpu->fcr31) & (FP_OVERFLOW | FP_INVALID))
1956 set_float_rounding_mode(float_round_down, &env->fpu->fp_status);
1957 WT2 = float32_to_int32(FST0, &env->fpu->fp_status);
1958 RESTORE_ROUNDING_MODE;
1960 if (GET_FP_CAUSE(env->fpu->fcr31) & (FP_OVERFLOW | FP_INVALID))
1964 /* unary operations, not modifying fp status */
/* NOTE(review): body partially lost in extraction; defines d/s/ps
   variants of sign-manipulation ops (abs/chs) that never touch
   fp_status, so no flag update is needed. */
1965 #define FLOAT_UNOP(name) \
1968 FDT2 = float64_ ## name(FDT0); \
1972 FST2 = float32_ ## name(FST0); \
1974 FLOAT_OP(name, ps) \
1976 FST2 = float32_ ## name(FST0); \
1977 FSTH2 = float32_ ## name(FSTH0); \
1983 /* MIPS specific unary operations */
/* recip: 1.0 / x (double, then single). Function headers lost in
   extraction. */
1986 set_float_exception_flags(0, &env->fpu->fp_status);
1987 FDT2 = float64_div(FLOAT_ONE64, FDT0, &env->fpu->fp_status);
1992 set_float_exception_flags(0, &env->fpu->fp_status);
1993 FST2 = float32_div(FLOAT_ONE32, FST0, &env->fpu->fp_status);
/* rsqrt: 1.0 / sqrt(x) (double, then single). */
1999 set_float_exception_flags(0, &env->fpu->fp_status);
2000 FDT2 = float64_sqrt(FDT0, &env->fpu->fp_status);
2001 FDT2 = float64_div(FLOAT_ONE64, FDT2, &env->fpu->fp_status);
2006 set_float_exception_flags(0, &env->fpu->fp_status);
2007 FST2 = float32_sqrt(FST0, &env->fpu->fp_status);
2008 FST2 = float32_div(FLOAT_ONE32, FST2, &env->fpu->fp_status);
/* recip1 (MIPS-3D reduced-precision reciprocal step) — implemented here
   as a full-precision divide, for d, s and ps formats. */
2014 set_float_exception_flags(0, &env->fpu->fp_status);
2015 FDT2 = float64_div(FLOAT_ONE64, FDT0, &env->fpu->fp_status);
2020 set_float_exception_flags(0, &env->fpu->fp_status);
2021 FST2 = float32_div(FLOAT_ONE32, FST0, &env->fpu->fp_status);
2024 FLOAT_OP(recip1, ps)
2026 set_float_exception_flags(0, &env->fpu->fp_status);
2027 FST2 = float32_div(FLOAT_ONE32, FST0, &env->fpu->fp_status);
2028 FSTH2 = float32_div(FLOAT_ONE32, FSTH0, &env->fpu->fp_status);
/* rsqrt1 (MIPS-3D reciprocal square-root step), d/s/ps formats. */
2034 set_float_exception_flags(0, &env->fpu->fp_status);
2035 FDT2 = float64_sqrt(FDT0, &env->fpu->fp_status);
2036 FDT2 = float64_div(FLOAT_ONE64, FDT2, &env->fpu->fp_status);
2041 set_float_exception_flags(0, &env->fpu->fp_status);
2042 FST2 = float32_sqrt(FST0, &env->fpu->fp_status);
2043 FST2 = float32_div(FLOAT_ONE32, FST2, &env->fpu->fp_status);
2046 FLOAT_OP(rsqrt1, ps)
2048 set_float_exception_flags(0, &env->fpu->fp_status);
2049 FST2 = float32_sqrt(FST0, &env->fpu->fp_status);
2050 FSTH2 = float32_sqrt(FSTH0, &env->fpu->fp_status);
2051 FST2 = float32_div(FLOAT_ONE32, FST2, &env->fpu->fp_status);
2052 FSTH2 = float32_div(FLOAT_ONE32, FSTH2, &env->fpu->fp_status);
2056 /* binary operations */
/* NOTE(review): defines do_float_<name>_{d,s,ps}; clears the softfloat
   flags, runs the op, updates FCR31, and substitutes the default QNaN
   when the invalid flag was raised. The add/sub/mul/div instantiation
   lines and the #undef were lost in extraction. */
2057 #define FLOAT_BINOP(name) \
2060 set_float_exception_flags(0, &env->fpu->fp_status); \
2061 FDT2 = float64_ ## name (FDT0, FDT1, &env->fpu->fp_status); \
2063 if (GET_FP_CAUSE(env->fpu->fcr31) & FP_INVALID) \
2064 DT2 = FLOAT_QNAN64; \
2068 set_float_exception_flags(0, &env->fpu->fp_status); \
2069 FST2 = float32_ ## name (FST0, FST1, &env->fpu->fp_status); \
2071 if (GET_FP_CAUSE(env->fpu->fcr31) & FP_INVALID) \
2072 WT2 = FLOAT_QNAN32; \
2074 FLOAT_OP(name, ps) \
2076 set_float_exception_flags(0, &env->fpu->fp_status); \
2077 FST2 = float32_ ## name (FST0, FST1, &env->fpu->fp_status); \
2078 FSTH2 = float32_ ## name (FSTH0, FSTH1, &env->fpu->fp_status); \
2080 if (GET_FP_CAUSE(env->fpu->fcr31) & FP_INVALID) { \
2081 WT2 = FLOAT_QNAN32; \
2082 WTH2 = FLOAT_QNAN32; \
2091 /* ternary operations */
/* Fused-style madd/msub: compute name1(op0, op1), then name2 with the
   third operand, in d, s and ps formats. */
2092 #define FLOAT_TERNOP(name1, name2) \
2093 FLOAT_OP(name1 ## name2, d) \
2095 FDT0 = float64_ ## name1 (FDT0, FDT1, &env->fpu->fp_status); \
2096 FDT2 = float64_ ## name2 (FDT0, FDT2, &env->fpu->fp_status); \
2098 FLOAT_OP(name1 ## name2, s) \
2100 FST0 = float32_ ## name1 (FST0, FST1, &env->fpu->fp_status); \
2101 FST2 = float32_ ## name2 (FST0, FST2, &env->fpu->fp_status); \
2103 FLOAT_OP(name1 ## name2, ps) \
2105 FST0 = float32_ ## name1 (FST0, FST1, &env->fpu->fp_status); \
2106 FSTH0 = float32_ ## name1 (FSTH0, FSTH1, &env->fpu->fp_status); \
2107 FST2 = float32_ ## name2 (FST0, FST2, &env->fpu->fp_status); \
2108 FSTH2 = float32_ ## name2 (FSTH0, FSTH2, &env->fpu->fp_status); \
/* Instantiate madd (mul+add) and msub (mul+sub). */
2110 FLOAT_TERNOP(mul, add)
2111 FLOAT_TERNOP(mul, sub)
2114 /* negated ternary operations */
/* nmadd/nmsub: same as FLOAT_TERNOP but the final result's sign is
   flipped with float*_chs. */
2115 #define FLOAT_NTERNOP(name1, name2) \
2116 FLOAT_OP(n ## name1 ## name2, d) \
2118 FDT0 = float64_ ## name1 (FDT0, FDT1, &env->fpu->fp_status); \
2119 FDT2 = float64_ ## name2 (FDT0, FDT2, &env->fpu->fp_status); \
2120 FDT2 = float64_chs(FDT2); \
2122 FLOAT_OP(n ## name1 ## name2, s) \
2124 FST0 = float32_ ## name1 (FST0, FST1, &env->fpu->fp_status); \
2125 FST2 = float32_ ## name2 (FST0, FST2, &env->fpu->fp_status); \
2126 FST2 = float32_chs(FST2); \
2128 FLOAT_OP(n ## name1 ## name2, ps) \
2130 FST0 = float32_ ## name1 (FST0, FST1, &env->fpu->fp_status); \
2131 FSTH0 = float32_ ## name1 (FSTH0, FSTH1, &env->fpu->fp_status); \
2132 FST2 = float32_ ## name2 (FST0, FST2, &env->fpu->fp_status); \
2133 FSTH2 = float32_ ## name2 (FSTH0, FSTH2, &env->fpu->fp_status); \
2134 FST2 = float32_chs(FST2); \
2135 FSTH2 = float32_chs(FSTH2); \
/* Instantiate nmadd and nmsub. */
2137 FLOAT_NTERNOP(mul, add)
2138 FLOAT_NTERNOP(mul, sub)
2139 #undef FLOAT_NTERNOP
2141 /* MIPS specific binary operations */
/* recip2 (MIPS-3D reciprocal step 2): -(a*b - 1.0), for d, s, ps.
   Function headers for d/s variants lost in extraction. */
2144 set_float_exception_flags(0, &env->fpu->fp_status);
2145 FDT2 = float64_mul(FDT0, FDT2, &env->fpu->fp_status);
2146 FDT2 = float64_chs(float64_sub(FDT2, FLOAT_ONE64, &env->fpu->fp_status));
2151 set_float_exception_flags(0, &env->fpu->fp_status);
2152 FST2 = float32_mul(FST0, FST2, &env->fpu->fp_status);
2153 FST2 = float32_chs(float32_sub(FST2, FLOAT_ONE32, &env->fpu->fp_status));
2156 FLOAT_OP(recip2, ps)
2158 set_float_exception_flags(0, &env->fpu->fp_status);
2159 FST2 = float32_mul(FST0, FST2, &env->fpu->fp_status);
2160 FSTH2 = float32_mul(FSTH0, FSTH2, &env->fpu->fp_status);
2161 FST2 = float32_chs(float32_sub(FST2, FLOAT_ONE32, &env->fpu->fp_status));
2162 FSTH2 = float32_chs(float32_sub(FSTH2, FLOAT_ONE32, &env->fpu->fp_status));
/* rsqrt2 (MIPS-3D reciprocal sqrt step 2): -((a*b - 1.0) / 2.0). */
2168 set_float_exception_flags(0, &env->fpu->fp_status);
2169 FDT2 = float64_mul(FDT0, FDT2, &env->fpu->fp_status);
2170 FDT2 = float64_sub(FDT2, FLOAT_ONE64, &env->fpu->fp_status);
2171 FDT2 = float64_chs(float64_div(FDT2, FLOAT_TWO64, &env->fpu->fp_status));
2176 set_float_exception_flags(0, &env->fpu->fp_status);
2177 FST2 = float32_mul(FST0, FST2, &env->fpu->fp_status);
2178 FST2 = float32_sub(FST2, FLOAT_ONE32, &env->fpu->fp_status);
2179 FST2 = float32_chs(float32_div(FST2, FLOAT_TWO32, &env->fpu->fp_status));
2182 FLOAT_OP(rsqrt2, ps)
2184 set_float_exception_flags(0, &env->fpu->fp_status);
2185 FST2 = float32_mul(FST0, FST2, &env->fpu->fp_status);
2186 FSTH2 = float32_mul(FSTH0, FSTH2, &env->fpu->fp_status);
2187 FST2 = float32_sub(FST2, FLOAT_ONE32, &env->fpu->fp_status);
2188 FSTH2 = float32_sub(FSTH2, FLOAT_ONE32, &env->fpu->fp_status);
2189 FST2 = float32_chs(float32_div(FST2, FLOAT_TWO32, &env->fpu->fp_status));
2190 FSTH2 = float32_chs(float32_div(FSTH2, FLOAT_TWO32, &env->fpu->fp_status));
/* addr.ps: pairwise add across the two paired-single operands. */
2196 set_float_exception_flags(0, &env->fpu->fp_status);
2197 FST2 = float32_add (FST0, FSTH0, &env->fpu->fp_status);
2198 FSTH2 = float32_add (FST1, FSTH1, &env->fpu->fp_status);
/* mulr.ps: pairwise multiply across the two paired-single operands. */
2204 set_float_exception_flags(0, &env->fpu->fp_status);
2205 FST2 = float32_mul (FST0, FSTH0, &env->fpu->fp_status);
2206 FSTH2 = float32_mul (FST1, FSTH1, &env->fpu->fp_status);
2210 /* compare operations */
/* NOTE(review): body partially lost in extraction. Defines
   do_cmp_d_<op> and do_cmpabs_d_<op>, which evaluate "cond" on
   FDT0/FDT1 (absolute values for the cmpabs variant) and set or clear
   FP condition code "cc" in FCR31 accordingly. */
2211 #define FOP_COND_D(op, cond) \
2212 void do_cmp_d_ ## op (long cc) \
2217 SET_FP_COND(cc, env->fpu); \
2219 CLEAR_FP_COND(cc, env->fpu); \
2221 void do_cmpabs_d_ ## op (long cc) \
2224 FDT0 = float64_abs(FDT0); \
2225 FDT1 = float64_abs(FDT1); \
2229 SET_FP_COND(cc, env->fpu); \
2231 CLEAR_FP_COND(cc, env->fpu); \
2234 int float64_is_unordered(int sig, float64 a, float64 b STATUS_PARAM)
2236 if (float64_is_signaling_nan(a) ||
2237 float64_is_signaling_nan(b) ||
2238 (sig && (float64_is_nan(a) || float64_is_nan(b)))) {
2239 float_raise(float_flag_invalid, status);
2241 } else if (float64_is_nan(a) || float64_is_nan(b)) {
/* c.cond.d instantiations. First group: quiet predicates (sig=0) —
   f/un/eq/ueq/olt/ult/ole/ule. Second group: signaling predicates
   (sig=1) — sf/ngle/seq/ngl/lt/nge/le/ngt, which also raise invalid on
   quiet NaNs. */
2248 /* NOTE: the comma operator will make "cond" to eval to false,
2249  * but float*_is_unordered() is still called. */
2250 FOP_COND_D(f, (float64_is_unordered(0, FDT1, FDT0, &env->fpu->fp_status), 0))
2251 FOP_COND_D(un, float64_is_unordered(0, FDT1, FDT0, &env->fpu->fp_status))
2252 FOP_COND_D(eq, !float64_is_unordered(0, FDT1, FDT0, &env->fpu->fp_status) && float64_eq(FDT0, FDT1, &env->fpu->fp_status))
2253 FOP_COND_D(ueq, float64_is_unordered(0, FDT1, FDT0, &env->fpu->fp_status) || float64_eq(FDT0, FDT1, &env->fpu->fp_status))
2254 FOP_COND_D(olt, !float64_is_unordered(0, FDT1, FDT0, &env->fpu->fp_status) && float64_lt(FDT0, FDT1, &env->fpu->fp_status))
2255 FOP_COND_D(ult, float64_is_unordered(0, FDT1, FDT0, &env->fpu->fp_status) || float64_lt(FDT0, FDT1, &env->fpu->fp_status))
2256 FOP_COND_D(ole, !float64_is_unordered(0, FDT1, FDT0, &env->fpu->fp_status) && float64_le(FDT0, FDT1, &env->fpu->fp_status))
2257 FOP_COND_D(ule, float64_is_unordered(0, FDT1, FDT0, &env->fpu->fp_status) || float64_le(FDT0, FDT1, &env->fpu->fp_status))
2258 /* NOTE: the comma operator will make "cond" to eval to false,
2259  * but float*_is_unordered() is still called. */
2260 FOP_COND_D(sf, (float64_is_unordered(1, FDT1, FDT0, &env->fpu->fp_status), 0))
2261 FOP_COND_D(ngle,float64_is_unordered(1, FDT1, FDT0, &env->fpu->fp_status))
2262 FOP_COND_D(seq, !float64_is_unordered(1, FDT1, FDT0, &env->fpu->fp_status) && float64_eq(FDT0, FDT1, &env->fpu->fp_status))
2263 FOP_COND_D(ngl, float64_is_unordered(1, FDT1, FDT0, &env->fpu->fp_status) || float64_eq(FDT0, FDT1, &env->fpu->fp_status))
2264 FOP_COND_D(lt, !float64_is_unordered(1, FDT1, FDT0, &env->fpu->fp_status) && float64_lt(FDT0, FDT1, &env->fpu->fp_status))
2265 FOP_COND_D(nge, float64_is_unordered(1, FDT1, FDT0, &env->fpu->fp_status) || float64_lt(FDT0, FDT1, &env->fpu->fp_status))
2266 FOP_COND_D(le, !float64_is_unordered(1, FDT1, FDT0, &env->fpu->fp_status) && float64_le(FDT0, FDT1, &env->fpu->fp_status))
2267 FOP_COND_D(ngt, float64_is_unordered(1, FDT1, FDT0, &env->fpu->fp_status) || float64_le(FDT0, FDT1, &env->fpu->fp_status))
/* Single-precision analogue of FOP_COND_D: defines do_cmp_s_<op> and
   do_cmpabs_s_<op>, setting/clearing FP condition code "cc".
   NOTE(review): macro body partially lost in extraction. */
2269 #define FOP_COND_S(op, cond) \
2270 void do_cmp_s_ ## op (long cc) \
2275 SET_FP_COND(cc, env->fpu); \
2277 CLEAR_FP_COND(cc, env->fpu); \
2279 void do_cmpabs_s_ ## op (long cc) \
2282 FST0 = float32_abs(FST0); \
2283 FST1 = float32_abs(FST1); \
2287 SET_FP_COND(cc, env->fpu); \
2289 CLEAR_FP_COND(cc, env->fpu); \
2292 flag float32_is_unordered(int sig, float32 a, float32 b STATUS_PARAM)
2294 if (float32_is_signaling_nan(a) ||
2295 float32_is_signaling_nan(b) ||
2296 (sig && (float32_is_nan(a) || float32_is_nan(b)))) {
2297 float_raise(float_flag_invalid, status);
2299 } else if (float32_is_nan(a) || float32_is_nan(b)) {
/* c.cond.s instantiations: quiet predicates (sig=0) then signaling
   predicates (sig=1), mirroring the double-precision set above. */
2306 /* NOTE: the comma operator will make "cond" to eval to false,
2307  * but float*_is_unordered() is still called. */
2308 FOP_COND_S(f, (float32_is_unordered(0, FST1, FST0, &env->fpu->fp_status), 0))
2309 FOP_COND_S(un, float32_is_unordered(0, FST1, FST0, &env->fpu->fp_status))
2310 FOP_COND_S(eq, !float32_is_unordered(0, FST1, FST0, &env->fpu->fp_status) && float32_eq(FST0, FST1, &env->fpu->fp_status))
2311 FOP_COND_S(ueq, float32_is_unordered(0, FST1, FST0, &env->fpu->fp_status) || float32_eq(FST0, FST1, &env->fpu->fp_status))
2312 FOP_COND_S(olt, !float32_is_unordered(0, FST1, FST0, &env->fpu->fp_status) && float32_lt(FST0, FST1, &env->fpu->fp_status))
2313 FOP_COND_S(ult, float32_is_unordered(0, FST1, FST0, &env->fpu->fp_status) || float32_lt(FST0, FST1, &env->fpu->fp_status))
2314 FOP_COND_S(ole, !float32_is_unordered(0, FST1, FST0, &env->fpu->fp_status) && float32_le(FST0, FST1, &env->fpu->fp_status))
2315 FOP_COND_S(ule, float32_is_unordered(0, FST1, FST0, &env->fpu->fp_status) || float32_le(FST0, FST1, &env->fpu->fp_status))
2316 /* NOTE: the comma operator will make "cond" to eval to false,
2317  * but float*_is_unordered() is still called. */
2318 FOP_COND_S(sf, (float32_is_unordered(1, FST1, FST0, &env->fpu->fp_status), 0))
2319 FOP_COND_S(ngle,float32_is_unordered(1, FST1, FST0, &env->fpu->fp_status))
2320 FOP_COND_S(seq, !float32_is_unordered(1, FST1, FST0, &env->fpu->fp_status) && float32_eq(FST0, FST1, &env->fpu->fp_status))
2321 FOP_COND_S(ngl, float32_is_unordered(1, FST1, FST0, &env->fpu->fp_status) || float32_eq(FST0, FST1, &env->fpu->fp_status))
2322 FOP_COND_S(lt, !float32_is_unordered(1, FST1, FST0, &env->fpu->fp_status) && float32_lt(FST0, FST1, &env->fpu->fp_status))
2323 FOP_COND_S(nge, float32_is_unordered(1, FST1, FST0, &env->fpu->fp_status) || float32_lt(FST0, FST1, &env->fpu->fp_status))
2324 FOP_COND_S(le, !float32_is_unordered(1, FST1, FST0, &env->fpu->fp_status) && float32_le(FST0, FST1, &env->fpu->fp_status))
2325 FOP_COND_S(ngt, float32_is_unordered(1, FST1, FST0, &env->fpu->fp_status) || float32_le(FST0, FST1, &env->fpu->fp_status))
/* Paired-single compare: evaluates "condl" on the low halves (FST*) and
   "condh" on the high halves (FSTH*), writing condition codes cc and
   cc+1 respectively. NOTE(review): macro body partially lost in
   extraction. */
2327 #define FOP_COND_PS(op, condl, condh) \
2328 void do_cmp_ps_ ## op (long cc) \
2334 SET_FP_COND(cc, env->fpu); \
2336 CLEAR_FP_COND(cc, env->fpu); \
2338 SET_FP_COND(cc + 1, env->fpu); \
2340 CLEAR_FP_COND(cc + 1, env->fpu); \
2342 void do_cmpabs_ps_ ## op (long cc) \
2345 FST0 = float32_abs(FST0); \
2346 FSTH0 = float32_abs(FSTH0); \
2347 FST1 = float32_abs(FST1); \
2348 FSTH1 = float32_abs(FSTH1); \
2353 SET_FP_COND(cc, env->fpu); \
2355 CLEAR_FP_COND(cc, env->fpu); \
2357 SET_FP_COND(cc + 1, env->fpu); \
2359 CLEAR_FP_COND(cc + 1, env->fpu); \
/* c.cond.ps instantiations: each supplies a low-half and a high-half
   condition; quiet predicates (sig=0) first, then signaling (sig=1). */
2362 /* NOTE: the comma operator will make "cond" to eval to false,
2363  * but float*_is_unordered() is still called. */
2364 FOP_COND_PS(f, (float32_is_unordered(0, FST1, FST0, &env->fpu->fp_status), 0),
2365 (float32_is_unordered(0, FSTH1, FSTH0, &env->fpu->fp_status), 0))
2366 FOP_COND_PS(un, float32_is_unordered(0, FST1, FST0, &env->fpu->fp_status),
2367 float32_is_unordered(0, FSTH1, FSTH0, &env->fpu->fp_status))
2368 FOP_COND_PS(eq, !float32_is_unordered(0, FST1, FST0, &env->fpu->fp_status) && float32_eq(FST0, FST1, &env->fpu->fp_status),
2369 !float32_is_unordered(0, FSTH1, FSTH0, &env->fpu->fp_status) && float32_eq(FSTH0, FSTH1, &env->fpu->fp_status))
2370 FOP_COND_PS(ueq, float32_is_unordered(0, FST1, FST0, &env->fpu->fp_status) || float32_eq(FST0, FST1, &env->fpu->fp_status),
2371 float32_is_unordered(0, FSTH1, FSTH0, &env->fpu->fp_status) || float32_eq(FSTH0, FSTH1, &env->fpu->fp_status))
2372 FOP_COND_PS(olt, !float32_is_unordered(0, FST1, FST0, &env->fpu->fp_status) && float32_lt(FST0, FST1, &env->fpu->fp_status),
2373 !float32_is_unordered(0, FSTH1, FSTH0, &env->fpu->fp_status) && float32_lt(FSTH0, FSTH1, &env->fpu->fp_status))
2374 FOP_COND_PS(ult, float32_is_unordered(0, FST1, FST0, &env->fpu->fp_status) || float32_lt(FST0, FST1, &env->fpu->fp_status),
2375 float32_is_unordered(0, FSTH1, FSTH0, &env->fpu->fp_status) || float32_lt(FSTH0, FSTH1, &env->fpu->fp_status))
2376 FOP_COND_PS(ole, !float32_is_unordered(0, FST1, FST0, &env->fpu->fp_status) && float32_le(FST0, FST1, &env->fpu->fp_status),
2377 !float32_is_unordered(0, FSTH1, FSTH0, &env->fpu->fp_status) && float32_le(FSTH0, FSTH1, &env->fpu->fp_status))
2378 FOP_COND_PS(ule, float32_is_unordered(0, FST1, FST0, &env->fpu->fp_status) || float32_le(FST0, FST1, &env->fpu->fp_status),
2379 float32_is_unordered(0, FSTH1, FSTH0, &env->fpu->fp_status) || float32_le(FSTH0, FSTH1, &env->fpu->fp_status))
2380 /* NOTE: the comma operator will make "cond" to eval to false,
2381  * but float*_is_unordered() is still called. */
2382 FOP_COND_PS(sf, (float32_is_unordered(1, FST1, FST0, &env->fpu->fp_status), 0),
2383 (float32_is_unordered(1, FSTH1, FSTH0, &env->fpu->fp_status), 0))
2384 FOP_COND_PS(ngle,float32_is_unordered(1, FST1, FST0, &env->fpu->fp_status),
2385 float32_is_unordered(1, FSTH1, FSTH0, &env->fpu->fp_status))
2386 FOP_COND_PS(seq, !float32_is_unordered(1, FST1, FST0, &env->fpu->fp_status) && float32_eq(FST0, FST1, &env->fpu->fp_status),
2387 !float32_is_unordered(1, FSTH1, FSTH0, &env->fpu->fp_status) && float32_eq(FSTH0, FSTH1, &env->fpu->fp_status))
2388 FOP_COND_PS(ngl, float32_is_unordered(1, FST1, FST0, &env->fpu->fp_status) || float32_eq(FST0, FST1, &env->fpu->fp_status),
2389 float32_is_unordered(1, FSTH1, FSTH0, &env->fpu->fp_status) || float32_eq(FSTH0, FSTH1, &env->fpu->fp_status))
2390 FOP_COND_PS(lt, !float32_is_unordered(1, FST1, FST0, &env->fpu->fp_status) && float32_lt(FST0, FST1, &env->fpu->fp_status),
2391 !float32_is_unordered(1, FSTH1, FSTH0, &env->fpu->fp_status) && float32_lt(FSTH0, FSTH1, &env->fpu->fp_status))
2392 FOP_COND_PS(nge, float32_is_unordered(1, FST1, FST0, &env->fpu->fp_status) || float32_lt(FST0, FST1, &env->fpu->fp_status),
2393 float32_is_unordered(1, FSTH1, FSTH0, &env->fpu->fp_status) || float32_lt(FSTH0, FSTH1, &env->fpu->fp_status))
2394 FOP_COND_PS(le, !float32_is_unordered(1, FST1, FST0, &env->fpu->fp_status) && float32_le(FST0, FST1, &env->fpu->fp_status),
2395 !float32_is_unordered(1, FSTH1, FSTH0, &env->fpu->fp_status) && float32_le(FSTH0, FSTH1, &env->fpu->fp_status))
2396 FOP_COND_PS(ngt, float32_is_unordered(1, FST1, FST0, &env->fpu->fp_status) || float32_le(FST0, FST1, &env->fpu->fp_status),
2397 float32_is_unordered(1, FSTH1, FSTH0, &env->fpu->fp_status) || float32_le(FSTH0, FSTH1, &env->fpu->fp_status))