/* RV64IMACFD_extension_enabled: test the misa bit (CSR 0x301, index 769) for the given extension letter; bit index = extension - 'A' */
return (*((RV64IMACFD*)cpu)->CSR[769LL] >> (extension - 65ULL)) & 1ULL;
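A worked example of that misa lookup (extension_enabled here is a plain-C stand-in for illustration, not the ETISS function):

#include <stdint.h>
#include <stdio.h>

/* misa encodes extensions A..Z in bits 0..25; 'A' -> bit 0, 'D' -> bit 3, 'F' -> bit 5, ... */
static int extension_enabled(uint64_t misa, char ext) { return (misa >> (ext - 'A')) & 1; }

int main(void) {
    uint64_t misa = (1u << 0) | (1u << 2) | (1u << 3) | (1u << 5) | (1u << 8) | (1u << 12); /* A, C, D, F, I, M */
    printf("F: %d, V: %d\n", extension_enabled(misa, 'F'), extension_enabled(misa, 'V')); /* F: 1, V: 0 */
    return 0;
}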
/* RV64IMACFD_get_rm: the dynamic rounding mode is taken from FCSR.frm (bits 7:5) */
rm = ((((((RV64IMACFD*)cpu)->FCSR) >> (5ULL)) & 7ULL)) & 0x7;
/* mask construction in RV64IMACFD_mstatus_mask / RV64IMACFD_sstatus_mask: writable bits are OR'd in, some of them conditionally */
mask = mask | 5767458ULL; /* 0x580122 */
mask = mask | 24576LL; /* 0x6000: FS field */
mask = mask | 98304LL; /* 0x18000: XS field */
mask = mask | 262144LL; /* 0x40000: SUM bit */
/* frm read: fcsr is CSR 0x003 (index 3), the rounding mode sits in bits 7:5 */
return (*((RV64IMACFD*)cpu)->CSR[3LL] >> 5ULL) & 7ULL;
/* RV64IMACFD_csr_read: mstatus (0x300, index 768) and sstatus (0x100, index 256) report SXL = UXL = 2 (64-bit) via bits 35 and 33 */
if (csr == 768LL || csr == 256LL) {
return *((RV64IMACFD*)cpu)->CSR[768LL] | 8589934592ULL | 34359738368ULL;
/* misa (0x301, index 769) reports MXL = 2 in its top two bits */
return (((2ULL) << 62) | ((((*((RV64IMACFD*)cpu)->CSR[769LL]) >> (0LL)) & 4611686018427387903ULL)));
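For reference, 4611686018427387903 is 2^62 - 1; the misa read view restated as a standalone sketch (misa_read is an illustrative helper, the stored value is chosen arbitrarily):

#include <stdint.h>
#include <stdio.h>

/* Read view of misa: MXL (XLEN code, 2 = 64-bit) lives in the top two bits, extensions in the low bits. */
static uint64_t misa_read(uint64_t stored_misa) {
    return (2ull << 62) | (stored_misa & ((1ull << 62) - 1));
}

int main(void) {
    uint64_t misa = misa_read(0x112d); /* I, M, A, C, F, D extension bits */
    printf("0x%llx\n", (unsigned long long)misa); /* 0x800000000000112d */
    return 0;
}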
etiss_coverage_count(10, 520, 505, 519, 513, 508, 514, 517, 515, 516, 518);
/* RV64IMACFD_csr_write: a write to frm (csr 2) lands in fcsr bits 7:5 and preserves fflags (bits 4:0) */
else if (csr == 2LL) {
*((RV64IMACFD*)cpu)->CSR[3LL] = ((val & 7ULL) << 5ULL) | (*((RV64IMACFD*)cpu)->CSR[3LL] & 31ULL);
etiss_coverage_count(14, 538, 523, 537, 529, 526, 524, 525, 527, 528, 530, 535, 533, 534, 536);
else if (csr == 3LL) { /* fcsr */
else if (csr == 768LL) { /* mstatus */
else if (csr == 256LL) { /* sstatus */
else if (csr != 769LL) { /* every other CSR except misa */
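A minimal standalone illustration of that fcsr aliasing (write_frm and write_fflags are hypothetical helper names for this sketch, not ETISS API):

#include <stdint.h>
#include <stdio.h>

/* fcsr layout: fflags in bits 4:0, frm in bits 7:5 */
static uint64_t write_frm(uint64_t fcsr, uint64_t val)    { return ((val & 7u) << 5) | (fcsr & 31u); }
static uint64_t write_fflags(uint64_t fcsr, uint64_t val) { return (fcsr & ~31ull) | (val & 31u); }

int main(void) {
    uint64_t fcsr = 0;
    fcsr = write_fflags(fcsr, 0x11); /* NV and NX sticky flags */
    fcsr = write_frm(fcsr, 3);       /* RUP rounding mode */
    printf("fcsr = 0x%llx\n", (unsigned long long)fcsr); /* prints 0x71 */
    return 0;
}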
/* RV64IMACFD_get_field: (mask & ~(mask << 1)) isolates the lowest set bit of the mask; dividing by it shifts the field down to bit 0 */
return (reg & mask) / (mask & ~((mask << 1ULL)));
etiss_coverage_count(13, 583, 573, 571, 572, 574, 581, 575, 580, 578, 576, 577, 579, 582);
/* RV64IMACFD_set_field: multiplying by the lowest set bit of the mask shifts the new value up into the field position */
return ((reg & ~(mask)) | ((val * (mask & ~((mask << 1ULL)))) & mask));
etiss_coverage_count(21, 605, 589, 586, 588, 587, 590, 603, 600, 591, 598, 592, 597, 595, 593, 594, 596, 599, 601, 602, 604, 606);
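A quick standalone check of the get_field/set_field trick (the field mask is chosen for illustration: mstatus.FS at bits 14:13):

#include <stdint.h>
#include <stdio.h>

static uint64_t get_field(uint64_t reg, uint64_t mask)               { return (reg & mask) / (mask & ~(mask << 1)); }
static uint64_t set_field(uint64_t reg, uint64_t mask, uint64_t val) { return (reg & ~mask) | ((val * (mask & ~(mask << 1))) & mask); }

int main(void) {
    const uint64_t FS = 0x6000; /* bits 14:13 */
    uint64_t mstatus = 0;
    mstatus = set_field(mstatus, FS, 3); /* FS = Dirty */
    printf("mstatus = 0x%llx\n", (unsigned long long)mstatus);                 /* 0x6000 */
    printf("FS      = %llu\n", (unsigned long long)get_field(mstatus, FS));    /* 3 */
    return 0;
}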
/* RV64IMACFD_ctz binary search: (val << k) == 0 means the low (64 - k) bits of val are all zero */
if ((val << 32ULL) == 0LL) {
if ((val << 48ULL) == 0LL) {
if ((val << 56ULL) == 0LL) {
if ((val << 60ULL) == 0LL) {
if ((val << 62ULL) == 0LL) {
if ((val << 63ULL) == 0LL) {
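A self-contained version of the same binary search (a sketch; ctz64 is an illustrative name, not the generated RV64IMACFD_ctz verbatim):

#include <stdint.h>
#include <stdio.h>

/* Count trailing zeros of a 64-bit value; returns 64 for val == 0 (a choice made in this sketch). */
static uint8_t ctz64(uint64_t val) {
    if (val == 0) return 64;
    uint8_t n = 0;
    if ((val << 32) == 0) { n += 32; val >>= 32; }
    if ((val << 48) == 0) { n += 16; val >>= 16; }
    if ((val << 56) == 0) { n +=  8; val >>=  8; }
    if ((val << 60) == 0) { n +=  4; val >>=  4; }
    if ((val << 62) == 0) { n +=  2; val >>=  2; }
    if ((val << 63) == 0) { n +=  1; }
    return n;
}

int main(void) {
    printf("%u %u %u\n", ctz64(1), ctz64(0x80), ctz64(1ull << 63)); /* 0 7 63 */
    return 0;
}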
/* RV64IMACFD_raise: bit 63 of mcause marks an interrupt; the low bits select the cause */
etiss_int32 irq2 = (mcause & 9223372036854775808ULL) != 0LL;
bit = bit & 9223372036854775807ULL;
/* trap delegated to S-mode: vector through stvec (CSR 0x105, index 261) */
if (((RV64IMACFD*)cpu)->PRIV <= 1LL && (deleg >> bit) & 1ULL) {
vector = ((*((RV64IMACFD*)cpu)->CSR[261LL] & 1ULL) && irq2) ? (bit * 4ULL) : (0LL);
etiss_coverage_count(13, 792, 778, 791, 786, 783, 781, 782, 784, 785, 789, 787, 788, 790);
cpu->nextPc = (*((RV64IMACFD*)cpu)->CSR[261LL] & -2LL) + vector;
/* otherwise trap to M-mode: vector through mtvec (CSR 0x305, index 773) */
vector = ((*((RV64IMACFD*)cpu)->CSR[773LL] & 1ULL) && irq2) ? (bit * 4ULL) : (0LL);
etiss_coverage_count(13, 861, 847, 860, 855, 852, 850, 851, 853, 854, 858, 856, 857, 859);
cpu->nextPc = (*((RV64IMACFD*)cpu)->CSR[773LL] & -2LL) + vector;
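The tvec handling above reduces to the following standalone sketch (trap_target and its arguments are illustrative names):

#include <stdint.h>
#include <stdio.h>

/* Only bit 0 of tvec is inspected here: set = vectored (interrupts jump to base + 4*cause), clear = direct; base = tvec with the low bit cleared. */
static uint64_t trap_target(uint64_t tvec, uint64_t cause_bit, int is_interrupt) {
    uint64_t base   = tvec & ~1ull;
    uint64_t vector = ((tvec & 1ull) && is_interrupt) ? cause_bit * 4 : 0;
    return base + vector;
}

int main(void) {
    /* vectored mtvec at 0x80000000, machine timer interrupt (cause 7) */
    printf("0x%llx\n", (unsigned long long)trap_target(0x80000001ull, 7, 1)); /* 0x8000001c */
    /* exceptions always use the base address */
    printf("0x%llx\n", (unsigned long long)trap_target(0x80000001ull, 2, 0)); /* 0x80000000 */
    return 0;
}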
/* RV64IMACFD_translate_exc_code: map ETISS return codes (negative values) onto RISC-V exception causes */
if (cause == -2147483648LL) {
else if (cause == -5LL) {
else if (cause == -14LL) {
else if (cause == -6LL) {
else if (cause == -15LL) {
else if (cause == -7LL) {
else if (cause == -9LL) {
/* RV64IMACFD_calc_irq_mcause: nothing to do unless an interrupt is both pending and enabled */
if (!(pending_interrupts)) {
if (enabled_interrupts == 0LL) {
/* retry with the delegated interrupts (deleg) gated by the S-mode enable */
enabled_interrupts = pending_interrupts & deleg & -(s_enabled);
if (enabled_interrupts) {
/* pick the highest-priority source: platform/custom interrupts (bits >= 12) first, then MEI, MSI, MTI, SEI, SSI, STI, and the remaining bits in a fixed order */
if (enabled_interrupts >> 12ULL) {
enabled_interrupts = enabled_interrupts >> 12ULL << 12ULL;
else if (enabled_interrupts & 2048LL) { /* MEIP, bit 11 */
enabled_interrupts = 2048LL;
else if (enabled_interrupts & 8LL) { /* MSIP, bit 3 */
enabled_interrupts = 8LL;
else if (enabled_interrupts & 128LL) { /* MTIP, bit 7 */
enabled_interrupts = 128LL;
else if (enabled_interrupts & 512LL) { /* SEIP, bit 9 */
enabled_interrupts = 512LL;
else if (enabled_interrupts & 2LL) { /* SSIP, bit 1 */
enabled_interrupts = 2LL;
else if (enabled_interrupts & 32LL) { /* STIP, bit 5 */
enabled_interrupts = 32LL;
else if (enabled_interrupts & 8192LL) { /* bit 13 */
enabled_interrupts = 8192LL;
else if (enabled_interrupts & 1024LL) { /* bit 10 */
enabled_interrupts = 1024LL;
else if (enabled_interrupts & 4LL) { /* bit 2 */
enabled_interrupts = 4LL;
else if (enabled_interrupts & 64LL) { /* bit 6 */
enabled_interrupts = 64LL;
/* mcause for an interrupt: bit 63 set, low bits = index of the selected source */
return 9223372036854775808ULL | RV64IMACFD_ctz(enabled_interrupts);
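The same selection, condensed into a standalone sketch (select_irq is an illustrative helper covering only the standard mip bits shown above, not the generated code):

#include <stdint.h>
#include <stdio.h>

/* Priority: custom interrupts (bits >= 12) first, then MEI, MSI, MTI, SEI, SSI, STI. */
static uint64_t select_irq(uint64_t enabled) {
    static const int prio[] = { 11, 3, 7, 9, 1, 5 };
    if (enabled >> 12) return (enabled >> 12) << 12; /* keep all custom bits */
    for (unsigned i = 0; i < sizeof prio / sizeof prio[0]; i++)
        if (enabled & (1ull << prio[i])) return 1ull << prio[i];
    return 0;
}

int main(void) {
    uint64_t enabled = (1ull << 7) | (1ull << 9);  /* MTIP and SEIP both pending and enabled */
    uint64_t chosen  = select_irq(enabled);        /* MTIP wins: bit 7 */
    uint64_t mcause  = 0x8000000000000000ull | __builtin_ctzll(chosen);
    printf("mcause = 0x%llx\n", (unsigned long long)mcause); /* 0x8000000000000007 */
    return 0;
}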
/* RV64IMACFD_mulh: signed x signed, keeps the upper 64 bits of the 128-bit product */
etiss_int128 res = (etiss_int128)(x) * (etiss_int128)(y);
/* RV64IMACFD_mulhsu: signed x unsigned */
etiss_int128 res = (etiss_int128)(x) * (etiss_uint128)(y);
/* RV64IMACFD_mulhu: unsigned x unsigned */
etiss_uint128 res = (etiss_uint128)(x) * (etiss_uint128)(y);
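A self-contained equivalent using the compiler's 128-bit integer support (GCC/Clang __int128; a sketch of the same idea, not the ETISS types):

#include <stdint.h>
#include <stdio.h>

/* Upper 64 bits of the full 128-bit products, as used by MULH / MULHSU / MULHU. */
static int64_t  mulh  (int64_t x, int64_t y)   { return (int64_t)(((__int128)x * (__int128)y) >> 64); }
static int64_t  mulhsu(int64_t x, uint64_t y)  { return (int64_t)(((__int128)x * (unsigned __int128)y) >> 64); }
static uint64_t mulhu (uint64_t x, uint64_t y) { return (uint64_t)(((unsigned __int128)x * (unsigned __int128)y) >> 64); }

int main(void) {
    printf("%lld\n", (long long)mulh(-1, -1));                  /* 0 */
    printf("%llu\n", (unsigned long long)mulhu(~0ull, ~0ull));  /* 18446744073709551614 */
    printf("%lld\n", (long long)mulhsu(-1, 2));                 /* -1 */
    return 0;
}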
etiss_uint64 RV64IMACFD_sstatus_mask(ETISS_CPU *const cpu, ETISS_System *const system, void *const *const plugin_pointers)
etiss_uint64 RV64IMACFD_set_field(etiss_uint64 reg, etiss_uint64 mask, etiss_uint64 val)
void RV64IMACFD_translate_exc_code(ETISS_CPU *const cpu, ETISS_System *const system, void *const *const plugin_pointers, etiss_int32 cause)
etiss_uint64 RV64IMACFD_mulhu(etiss_uint64 x, etiss_uint64 y)
etiss_uint64 RV64IMACFD_csr_read(ETISS_CPU *const cpu, ETISS_System *const system, void *const *const plugin_pointers, etiss_uint32 csr)
etiss_uint64 RV64IMACFD_calc_irq_mcause(ETISS_CPU *const cpu, ETISS_System *const system, void *const *const plugin_pointers)
etiss_uint64 RV64IMACFD_mstatus_mask(ETISS_CPU *const cpu, ETISS_System *const system, void *const *const plugin_pointers)
void RV64IMACFD_csr_write(ETISS_CPU *const cpu, ETISS_System *const system, void *const *const plugin_pointers, etiss_uint32 csr, etiss_uint64 val)
etiss_uint64 RV64IMACFD_get_field(etiss_uint64 reg, etiss_uint64 mask)
etiss_uint8 RV64IMACFD_extension_enabled(ETISS_CPU *const cpu, ETISS_System *const system, void *const *const plugin_pointers, etiss_int8 extension)
void RV64IMACFD_check_irq(ETISS_CPU *const cpu, ETISS_System *const system, void *const *const plugin_pointers)
etiss_int64 RV64IMACFD_mulhsu(etiss_int64 x, etiss_uint64 y)
etiss_int64 RV64IMACFD_mulh(etiss_int64 x, etiss_int64 y)
etiss_uint8 RV64IMACFD_ctz(etiss_uint64 val)
void RV64IMACFD_raise(ETISS_CPU *const cpu, ETISS_System *const system, void *const *const plugin_pointers, etiss_int32 irq, etiss_uint64 mcause)
etiss_uint8 RV64IMACFD_get_rm(ETISS_CPU *const cpu, ETISS_System *const system, void *const *const plugin_pointers, etiss_uint8 rm)
#define etiss_coverage_count(...)
etiss_uint64 etiss_get_instret(ETISS_CPU *const cpu, ETISS_System *const system, void *const *const plugin_pointers)
etiss_uint64 etiss_get_time()
etiss_uint64 etiss_get_cycles(ETISS_CPU *const cpu, ETISS_System *const system, void *const *const plugin_pointers)
Basic CPU state structure needed for execution of any CPU architecture.
etiss_uint64 instructionPointer: pointer to the next instruction.
etiss_uint32 return_pending
Memory access and time synchronization functions.
Generated on Thu, 24 Oct 2024 10:16:12 +0200.