/* cpuport.c — RT-Thread Cortex-M4 CPU port */
  1. /*
  2. * Copyright (c) 2006-2021, RT-Thread Development Team
  3. *
  4. * SPDX-License-Identifier: Apache-2.0
  5. *
  6. * Change Logs:
  7. * Date Author Notes
  8. * 2011-10-21 Bernard the first version.
  9. * 2011-10-27 aozima update for cortex-M4 FPU.
  10. * 2011-12-31 aozima fixed stack align issues.
  11. * 2012-01-01 aozima support context switch load/store FPU register.
  12. * 2012-12-11 lgnq fixed the coding style.
  13. * 2012-12-23 aozima stack addr align to 8byte.
  14. * 2012-12-29 Bernard Add exception hook.
  15. * 2013-06-23 aozima support lazy stack optimized.
  16. * 2018-07-24 aozima enhancement hard fault exception handler.
  17. * 2019-07-03 yangjie add __rt_ffs() for armclang.
  18. * 2022-06-12 jonas fixed __rt_ffs() for armclang.
  19. */
  20. #include <rtthread.h>
  21. #define DBG_TAG "cortex.m4"
  22. #define DBG_LVL DBG_INFO
  23. #include <rtdbg.h>
  24. #if /* ARMCC */ ( (defined ( __CC_ARM ) && defined ( __TARGET_FPU_VFP )) \
  25. /* Clang */ || (defined ( __clang__ ) && defined ( __VFP_FP__ ) && !defined(__SOFTFP__)) \
  26. /* IAR */ || (defined ( __ICCARM__ ) && defined ( __ARMVFP__ )) \
  27. /* GNU */ || (defined ( __GNUC__ ) && defined ( __VFP_FP__ ) && !defined(__SOFTFP__)) )
  28. #define USE_FPU 1
  29. #else
  30. #define USE_FPU 0
  31. #endif
/* exception and interrupt handler table */
/* Shared with the port's context-switch assembly (defined users are not in
 * this file — presumably context_*.S; confirm there). */
rt_uint32_t rt_interrupt_from_thread;        /* SP save slot of the thread switched out — TODO confirm in port asm */
rt_uint32_t rt_interrupt_to_thread;          /* SP save slot of the thread switched in — TODO confirm in port asm */
rt_uint32_t rt_thread_switch_interrupt_flag; /* non-zero while a context switch is pending — TODO confirm in port asm */
/* exception hook: invoked from rt_hw_hard_fault_exception(); an RT_EOK
 * return suppresses the register dump and resumes execution. */
static rt_err_t (*rt_exception_hook)(void *context) = RT_NULL;
/* Basic hardware exception frame: the registers the Cortex-M core pushes
 * automatically on exception entry, in stacking order (lowest address
 * first). Member order must not change — it mirrors the hardware layout. */
struct exception_stack_frame
{
    rt_uint32_t r0;   /* first argument register of the interrupted code */
    rt_uint32_t r1;
    rt_uint32_t r2;
    rt_uint32_t r3;
    rt_uint32_t r12;
    rt_uint32_t lr;   /* link register of the interrupted code */
    rt_uint32_t pc;   /* address at which the exception was taken */
    rt_uint32_t psr;  /* program status register */
};
/* Full software-saved thread context: the callee-saved core registers
 * pushed by the port's context-switch code, followed by the hardware
 * exception frame. This is the layout rt_hw_stack_init() builds. */
struct stack_frame
{
#if USE_FPU
    rt_uint32_t flag; /* FPU-context marker — NOTE(review): written by context-switch asm, confirm meaning there */
#endif /* USE_FPU */
    /* r4 ~ r11 register */
    rt_uint32_t r4;
    rt_uint32_t r5;
    rt_uint32_t r6;
    rt_uint32_t r7;
    rt_uint32_t r8;
    rt_uint32_t r9;
    rt_uint32_t r10;
    rt_uint32_t r11;
    struct exception_stack_frame exception_stack_frame;
};
/* Extended hardware exception frame: the basic frame plus the caller-saved
 * FPU registers (S0-S15, FPSCR) the core stacks when an FPU context is
 * active. Member order must not change — it mirrors the hardware layout. */
struct exception_stack_frame_fpu
{
    rt_uint32_t r0;
    rt_uint32_t r1;
    rt_uint32_t r2;
    rt_uint32_t r3;
    rt_uint32_t r12;
    rt_uint32_t lr;
    rt_uint32_t pc;
    rt_uint32_t psr;
#if USE_FPU
    /* FPU register */
    rt_uint32_t S0;
    rt_uint32_t S1;
    rt_uint32_t S2;
    rt_uint32_t S3;
    rt_uint32_t S4;
    rt_uint32_t S5;
    rt_uint32_t S6;
    rt_uint32_t S7;
    rt_uint32_t S8;
    rt_uint32_t S9;
    rt_uint32_t S10;
    rt_uint32_t S11;
    rt_uint32_t S12;
    rt_uint32_t S13;
    rt_uint32_t S14;
    rt_uint32_t S15;
    rt_uint32_t FPSCR;   /* floating-point status and control register */
    rt_uint32_t NO_NAME; /* reserved word — keeps the stacked frame 8-byte aligned */
#endif
};
/* Software-saved context for a thread carrying FPU state: flag word,
 * callee-saved core registers, callee-saved FPU registers (s16 ~ s31),
 * then the hardware-stacked extended exception frame. Unlike struct
 * stack_frame, the flag word is present unconditionally here. */
struct stack_frame_fpu
{
    rt_uint32_t flag; /* FPU-context marker — NOTE(review): written by context-switch asm, confirm meaning there */
    /* r4 ~ r11 register */
    rt_uint32_t r4;
    rt_uint32_t r5;
    rt_uint32_t r6;
    rt_uint32_t r7;
    rt_uint32_t r8;
    rt_uint32_t r9;
    rt_uint32_t r10;
    rt_uint32_t r11;
#if USE_FPU
    /* FPU register s16 ~ s31 */
    rt_uint32_t s16;
    rt_uint32_t s17;
    rt_uint32_t s18;
    rt_uint32_t s19;
    rt_uint32_t s20;
    rt_uint32_t s21;
    rt_uint32_t s22;
    rt_uint32_t s23;
    rt_uint32_t s24;
    rt_uint32_t s25;
    rt_uint32_t s26;
    rt_uint32_t s27;
    rt_uint32_t s28;
    rt_uint32_t s29;
    rt_uint32_t s30;
    rt_uint32_t s31;
#endif
    struct exception_stack_frame_fpu exception_stack_frame;
};
  130. rt_uint8_t *rt_hw_stack_init(void *tentry,
  131. void *parameter,
  132. rt_uint8_t *stack_addr,
  133. void *texit)
  134. {
  135. struct stack_frame *stack_frame;
  136. rt_uint8_t *stk;
  137. unsigned long i;
  138. stk = stack_addr + sizeof(rt_uint32_t);
  139. stk = (rt_uint8_t *)RT_ALIGN_DOWN((rt_uint32_t)stk, 8);
  140. stk -= sizeof(struct stack_frame);
  141. stack_frame = (struct stack_frame *)stk;
  142. /* init all register */
  143. for (i = 0; i < sizeof(struct stack_frame) / sizeof(rt_uint32_t); i ++)
  144. {
  145. ((rt_uint32_t *)stack_frame)[i] = 0xdeadbeef;
  146. }
  147. stack_frame->exception_stack_frame.r0 = (unsigned long)parameter; /* r0 : argument */
  148. stack_frame->exception_stack_frame.r1 = 0; /* r1 */
  149. stack_frame->exception_stack_frame.r2 = 0; /* r2 */
  150. stack_frame->exception_stack_frame.r3 = 0; /* r3 */
  151. stack_frame->exception_stack_frame.r12 = 0; /* r12 */
  152. stack_frame->exception_stack_frame.lr = (unsigned long)texit; /* lr */
  153. stack_frame->exception_stack_frame.pc = (unsigned long)tentry; /* entry point, pc */
  154. stack_frame->exception_stack_frame.psr = 0x01000000L; /* PSR */
  155. #if USE_FPU
  156. stack_frame->flag = 0;
  157. #endif /* USE_FPU */
  158. /* return task's current stack address */
  159. return stk;
  160. }
  161. /**
  162. * This function set the hook, which is invoked on fault exception handling.
  163. *
  164. * @param exception_handle the exception handling hook function.
  165. */
  166. void rt_hw_exception_install(rt_err_t (*exception_handle)(void *context))
  167. {
  168. rt_exception_hook = exception_handle;
  169. }
/* Cortex-M System Control Block registers (memory-mapped, fixed addresses) */
#define SCB_CFSR        (*(volatile const unsigned *)0xE000ED28)       /* Configurable Fault Status Register */
#define SCB_HFSR        (*(volatile const unsigned *)0xE000ED2C)       /* HardFault Status Register */
#define SCB_MMAR        (*(volatile const unsigned *)0xE000ED34)       /* MemManage Fault Address Register */
#define SCB_BFAR        (*(volatile const unsigned *)0xE000ED38)       /* Bus Fault Address Register */
#define SCB_AIRCR       (*(volatile unsigned long *)0xE000ED0C)        /* Application Interrupt and Reset Control Register */
#define SCB_RESET_VALUE 0x05FA0004                                     /* VECTKEY (0x05FA) | SYSRESETREQ: write to SCB_AIRCR to reset the CPU */

/* Byte/halfword views into the three sub-fields of SCB_CFSR */
#define SCB_CFSR_MFSR   (*(volatile const unsigned char *)0xE000ED28)  /* Memory-management Fault Status Register */
#define SCB_CFSR_BFSR   (*(volatile const unsigned char *)0xE000ED29)  /* Bus Fault Status Register */
#define SCB_CFSR_UFSR   (*(volatile const unsigned short *)0xE000ED2A) /* Usage Fault Status Register */
  179. #ifdef RT_USING_FINSH
  180. static void usage_fault_track(void)
  181. {
  182. rt_kprintf("usage fault:\n");
  183. rt_kprintf("SCB_CFSR_UFSR:0x%02X ", SCB_CFSR_UFSR);
  184. if(SCB_CFSR_UFSR & (1<<0))
  185. {
  186. /* [0]:UNDEFINSTR */
  187. rt_kprintf("UNDEFINSTR ");
  188. }
  189. if(SCB_CFSR_UFSR & (1<<1))
  190. {
  191. /* [1]:INVSTATE */
  192. rt_kprintf("INVSTATE ");
  193. }
  194. if(SCB_CFSR_UFSR & (1<<2))
  195. {
  196. /* [2]:INVPC */
  197. rt_kprintf("INVPC ");
  198. }
  199. if(SCB_CFSR_UFSR & (1<<3))
  200. {
  201. /* [3]:NOCP */
  202. rt_kprintf("NOCP ");
  203. }
  204. if(SCB_CFSR_UFSR & (1<<8))
  205. {
  206. /* [8]:UNALIGNED */
  207. rt_kprintf("UNALIGNED ");
  208. }
  209. if(SCB_CFSR_UFSR & (1<<9))
  210. {
  211. /* [9]:DIVBYZERO */
  212. rt_kprintf("DIVBYZERO ");
  213. }
  214. rt_kprintf("\n");
  215. }
  216. static void bus_fault_track(void)
  217. {
  218. rt_kprintf("bus fault:\n");
  219. rt_kprintf("SCB_CFSR_BFSR:0x%02X ", SCB_CFSR_BFSR);
  220. if(SCB_CFSR_BFSR & (1<<0))
  221. {
  222. /* [0]:IBUSERR */
  223. rt_kprintf("IBUSERR ");
  224. }
  225. if(SCB_CFSR_BFSR & (1<<1))
  226. {
  227. /* [1]:PRECISERR */
  228. rt_kprintf("PRECISERR ");
  229. }
  230. if(SCB_CFSR_BFSR & (1<<2))
  231. {
  232. /* [2]:IMPRECISERR */
  233. rt_kprintf("IMPRECISERR ");
  234. }
  235. if(SCB_CFSR_BFSR & (1<<3))
  236. {
  237. /* [3]:UNSTKERR */
  238. rt_kprintf("UNSTKERR ");
  239. }
  240. if(SCB_CFSR_BFSR & (1<<4))
  241. {
  242. /* [4]:STKERR */
  243. rt_kprintf("STKERR ");
  244. }
  245. if(SCB_CFSR_BFSR & (1<<7))
  246. {
  247. rt_kprintf("SCB->BFAR:%08X\n", SCB_BFAR);
  248. }
  249. else
  250. {
  251. rt_kprintf("\n");
  252. }
  253. }
  254. static void mem_manage_fault_track(void)
  255. {
  256. rt_kprintf("mem manage fault:\n");
  257. rt_kprintf("SCB_CFSR_MFSR:0x%02X ", SCB_CFSR_MFSR);
  258. if(SCB_CFSR_MFSR & (1<<0))
  259. {
  260. /* [0]:IACCVIOL */
  261. rt_kprintf("IACCVIOL ");
  262. }
  263. if(SCB_CFSR_MFSR & (1<<1))
  264. {
  265. /* [1]:DACCVIOL */
  266. rt_kprintf("DACCVIOL ");
  267. }
  268. if(SCB_CFSR_MFSR & (1<<3))
  269. {
  270. /* [3]:MUNSTKERR */
  271. rt_kprintf("MUNSTKERR ");
  272. }
  273. if(SCB_CFSR_MFSR & (1<<4))
  274. {
  275. /* [4]:MSTKERR */
  276. rt_kprintf("MSTKERR ");
  277. }
  278. if(SCB_CFSR_MFSR & (1<<7))
  279. {
  280. /* [7]:MMARVALID */
  281. rt_kprintf("SCB->MMAR:%08X\n", SCB_MMAR);
  282. }
  283. else
  284. {
  285. rt_kprintf("\n");
  286. }
  287. }
  288. static void hard_fault_track(void)
  289. {
  290. if(SCB_HFSR & (1UL<<1))
  291. {
  292. /* [1]:VECTBL, Indicates hard fault is caused by failed vector fetch. */
  293. rt_kprintf("failed vector fetch\n");
  294. }
  295. if(SCB_HFSR & (1UL<<30))
  296. {
  297. /* [30]:FORCED, Indicates hard fault is taken because of bus fault,
  298. memory management fault, or usage fault. */
  299. if(SCB_CFSR_BFSR)
  300. {
  301. bus_fault_track();
  302. }
  303. if(SCB_CFSR_MFSR)
  304. {
  305. mem_manage_fault_track();
  306. }
  307. if(SCB_CFSR_UFSR)
  308. {
  309. usage_fault_track();
  310. }
  311. }
  312. if(SCB_HFSR & (1UL<<31))
  313. {
  314. /* [31]:DEBUGEVT, Indicates hard fault is triggered by debug event. */
  315. rt_kprintf("debug event\n");
  316. }
  317. }
  318. #endif /* RT_USING_FINSH */
/* Fault context consumed by rt_hw_hard_fault_exception(): the EXC_RETURN
 * value followed by the complete saved register frame. Presumably built
 * by the HardFault entry assembly — confirm in the port's .S file. */
struct exception_info
{
    rt_uint32_t exc_return;        /* EXC_RETURN value (LR on exception entry) */
    struct stack_frame stack_frame; /* software- plus hardware-saved registers */
};
  324. void rt_hw_hard_fault_exception(struct exception_info *exception_info)
  325. {
  326. #if defined(RT_USING_FINSH) && defined(MSH_USING_BUILT_IN_COMMANDS)
  327. extern long list_thread(void);
  328. #endif
  329. struct exception_stack_frame *exception_stack = &exception_info->stack_frame.exception_stack_frame;
  330. struct stack_frame *context = &exception_info->stack_frame;
  331. if (rt_exception_hook != RT_NULL)
  332. {
  333. rt_err_t result;
  334. result = rt_exception_hook(exception_stack);
  335. if (result == RT_EOK) return;
  336. }
  337. rt_kprintf("psr: 0x%08x\n", context->exception_stack_frame.psr);
  338. rt_kprintf("r00: 0x%08x\n", context->exception_stack_frame.r0);
  339. rt_kprintf("r01: 0x%08x\n", context->exception_stack_frame.r1);
  340. rt_kprintf("r02: 0x%08x\n", context->exception_stack_frame.r2);
  341. rt_kprintf("r03: 0x%08x\n", context->exception_stack_frame.r3);
  342. rt_kprintf("r04: 0x%08x\n", context->r4);
  343. rt_kprintf("r05: 0x%08x\n", context->r5);
  344. rt_kprintf("r06: 0x%08x\n", context->r6);
  345. rt_kprintf("r07: 0x%08x\n", context->r7);
  346. rt_kprintf("r08: 0x%08x\n", context->r8);
  347. rt_kprintf("r09: 0x%08x\n", context->r9);
  348. rt_kprintf("r10: 0x%08x\n", context->r10);
  349. rt_kprintf("r11: 0x%08x\n", context->r11);
  350. rt_kprintf("r12: 0x%08x\n", context->exception_stack_frame.r12);
  351. rt_kprintf(" lr: 0x%08x\n", context->exception_stack_frame.lr);
  352. rt_kprintf(" pc: 0x%08x\n", context->exception_stack_frame.pc);
  353. if (exception_info->exc_return & (1 << 2))
  354. {
  355. rt_kprintf("hard fault on thread: %s\r\n\r\n", rt_thread_self()->parent.name);
  356. #if defined(RT_USING_FINSH) && defined(MSH_USING_BUILT_IN_COMMANDS)
  357. list_thread();
  358. #endif
  359. }
  360. else
  361. {
  362. rt_kprintf("hard fault on handler\r\n\r\n");
  363. }
  364. if ( (exception_info->exc_return & 0x10) == 0)
  365. {
  366. rt_kprintf("FPU active!\r\n");
  367. }
  368. #ifdef RT_USING_FINSH
  369. hard_fault_track();
  370. #endif /* RT_USING_FINSH */
  371. while (1);
  372. }
  373. /**
  374. * reset CPU
  375. */
  376. void rt_hw_cpu_reset(void)
  377. {
  378. SCB_AIRCR = SCB_RESET_VALUE;
  379. }
/**
 \brief   Get IPSR Register
 \details Returns the content of the IPSR Register (the active exception
          number; zero in thread mode). One implementation per toolchain.
 \return  IPSR Register value
 */
rt_inline rt_uint32_t rt_hw_get_ipsr(void)
{
#if defined(__CC_ARM)
    /* ARMCC: named register variable bound directly to IPSR */
    register uint32_t __regIPSR __asm("ipsr");
    return(__regIPSR);
#elif defined(__clang__)
    uint32_t result;
    __asm volatile ("MRS %0, ipsr" : "=r" (result) );
    return(result);
#elif defined(__IAR_SYSTEMS_ICC__)
    return __iar_builtin_rsr("IPSR");
#elif defined ( __GNUC__ )
    uint32_t result;
    __asm volatile ("MRS %0, ipsr" : "=r" (result) );
    return(result);
#endif
    /* NOTE(review): a toolchain matching none of the macros above falls
     * off the end without returning a value — confirm all supported
     * compilers are covered. */
}
  402. /**
  403. * @brief This function will be invoked by BSP, when enter interrupt service routine
  404. *
  405. * @note Please don't invoke this routine in application
  406. *
  407. * @see rt_interrupt_leave
  408. */
  409. void rt_interrupt_enter(void)
  410. {
  411. extern void (*rt_interrupt_enter_hook)(void);
  412. RT_OBJECT_HOOK_CALL(rt_interrupt_enter_hook,());
  413. LOG_D("irq has come...");
  414. }
  415. /**
  416. * @brief This function will be invoked by BSP, when leave interrupt service routine
  417. *
  418. * @note Please don't invoke this routine in application
  419. *
  420. * @see rt_interrupt_enter
  421. */
  422. void rt_interrupt_leave(void)
  423. {
  424. extern void (*rt_interrupt_leave_hook)(void);
  425. LOG_D("irq is going to leave");
  426. RT_OBJECT_HOOK_CALL(rt_interrupt_leave_hook,());
  427. }
  428. /**
  429. * @brief This function will return the nest of interrupt.
  430. *
  431. * User application can invoke this function to get whether current
  432. * context is interrupt context.
  433. *
  434. * @return the number of nested interrupts.
  435. */
  436. rt_uint8_t rt_interrupt_get_nest(void)
  437. {
  438. return (rt_hw_get_ipsr() != 0);
  439. }
/**
 \brief   Get PRIMASK Register
 \details Returns the content of the PRIMASK Register (bit 0 set means all
          configurable-priority interrupts are masked). One implementation
          per toolchain.
 \return  PRIMASK Register value
 */
rt_inline rt_uint32_t rt_hw_get_primask_value(void)
{
#if defined(__CC_ARM)
    /* ARMCC: named register variable bound directly to PRIMASK */
    register uint32_t __regPRIMASK __asm("primask");
    return (__regPRIMASK);
#elif defined(__clang__)
    uint32_t result;
    __asm volatile ("MRS %0, primask" : "=r" (result));
    return result;
#elif defined(__IAR_SYSTEMS_ICC__)
    return __iar_builtin_rsr("PRIMASK");
#elif defined(__GNUC__)
    uint32_t result;
    __asm volatile ("MRS %0, primask" : "=r" (result));
    return result;
#endif
    /* NOTE(review): a toolchain matching none of the macros above falls
     * off the end without returning a value — confirm all supported
     * compilers are covered. */
}
  462. /**
  463. * @brief Check whether maskable interrupts are currently disabled.
  464. *
  465. * @details
  466. * For Cortex-M4, interrupts are considered disabled when either:
  467. * - PRIMASK masks all configurable interrupts.
  468. *
  469. * @return RT_TRUE if interrupts are masked; otherwise RT_FALSE.
  470. */
  471. rt_bool_t rt_hw_interrupt_is_disabled(void)
  472. {
  473. return ((rt_hw_get_primask_value() & 0x1UL) != 0UL);
  474. }
  475. #ifdef RT_USING_CPU_FFS
  476. /**
  477. * This function finds the first bit set (beginning with the least significant bit)
  478. * in value and return the index of that bit.
  479. *
  480. * Bits are numbered starting at 1 (the least significant bit). A return value of
  481. * zero from any of these functions means that the argument was zero.
  482. *
  483. * @return return the index of the first bit set. If value is 0, then this function
  484. * shall return 0.
  485. */
  486. #if defined(__CC_ARM)
  487. __asm int __rt_ffs(int value)
  488. {
  489. CMP r0, #0x00
  490. BEQ exit
  491. RBIT r0, r0
  492. CLZ r0, r0
  493. ADDS r0, r0, #0x01
  494. exit
  495. BX lr
  496. }
  497. #elif defined(__clang__)
  498. int __rt_ffs(int value)
  499. {
  500. __asm volatile(
  501. "CMP %1, #0x00 \n"
  502. "BEQ 1f \n"
  503. "RBIT %1, %1 \n"
  504. "CLZ %0, %1 \n"
  505. "ADDS %0, %0, #0x01 \n"
  506. "1: \n"
  507. : "=r"(value)
  508. : "r"(value)
  509. );
  510. return value;
  511. }
  512. #elif defined(__IAR_SYSTEMS_ICC__)
  513. int __rt_ffs(int value)
  514. {
  515. if (value == 0) return value;
  516. asm("RBIT %0, %1" : "=r"(value) : "r"(value));
  517. asm("CLZ %0, %1" : "=r"(value) : "r"(value));
  518. asm("ADDS %0, %1, #0x01" : "=r"(value) : "r"(value));
  519. return value;
  520. }
  521. #elif defined(__GNUC__)
  522. int __rt_ffs(int value)
  523. {
  524. return __builtin_ffs(value);
  525. }
  526. #endif
  527. #endif