/* irq_profile_stats.c */
/*
 * Copyright (c) 2018 Nordic Semiconductor ASA
 *
 * SPDX-License-Identifier: Apache-2.0
 */
  6. #include <sys/printk.h>
  7. #include <kernel_internal.h>
  8. #include <ksched.h>
  9. #ifdef CONFIG_TRACING_IRQ_PROFILER
  10. #define RUNNING_CYCLES(end, start) ((uint32_t)((long)(end) - (long)(start)))
  11. #ifdef CONFIG_TRACING_IRQ_PROFILER_IRQLOADING
  12. #define RUNNING_TIMES(end, start) ((uint32_t)((long)(end) - (long)(start)))
  13. extern void idle(void *unused1, void *unused2, void *unused3);
  14. static u32_t cpuload_isr_total_cycles;
  15. static u32_t cpuload_isr_in_idle_cycles;
  16. static u32_t irq_pre_total_us[IRQ_TABLE_SIZE];
  17. #endif
  18. void isr_wrapper_with_profiler(u32_t irqnum)
  19. {
  20. u32_t ts_irq_enter, irq_cycles;
  21. u32_t stop_cycle;
  22. #ifdef CONFIG_TRACING_IRQ_PROFILER_IRQLOADING
  23. static u32_t pre_time, curr_time;
  24. u32_t i, print_flag = 0;
  25. #endif
  26. struct _isr_table_entry *ite;
  27. irqnum >>= 3;
  28. ts_irq_enter = k_cycle_get_32();
  29. ite = &_sw_isr_table[irqnum];
  30. if(ite->isr){
  31. ite->isr(ite->arg);
  32. }
  33. ite->irq_cnt++;
  34. stop_cycle = k_cycle_get_32();
  35. irq_cycles = RUNNING_CYCLES(stop_cycle, ts_irq_enter);
  36. ite->irq_total_us += k_cyc_to_ns_floor64(irq_cycles) / 1000;
  37. if (irq_cycles > ite->max_irq_cycles)
  38. ite->max_irq_cycles = irq_cycles;
  39. #ifdef CONFIG_TRACING_IRQ_PROFILER_MAX_LATENCY
  40. if(irq_cycles >= CONFIG_TRACING_IRQ_PROFILE_MAX_LATENCY_CYCLES){
  41. printk("irq@%d run %dus not meet latency\n", irqnum, (u32_t)(k_cyc_to_ns_floor64(irq_cycles) / 1000));
  42. }
  43. #endif
  44. #ifdef CONFIG_TRACING_IRQ_PROFILER_IRQLOADING
  45. cpuload_isr_total_cycles += RUNNING_CYCLES(stop_cycle, ts_irq_enter);
  46. if (_current->entry.pEntry == idle) {
  47. cpuload_isr_in_idle_cycles += RUNNING_CYCLES(stop_cycle, ts_irq_enter);
  48. }
  49. curr_time = k_uptime_get_32();
  50. if ((curr_time - pre_time) > 1000) {
  51. pre_time = curr_time;
  52. for (i=0; i<IRQ_TABLE_SIZE; i++) {
  53. ite = &_sw_isr_table[i];
  54. if (ite->isr != z_irq_spurious) {
  55. if ((RUNNING_TIMES(ite->irq_total_us, irq_pre_total_us[i]) > 60000)) {
  56. printk("current irq@%d overtime run %dus\n", i, RUNNING_TIMES(ite->irq_total_us, irq_pre_total_us[i]));
  57. print_flag = 1;
  58. }
  59. irq_pre_total_us[i] = ite->irq_total_us;
  60. }
  61. }
  62. /* > (300ms) (9677000/1000)*31 */
  63. /* SYS_CLOCK_HW_CYCLES_TO_NS_AVG(1000, 1000) = 31us */
  64. if ((cpuload_isr_total_cycles > 9677000) || print_flag) {
  65. printk("(%u)total isr %d us, in idle %d us\n", curr_time,
  66. SYS_CLOCK_HW_CYCLES_TO_NS_AVG(cpuload_isr_total_cycles, 1000),
  67. SYS_CLOCK_HW_CYCLES_TO_NS_AVG(cpuload_isr_in_idle_cycles, 1000));
  68. }
  69. cpuload_isr_total_cycles = 0;
  70. cpuload_isr_in_idle_cycles = 0;
  71. }
  72. #endif
  73. }
#endif /* CONFIG_TRACING_IRQ_PROFILER */