/* NOTE(review): removed non-source extraction artifacts that preceded the
 * file body (a "kernel.h 167 KB" page header and the rendered line-number
 * gutter concatenated into digit runs). Nothing from the original source
 * file was deleted.
 */
  1. /*
  2. * Copyright (c) 2016, Wind River Systems, Inc.
  3. *
  4. * SPDX-License-Identifier: Apache-2.0
  5. */
  6. /**
  7. * @file
  8. *
  9. * @brief Public kernel APIs.
  10. */
  11. #ifndef ZEPHYR_INCLUDE_KERNEL_H_
  12. #define ZEPHYR_INCLUDE_KERNEL_H_
  13. #if !defined(_ASMLANGUAGE)
  14. #include <kernel_includes.h>
  15. #include <errno.h>
  16. #include <limits.h>
  17. #include <stdbool.h>
  18. #include <toolchain.h>
  19. #include <tracing/tracing_macros.h>
  20. #ifdef CONFIG_THREAD_RUNTIME_STATS_USE_TIMING_FUNCTIONS
  21. #include <timing/timing.h>
  22. #endif
  23. #ifdef __cplusplus
  24. extern "C" {
  25. #endif
  26. /**
  27. * @brief Kernel APIs
  28. * @defgroup kernel_apis Kernel APIs
  29. * @{
  30. * @}
  31. */
  32. #define K_ANY NULL
  33. #define K_END NULL
  34. #if CONFIG_NUM_COOP_PRIORITIES + CONFIG_NUM_PREEMPT_PRIORITIES == 0
  35. #error Zero available thread priorities defined!
  36. #endif
  37. #define K_PRIO_COOP(x) (-(CONFIG_NUM_COOP_PRIORITIES - (x)))
  38. #define K_PRIO_PREEMPT(x) (x)
  39. #define K_HIGHEST_THREAD_PRIO (-CONFIG_NUM_COOP_PRIORITIES)
  40. #define K_LOWEST_THREAD_PRIO CONFIG_NUM_PREEMPT_PRIORITIES
  41. #define K_IDLE_PRIO K_LOWEST_THREAD_PRIO
  42. #define K_HIGHEST_APPLICATION_THREAD_PRIO (K_HIGHEST_THREAD_PRIO)
  43. #define K_LOWEST_APPLICATION_THREAD_PRIO (K_LOWEST_THREAD_PRIO - 1)
  44. #ifdef CONFIG_POLL
  45. #define _POLL_EVENT_OBJ_INIT(obj) \
  46. .poll_events = SYS_DLIST_STATIC_INIT(&obj.poll_events),
  47. #define _POLL_EVENT sys_dlist_t poll_events
  48. #else
  49. #define _POLL_EVENT_OBJ_INIT(obj)
  50. #define _POLL_EVENT
  51. #endif
  52. struct k_thread;
  53. struct k_mutex;
  54. struct k_sem;
  55. struct k_msgq;
  56. struct k_mbox;
  57. struct k_pipe;
  58. struct k_queue;
  59. struct k_fifo;
  60. struct k_lifo;
  61. struct k_stack;
  62. struct k_mem_slab;
  63. struct k_mem_pool;
  64. struct k_timer;
  65. struct k_poll_event;
  66. struct k_poll_signal;
  67. struct k_mem_domain;
  68. struct k_mem_partition;
  69. struct k_futex;
  70. enum execution_context_types {
  71. K_ISR = 0,
  72. K_COOP_THREAD,
  73. K_PREEMPT_THREAD,
  74. };
  75. /* private, used by k_poll and k_work_poll */
  76. struct k_work_poll;
  77. typedef int (*_poller_cb_t)(struct k_poll_event *event, uint32_t state);
  78. /**
  79. * @addtogroup thread_apis
  80. * @{
  81. */
  82. typedef void (*k_thread_user_cb_t)(const struct k_thread *thread,
  83. void *user_data);
  84. /**
  85. * @brief Iterate over all the threads in the system.
  86. *
  87. * This routine iterates over all the threads in the system and
  88. * calls the user_cb function for each thread.
  89. *
  90. * @param user_cb Pointer to the user callback function.
  91. * @param user_data Pointer to user data.
  92. *
  93. * @note @kconfig{CONFIG_THREAD_MONITOR} must be set for this function
  94. * to be effective.
  95. * @note This API uses @ref k_spin_lock to protect the _kernel.threads
  96. * list which means creation of new threads and terminations of existing
  97. * threads are blocked until this API returns.
  98. *
  99. * @return N/A
  100. */
  101. extern void k_thread_foreach(k_thread_user_cb_t user_cb, void *user_data);
  102. /**
  103. * @brief Iterate over all the threads in the system without locking.
  104. *
  105. * This routine works exactly the same like @ref k_thread_foreach
  106. * but unlocks interrupts when user_cb is executed.
  107. *
  108. * @param user_cb Pointer to the user callback function.
  109. * @param user_data Pointer to user data.
  110. *
  111. * @note @kconfig{CONFIG_THREAD_MONITOR} must be set for this function
  112. * to be effective.
  113. * @note This API uses @ref k_spin_lock only when accessing the _kernel.threads
  114. * queue elements. It unlocks it during user callback function processing.
  115. * If a new task is created when this @c foreach function is in progress,
  116. * the added new task would not be included in the enumeration.
  117. * If a task is aborted during this enumeration, there would be a race here
  118. * and there is a possibility that this aborted task would be included in the
  119. * enumeration.
  120. * @note If the task is aborted and the memory occupied by its @c k_thread
  121. * structure is reused when this @c k_thread_foreach_unlocked is in progress
  122. * it might even lead to the system behave unstable.
  123. * This function may never return, as it would follow some @c next task
  124. * pointers treating given pointer as a pointer to the k_thread structure
  125. * while it is something different right now.
  126. * Do not reuse the memory that was occupied by k_thread structure of aborted
  127. * task if it was aborted after this function was called in any context.
  128. */
  129. extern void k_thread_foreach_unlocked(
  130. k_thread_user_cb_t user_cb, void *user_data);
  131. /** @} */
  132. /**
  133. * @defgroup thread_apis Thread APIs
  134. * @ingroup kernel_apis
  135. * @{
  136. */
  137. #endif /* !_ASMLANGUAGE */
  138. /*
  139. * Thread user options. May be needed by assembly code. Common part uses low
  140. * bits, arch-specific use high bits.
  141. */
  142. /**
  143. * @brief system thread that must not abort
  144. * */
  145. #define K_ESSENTIAL (BIT(0))
  146. #if defined(CONFIG_FPU_SHARING)
  147. /**
  148. * @brief FPU registers are managed by context switch
  149. *
  150. * @details
  151. * This option indicates that the thread uses the CPU's floating point
  152. * registers. This instructs the kernel to take additional steps to save
  153. * and restore the contents of these registers when scheduling the thread.
  154. * No effect if @kconfig{CONFIG_FPU_SHARING} is not enabled.
  155. */
  156. #define K_FP_REGS (BIT(1))
  157. #endif
  158. /**
  159. * @brief user mode thread
  160. *
  161. * This thread has dropped from supervisor mode to user mode and consequently
  162. * has additional restrictions
  163. */
  164. #define K_USER (BIT(2))
  165. /**
  166. * @brief Inherit Permissions
  167. *
  168. * @details
  169. * Indicates that the thread being created should inherit all kernel object
  170. * permissions from the thread that created it. No effect if
  171. * @kconfig{CONFIG_USERSPACE} is not enabled.
  172. */
  173. #define K_INHERIT_PERMS (BIT(3))
  174. /**
  175. * @brief Callback item state
  176. *
  177. * @details
  178. * This is a single bit of state reserved for "callback manager"
  179. * utilities (p4wq initially) who need to track operations invoked
  180. * from within a user-provided callback they have been invoked.
  181. * Effectively it serves as a tiny bit of zero-overhead TLS data.
  182. */
  183. #define K_CALLBACK_STATE (BIT(4))
  184. #ifdef CONFIG_X86
  185. /* x86 Bitmask definitions for threads user options */
  186. #if defined(CONFIG_FPU_SHARING) && defined(CONFIG_X86_SSE)
  187. /**
  188. * @brief FP and SSE registers are managed by context switch on x86
  189. *
  190. * @details
  191. * This option indicates that the thread uses the x86 CPU's floating point
  192. * and SSE registers. This instructs the kernel to take additional steps to
  193. * save and restore the contents of these registers when scheduling
  194. * the thread. No effect if @kconfig{CONFIG_X86_SSE} is not enabled.
  195. */
  196. #define K_SSE_REGS (BIT(7))
  197. #endif
  198. #endif
  199. /* end - thread options */
  200. #if !defined(_ASMLANGUAGE)
  201. /**
  202. * @brief Create a thread.
  203. *
  204. * This routine initializes a thread, then schedules it for execution.
  205. *
  206. * The new thread may be scheduled for immediate execution or a delayed start.
  207. * If the newly spawned thread does not have a delayed start the kernel
  208. * scheduler may preempt the current thread to allow the new thread to
  209. * execute.
  210. *
  211. * Thread options are architecture-specific, and can include K_ESSENTIAL,
  212. * K_FP_REGS, and K_SSE_REGS. Multiple options may be specified by separating
  213. * them using "|" (the logical OR operator).
  214. *
  215. * Stack objects passed to this function must be originally defined with
  216. * either of these macros in order to be portable:
  217. *
  218. * - K_THREAD_STACK_DEFINE() - For stacks that may support either user or
  219. * supervisor threads.
  220. * - K_KERNEL_STACK_DEFINE() - For stacks that may support supervisor
  221. * threads only. These stacks use less memory if CONFIG_USERSPACE is
  222. * enabled.
  223. *
  224. * The stack_size parameter has constraints. It must either be:
  225. *
  226. * - The original size value passed to K_THREAD_STACK_DEFINE() or
  227. * K_KERNEL_STACK_DEFINE()
  228. * - The return value of K_THREAD_STACK_SIZEOF(stack) if the stack was
  229. * defined with K_THREAD_STACK_DEFINE()
  230. * - The return value of K_KERNEL_STACK_SIZEOF(stack) if the stack was
  231. * defined with K_KERNEL_STACK_DEFINE().
  232. *
  233. * Using other values, or sizeof(stack) may produce undefined behavior.
  234. *
  235. * @param new_thread Pointer to uninitialized struct k_thread
  236. * @param stack Pointer to the stack space.
  237. * @param stack_size Stack size in bytes.
  238. * @param entry Thread entry function.
  239. * @param p1 1st entry point parameter.
  240. * @param p2 2nd entry point parameter.
  241. * @param p3 3rd entry point parameter.
  242. * @param prio Thread priority.
  243. * @param options Thread options.
  244. * @param delay Scheduling delay, or K_NO_WAIT (for no delay).
  245. *
  246. * @return ID of new thread.
  247. *
  248. */
  249. __syscall k_tid_t k_thread_create(struct k_thread *new_thread,
  250. k_thread_stack_t *stack,
  251. size_t stack_size,
  252. k_thread_entry_t entry,
  253. void *p1, void *p2, void *p3,
  254. int prio, uint32_t options, k_timeout_t delay);
  255. /**
  256. * @brief Drop a thread's privileges permanently to user mode
  257. *
  258. * This allows a supervisor thread to be re-used as a user thread.
  259. * This function does not return, but control will transfer to the provided
  260. * entry point as if this was a new user thread.
  261. *
  262. * The implementation ensures that the stack buffer contents are erased.
  263. * Any thread-local storage will be reverted to a pristine state.
  264. *
  265. * Memory domain membership, resource pool assignment, kernel object
  266. * permissions, priority, and thread options are preserved.
  267. *
  268. * A common use of this function is to re-use the main thread as a user thread
  269. * once all supervisor mode-only tasks have been completed.
  270. *
  271. * @param entry Function to start executing from
  272. * @param p1 1st entry point parameter
  273. * @param p2 2nd entry point parameter
  274. * @param p3 3rd entry point parameter
  275. */
  276. extern FUNC_NORETURN void k_thread_user_mode_enter(k_thread_entry_t entry,
  277. void *p1, void *p2,
  278. void *p3);
  279. /**
  280. * @brief Grant a thread access to a set of kernel objects
  281. *
  282. * This is a convenience function. For the provided thread, grant access to
  283. * the remaining arguments, which must be pointers to kernel objects.
  284. *
  285. * The thread object must be initialized (i.e. running). The objects don't
  286. * need to be.
  287. * Note that NULL shouldn't be passed as an argument.
  288. *
  289. * @param thread Thread to grant access to objects
  290. * @param ... list of kernel object pointers
  291. */
/* Expands to one k_object_access_grant() call per listed object, with
 * `thread` supplied as the fixed argument to each call (via
 * FOR_EACH_FIXED_ARG) — presumably k_object_access_grant(obj, thread);
 * confirm argument order against that function's prototype.
 */
#define k_thread_access_grant(thread, ...) \
	FOR_EACH_FIXED_ARG(k_object_access_grant, (;), thread, __VA_ARGS__)
  294. /**
  295. * @brief Assign a resource memory pool to a thread
  296. *
  297. * By default, threads have no resource pool assigned unless their parent
  298. * thread has a resource pool, in which case it is inherited. Multiple
  299. * threads may be assigned to the same memory pool.
  300. *
  301. * Changing a thread's resource pool will not migrate allocations from the
  302. * previous pool.
  303. *
  304. * @param thread Target thread to assign a memory pool for resource requests.
  305. * @param heap Heap object to use for resources,
  306. * or NULL if the thread should no longer have a memory pool.
  307. */
static inline void k_thread_heap_assign(struct k_thread *thread,
					struct k_heap *heap)
{
	/* Plain pointer assignment: allocations made from any previously
	 * assigned pool are NOT migrated, and NULL clears the assignment.
	 */
	thread->resource_pool = heap;
}
  313. #if defined(CONFIG_INIT_STACKS) && defined(CONFIG_THREAD_STACK_INFO)
  314. /**
  315. * @brief Obtain stack usage information for the specified thread
  316. *
  317. * User threads will need to have permission on the target thread object.
  318. *
  319. * Some hardware may prevent inspection of a stack buffer currently in use.
  320. * If this API is called from supervisor mode, on the currently running thread,
  321. * on a platform which selects @kconfig{CONFIG_NO_UNUSED_STACK_INSPECTION}, an
  322. * error will be generated.
  323. *
  324. * @param thread Thread to inspect stack information
  325. * @param unused_ptr Output parameter, filled in with the unused stack space
  326. * of the target thread in bytes.
  327. * @return 0 on success
  328. * @return -EBADF Bad thread object (user mode only)
  329. * @return -EPERM No permissions on thread object (user mode only)
 * @return -ENOTSUP Forbidden by hardware policy
  331. * @return -EINVAL Thread is uninitialized or exited (user mode only)
  332. * @return -EFAULT Bad memory address for unused_ptr (user mode only)
  333. */
  334. __syscall int k_thread_stack_space_get(const struct k_thread *thread,
  335. size_t *unused_ptr);
  336. #endif
  337. #if (CONFIG_HEAP_MEM_POOL_SIZE > 0)
  338. /**
  339. * @brief Assign the system heap as a thread's resource pool
  340. *
 * Similar to k_thread_heap_assign(), but the thread will use
  342. * the kernel heap to draw memory.
  343. *
  344. * Use with caution, as a malicious thread could perform DoS attacks on the
  345. * kernel heap.
  346. *
  347. * @param thread Target thread to assign the system heap for resource requests
  348. *
  349. */
  350. void k_thread_system_pool_assign(struct k_thread *thread);
  351. #endif /* (CONFIG_HEAP_MEM_POOL_SIZE > 0) */
  352. /**
  353. * @brief Sleep until a thread exits
  354. *
  355. * The caller will be put to sleep until the target thread exits, either due
  356. * to being aborted, self-exiting, or taking a fatal error. This API returns
  357. * immediately if the thread isn't running.
  358. *
  359. * This API may only be called from ISRs with a K_NO_WAIT timeout,
  360. * where it can be useful as a predicate to detect when a thread has
  361. * aborted.
  362. *
  363. * @param thread Thread to wait to exit
  364. * @param timeout upper bound time to wait for the thread to exit.
  365. * @retval 0 success, target thread has exited or wasn't running
  366. * @retval -EBUSY returned without waiting
  367. * @retval -EAGAIN waiting period timed out
  368. * @retval -EDEADLK target thread is joining on the caller, or target thread
  369. * is the caller
  370. */
  371. __syscall int k_thread_join(struct k_thread *thread, k_timeout_t timeout);
  372. /**
  373. * @brief Put the current thread to sleep.
  374. *
 * This routine puts the current thread to sleep for @a timeout,
  376. * specified as a k_timeout_t object.
  377. *
  378. * @note if @a timeout is set to K_FOREVER then the thread is suspended.
  379. *
  380. * @param timeout Desired duration of sleep.
  381. *
  382. * @return Zero if the requested time has elapsed or the number of milliseconds
  383. * left to sleep, if thread was woken up by \ref k_wakeup call.
  384. */
  385. __syscall int32_t k_sleep(k_timeout_t timeout);
  386. /**
  387. * @brief Put the current thread to sleep.
  388. *
 * This routine puts the current thread to sleep for @a ms milliseconds.
  390. *
  391. * @param ms Number of milliseconds to sleep.
  392. *
  393. * @return Zero if the requested time has elapsed or the number of milliseconds
  394. * left to sleep, if thread was woken up by \ref k_wakeup call.
  395. */
static inline int32_t k_msleep(int32_t ms)
{
	/* Convert the millisecond count to a k_timeout_t and delegate to
	 * k_sleep(); the return value (0, or ms left if woken early by
	 * k_wakeup()) passes through unchanged.
	 */
	return k_sleep(Z_TIMEOUT_MS(ms));
}
  400. /**
  401. * @brief Put the current thread to sleep with microsecond resolution.
  402. *
  403. * This function is unlikely to work as expected without kernel tuning.
  404. * In particular, because the lower bound on the duration of a sleep is
  405. * the duration of a tick, @kconfig{CONFIG_SYS_CLOCK_TICKS_PER_SEC} must be
  406. * adjusted to achieve the resolution desired. The implications of doing
  407. * this must be understood before attempting to use k_usleep(). Use with
  408. * caution.
  409. *
  410. * @param us Number of microseconds to sleep.
  411. *
  412. * @return Zero if the requested time has elapsed or the number of microseconds
  413. * left to sleep, if thread was woken up by \ref k_wakeup call.
  414. */
  415. __syscall int32_t k_usleep(int32_t us);
  416. /**
  417. * @brief Cause the current thread to busy wait.
  418. *
  419. * This routine causes the current thread to execute a "do nothing" loop for
  420. * @a usec_to_wait microseconds.
  421. *
  422. * @note The clock used for the microsecond-resolution delay here may
  423. * be skewed relative to the clock used for system timeouts like
  424. * k_sleep(). For example k_busy_wait(1000) may take slightly more or
  425. * less time than k_sleep(K_MSEC(1)), with the offset dependent on
  426. * clock tolerances.
  427. *
  428. * @return N/A
  429. */
  430. __syscall void k_busy_wait(uint32_t usec_to_wait);
  431. /**
  432. * @brief Yield the current thread.
  433. *
  434. * This routine causes the current thread to yield execution to another
  435. * thread of the same or higher priority. If there are no other ready threads
  436. * of the same or higher priority, the routine returns immediately.
  437. *
  438. * @return N/A
  439. */
  440. __syscall void k_yield(void);
  441. /**
  442. * @brief Wake up a sleeping thread.
  443. *
  444. * This routine prematurely wakes up @a thread from sleeping.
  445. *
  446. * If @a thread is not currently sleeping, the routine has no effect.
  447. *
  448. * @param thread ID of thread to wake.
  449. *
  450. * @return N/A
  451. */
  452. __syscall void k_wakeup(k_tid_t thread);
  453. /**
  454. * @brief Get thread ID of the current thread.
  455. *
  456. * This unconditionally queries the kernel via a system call.
  457. *
  458. * @return ID of current thread.
  459. */
  460. __attribute_const__
  461. __syscall k_tid_t z_current_get(void);
  462. #ifdef CONFIG_THREAD_LOCAL_STORAGE
  463. /* Thread-local cache of current thread ID, set in z_thread_entry() */
  464. extern __thread k_tid_t z_tls_current;
  465. #endif
  466. /**
  467. * @brief Get thread ID of the current thread.
  468. *
  469. * @return ID of current thread.
  470. *
  471. */
__attribute_const__
static inline k_tid_t k_current_get(void)
{
#ifdef CONFIG_THREAD_LOCAL_STORAGE
	/* Fast path: read the thread-local cached ID (set in
	 * z_thread_entry(), per the declaration above) instead of paying
	 * for a system call.
	 */
	return z_tls_current;
#else
	/* No TLS cache available: query the kernel via syscall. */
	return z_current_get();
#endif
}
  481. /**
  482. * @brief Abort a thread.
  483. *
  484. * This routine permanently stops execution of @a thread. The thread is taken
  485. * off all kernel queues it is part of (i.e. the ready queue, the timeout
  486. * queue, or a kernel object wait queue). However, any kernel resources the
  487. * thread might currently own (such as mutexes or memory blocks) are not
  488. * released. It is the responsibility of the caller of this routine to ensure
  489. * all necessary cleanup is performed.
  490. *
  491. * After k_thread_abort() returns, the thread is guaranteed not to be
  492. * running or to become runnable anywhere on the system. Normally
  493. * this is done via blocking the caller (in the same manner as
  494. * k_thread_join()), but in interrupt context on SMP systems the
  495. * implementation is required to spin for threads that are running on
  496. * other CPUs. Note that as specified, this means that on SMP
  497. * platforms it is possible for application code to create a deadlock
  498. * condition by simultaneously aborting a cycle of threads using at
  499. * least one termination from interrupt context. Zephyr cannot detect
  500. * all such conditions.
  501. *
  502. * @param thread ID of thread to abort.
  503. *
  504. * @return N/A
  505. */
  506. __syscall void k_thread_abort(k_tid_t thread);
  507. /**
  508. * @brief Start an inactive thread
  509. *
  510. * If a thread was created with K_FOREVER in the delay parameter, it will
  511. * not be added to the scheduling queue until this function is called
  512. * on it.
  513. *
  514. * @param thread thread to start
  515. */
  516. __syscall void k_thread_start(k_tid_t thread);
  517. extern k_ticks_t z_timeout_expires(const struct _timeout *timeout);
  518. extern k_ticks_t z_timeout_remaining(const struct _timeout *timeout);
  519. #ifdef CONFIG_SYS_CLOCK_EXISTS
  520. /**
  521. * @brief Get time when a thread wakes up, in system ticks
  522. *
  523. * This routine computes the system uptime when a waiting thread next
  524. * executes, in units of system ticks. If the thread is not waiting,
  525. * it returns current system time.
  526. */
  527. __syscall k_ticks_t k_thread_timeout_expires_ticks(const struct k_thread *t);
/* Implementation handler for the k_thread_timeout_expires_ticks() syscall:
 * delegates to the generic timeout query on the thread's embedded timeout.
 */
static inline k_ticks_t z_impl_k_thread_timeout_expires_ticks(
	const struct k_thread *t)
{
	return z_timeout_expires(&t->base.timeout);
}
  533. /**
  534. * @brief Get time remaining before a thread wakes up, in system ticks
  535. *
  536. * This routine computes the time remaining before a waiting thread
  537. * next executes, in units of system ticks. If the thread is not
  538. * waiting, it returns zero.
  539. */
  540. __syscall k_ticks_t k_thread_timeout_remaining_ticks(const struct k_thread *t);
/* Implementation handler for the k_thread_timeout_remaining_ticks()
 * syscall: delegates to the generic remaining-time query on the thread's
 * embedded timeout.
 */
static inline k_ticks_t z_impl_k_thread_timeout_remaining_ticks(
	const struct k_thread *t)
{
	return z_timeout_remaining(&t->base.timeout);
}
  546. #endif /* CONFIG_SYS_CLOCK_EXISTS */
  547. /**
  548. * @cond INTERNAL_HIDDEN
  549. */
  550. /* timeout has timed out and is not on _timeout_q anymore */
  551. #define _EXPIRED (-2)
/* Bookkeeping record for a statically defined thread; one instance per
 * K_THREAD_DEFINE() use, placed in an iterable section (see
 * STRUCT_SECTION_ITERABLE in K_THREAD_DEFINE) — presumably walked during
 * kernel startup to create the threads.
 */
struct _static_thread_data {
	struct k_thread *init_thread;	/* thread object to initialize */
	k_thread_stack_t *init_stack;	/* stack buffer for the thread */
	unsigned int init_stack_size;	/* stack size in bytes */
	k_thread_entry_t init_entry;	/* thread entry point */
	void *init_p1;			/* 1st entry point parameter */
	void *init_p2;			/* 2nd entry point parameter */
	void *init_p3;			/* 3rd entry point parameter */
	int init_prio;			/* thread priority */
	uint32_t init_options;		/* thread option flags */
	int32_t init_delay;		/* scheduling delay in milliseconds */
	void (*init_abort)(void);	/* optional abort handler, may be NULL */
	const char *init_name;		/* thread name (stringified symbol) */
};
/* Designated initializer for a struct _static_thread_data record.
 * Internal helper for K_THREAD_DEFINE(); not part of the public API
 * (it sits inside the INTERNAL_HIDDEN doc section).
 */
#define Z_THREAD_INITIALIZER(thread, stack, stack_size,		\
			     entry, p1, p2, p3,			\
			     prio, options, delay, abort, tname) \
	{							\
	.init_thread = (thread),				\
	.init_stack = (stack),					\
	.init_stack_size = (stack_size),			\
	.init_entry = (k_thread_entry_t)entry,			\
	.init_p1 = (void *)p1,					\
	.init_p2 = (void *)p2,					\
	.init_p3 = (void *)p3,					\
	.init_prio = (prio),					\
	.init_options = (options),				\
	.init_delay = (delay),					\
	.init_abort = (abort),					\
	.init_name = STRINGIFY(tname),				\
	}
  583. /**
  584. * INTERNAL_HIDDEN @endcond
  585. */
  586. /**
  587. * @brief Statically define and initialize a thread.
  588. *
  589. * The thread may be scheduled for immediate execution or a delayed start.
  590. *
  591. * Thread options are architecture-specific, and can include K_ESSENTIAL,
  592. * K_FP_REGS, and K_SSE_REGS. Multiple options may be specified by separating
  593. * them using "|" (the logical OR operator).
  594. *
  595. * The ID of the thread can be accessed using:
  596. *
  597. * @code extern const k_tid_t <name>; @endcode
  598. *
  599. * @param name Name of the thread.
  600. * @param stack_size Stack size in bytes.
  601. * @param entry Thread entry function.
  602. * @param p1 1st entry point parameter.
  603. * @param p2 2nd entry point parameter.
  604. * @param p3 3rd entry point parameter.
  605. * @param prio Thread priority.
  606. * @param options Thread options.
  607. * @param delay Scheduling delay (in milliseconds), zero for no delay.
  608. *
  609. *
  610. * @internal It has been observed that the x86 compiler by default aligns
  611. * these _static_thread_data structures to 32-byte boundaries, thereby
  612. * wasting space. To work around this, force a 4-byte alignment.
  613. *
  614. */
/* Expands to four file-scope definitions:
 *  - a stack buffer      (_k_thread_stack_<name>)
 *  - a thread object     (_k_thread_obj_<name>)
 *  - a _static_thread_data record in an iterable section
 *    (presumably consumed during kernel initialization)
 *  - a public `const k_tid_t <name>` handle for the thread.
 * No abort handler is registered (NULL).
 */
#define K_THREAD_DEFINE(name, stack_size,			\
			entry, p1, p2, p3,			\
			prio, options, delay)			\
	K_THREAD_STACK_DEFINE(_k_thread_stack_##name, stack_size); \
	struct k_thread _k_thread_obj_##name;			\
	STRUCT_SECTION_ITERABLE(_static_thread_data, _k_thread_data_##name) = \
		Z_THREAD_INITIALIZER(&_k_thread_obj_##name,	\
				     _k_thread_stack_##name, stack_size, \
				     entry, p1, p2, p3, prio, options, delay, \
				     NULL, name);		\
	const k_tid_t name = (k_tid_t)&_k_thread_obj_##name
  626. /**
  627. * @brief Get a thread's priority.
  628. *
  629. * This routine gets the priority of @a thread.
  630. *
  631. * @param thread ID of thread whose priority is needed.
  632. *
  633. * @return Priority of @a thread.
  634. */
  635. __syscall int k_thread_priority_get(k_tid_t thread);
  636. /**
  637. * @brief Set a thread's priority.
  638. *
  639. * This routine immediately changes the priority of @a thread.
  640. *
  641. * Rescheduling can occur immediately depending on the priority @a thread is
  642. * set to:
  643. *
  644. * - If its priority is raised above the priority of the caller of this
  645. * function, and the caller is preemptible, @a thread will be scheduled in.
  646. *
  647. * - If the caller operates on itself, it lowers its priority below that of
  648. * other threads in the system, and the caller is preemptible, the thread of
  649. * highest priority will be scheduled in.
  650. *
  651. * Priority can be assigned in the range of -CONFIG_NUM_COOP_PRIORITIES to
  652. * CONFIG_NUM_PREEMPT_PRIORITIES-1, where -CONFIG_NUM_COOP_PRIORITIES is the
  653. * highest priority.
  654. *
  655. * @param thread ID of thread whose priority is to be set.
  656. * @param prio New priority.
  657. *
  658. * @warning Changing the priority of a thread currently involved in mutex
  659. * priority inheritance may result in undefined behavior.
  660. *
  661. * @return N/A
  662. */
  663. __syscall void k_thread_priority_set(k_tid_t thread, int prio);
  664. #ifdef CONFIG_SCHED_DEADLINE
  665. /**
  666. * @brief Set deadline expiration time for scheduler
  667. *
  668. * This sets the "deadline" expiration as a time delta from the
  669. * current time, in the same units used by k_cycle_get_32(). The
  670. * scheduler (when deadline scheduling is enabled) will choose the
  671. * next expiring thread when selecting between threads at the same
  672. * static priority. Threads at different priorities will be scheduled
  673. * according to their static priority.
  674. *
  675. * @note Deadlines are stored internally using 32 bit unsigned
  676. * integers. The number of cycles between the "first" deadline in the
  677. * scheduler queue and the "last" deadline must be less than 2^31 (i.e
  678. * a signed non-negative quantity). Failure to adhere to this rule
  679. * may result in scheduled threads running in an incorrect deadline
  680. * order.
  681. *
 * @note Despite the API naming, the scheduler makes no guarantees that
  683. * the thread WILL be scheduled within that deadline, nor does it take
  684. * extra metadata (like e.g. the "runtime" and "period" parameters in
  685. * Linux sched_setattr()) that allows the kernel to validate the
  686. * scheduling for achievability. Such features could be implemented
  687. * above this call, which is simply input to the priority selection
  688. * logic.
  689. *
  690. * @note You should enable @kconfig{CONFIG_SCHED_DEADLINE} in your project
  691. * configuration.
  692. *
  693. * @param thread A thread on which to set the deadline
  694. * @param deadline A time delta, in cycle units
  695. *
  696. */
  697. __syscall void k_thread_deadline_set(k_tid_t thread, int deadline);
  698. #endif
  699. #ifdef CONFIG_SCHED_CPU_MASK
  700. /**
  701. * @brief Sets all CPU enable masks to zero
  702. *
  703. * After this returns, the thread will no longer be schedulable on any
  704. * CPUs. The thread must not be currently runnable.
  705. *
 * @note You should enable @kconfig{CONFIG_SCHED_CPU_MASK} in your project
  707. * configuration.
  708. *
  709. * @param thread Thread to operate upon
  710. * @return Zero on success, otherwise error code
  711. */
  712. int k_thread_cpu_mask_clear(k_tid_t thread);
  713. /**
  714. * @brief Sets all CPU enable masks to one
  715. *
  716. * After this returns, the thread will be schedulable on any CPU. The
  717. * thread must not be currently runnable.
  718. *
 * @note You should enable @kconfig{CONFIG_SCHED_CPU_MASK} in your project
  720. * configuration.
  721. *
  722. * @param thread Thread to operate upon
  723. * @return Zero on success, otherwise error code
  724. */
  725. int k_thread_cpu_mask_enable_all(k_tid_t thread);
  726. /**
  727. * @brief Enable thread to run on specified CPU
  728. *
  729. * The thread must not be currently runnable.
  730. *
 * @note You should enable @kconfig{CONFIG_SCHED_CPU_MASK} in your project
  732. * configuration.
  733. *
  734. * @param thread Thread to operate upon
  735. * @param cpu CPU index
  736. * @return Zero on success, otherwise error code
  737. */
  738. int k_thread_cpu_mask_enable(k_tid_t thread, int cpu);
  739. /**
 * @brief Prevent thread from running on specified CPU
  741. *
  742. * The thread must not be currently runnable.
  743. *
 * @note You should enable @kconfig{CONFIG_SCHED_CPU_MASK} in your project
  745. * configuration.
  746. *
  747. * @param thread Thread to operate upon
  748. * @param cpu CPU index
  749. * @return Zero on success, otherwise error code
  750. */
  751. int k_thread_cpu_mask_disable(k_tid_t thread, int cpu);
  752. #endif
  753. /**
  754. * @brief Suspend a thread.
  755. *
  756. * This routine prevents the kernel scheduler from making @a thread
  757. * the current thread. All other internal operations on @a thread are
  758. * still performed; for example, kernel objects it is waiting on are
  759. * still handed to it. Note that any existing timeouts
  760. * (e.g. k_sleep(), or a timeout argument to k_sem_take() et. al.)
  761. * will be canceled. On resume, the thread will begin running
  762. * immediately and return from the blocked call.
  763. *
  764. * If @a thread is already suspended, the routine has no effect.
  765. *
  766. * @param thread ID of thread to suspend.
  767. *
  768. * @return N/A
  769. */
  770. __syscall void k_thread_suspend(k_tid_t thread);
  771. /**
  772. * @brief Resume a suspended thread.
  773. *
  774. * This routine allows the kernel scheduler to make @a thread the current
  775. * thread, when it is next eligible for that role.
  776. *
  777. * If @a thread is not currently suspended, the routine has no effect.
  778. *
  779. * @param thread ID of thread to resume.
  780. *
  781. * @return N/A
  782. */
  783. __syscall void k_thread_resume(k_tid_t thread);
  784. /**
  785. * @brief Set time-slicing period and scope.
  786. *
  787. * This routine specifies how the scheduler will perform time slicing of
  788. * preemptible threads.
  789. *
  790. * To enable time slicing, @a slice must be non-zero. The scheduler
  791. * ensures that no thread runs for more than the specified time limit
  792. * before other threads of that priority are given a chance to execute.
  793. * Any thread whose priority is higher than @a prio is exempted, and may
  794. * execute as long as desired without being preempted due to time slicing.
  795. *
  796. * Time slicing only limits the maximum amount of time a thread may continuously
  797. * execute. Once the scheduler selects a thread for execution, there is no
  798. * minimum guaranteed time the thread will execute before threads of greater or
  799. * equal priority are scheduled.
  800. *
  801. * When the current thread is the only one of that priority eligible
  802. * for execution, this routine has no effect; the thread is immediately
  803. * rescheduled after the slice period expires.
  804. *
  805. * To disable timeslicing, set both @a slice and @a prio to zero.
  806. *
  807. * @param slice Maximum time slice length (in milliseconds).
  808. * @param prio Highest thread priority level eligible for time slicing.
  809. *
  810. * @return N/A
  811. */
  812. extern void k_sched_time_slice_set(int32_t slice, int prio);
  813. /** @} */
  814. /**
  815. * @addtogroup isr_apis
  816. * @{
  817. */
  818. /**
  819. * @brief Determine if code is running at interrupt level.
  820. *
  821. * This routine allows the caller to customize its actions, depending on
  822. * whether it is a thread or an ISR.
  823. *
  824. * @funcprops \isr_ok
  825. *
  826. * @return false if invoked by a thread.
  827. * @return true if invoked by an ISR.
  828. */
  829. extern bool k_is_in_isr(void);
  830. /**
  831. * @brief Determine if code is running in a preemptible thread.
  832. *
  833. * This routine allows the caller to customize its actions, depending on
  834. * whether it can be preempted by another thread. The routine returns a 'true'
  835. * value if all of the following conditions are met:
  836. *
  837. * - The code is running in a thread, not at ISR.
  838. * - The thread's priority is in the preemptible range.
  839. * - The thread has not locked the scheduler.
  840. *
  841. * @funcprops \isr_ok
  842. *
  843. * @return 0 if invoked by an ISR or by a cooperative thread.
  844. * @return Non-zero if invoked by a preemptible thread.
  845. */
  846. __syscall int k_is_preempt_thread(void);
  847. /**
  848. * @brief Test whether startup is in the before-main-task phase.
  849. *
  850. * This routine allows the caller to customize its actions, depending on
 * whether it is being invoked before the kernel is fully active.
  852. *
  853. * @funcprops \isr_ok
  854. *
  855. * @return true if invoked before post-kernel initialization
  856. * @return false if invoked during/after post-kernel initialization
  857. */
  858. static inline bool k_is_pre_kernel(void)
  859. {
  860. extern bool z_sys_post_kernel; /* in init.c */
  861. return !z_sys_post_kernel;
  862. }
  863. /**
  864. * @}
  865. */
  866. /**
  867. * @addtogroup thread_apis
  868. * @{
  869. */
  870. /**
  871. * @brief Lock the scheduler.
  872. *
  873. * This routine prevents the current thread from being preempted by another
  874. * thread by instructing the scheduler to treat it as a cooperative thread.
  875. * If the thread subsequently performs an operation that makes it unready,
  876. * it will be context switched out in the normal manner. When the thread
  877. * again becomes the current thread, its non-preemptible status is maintained.
  878. *
  879. * This routine can be called recursively.
  880. *
  881. * @note k_sched_lock() and k_sched_unlock() should normally be used
  882. * when the operation being performed can be safely interrupted by ISRs.
  883. * However, if the amount of processing involved is very small, better
  884. * performance may be obtained by using irq_lock() and irq_unlock().
  885. *
  886. * @return N/A
  887. */
  888. extern void k_sched_lock(void);
  889. /**
  890. * @brief Unlock the scheduler.
  891. *
  892. * This routine reverses the effect of a previous call to k_sched_lock().
  893. * A thread must call the routine once for each time it called k_sched_lock()
  894. * before the thread becomes preemptible.
  895. *
  896. * @return N/A
  897. */
  898. extern void k_sched_unlock(void);
  899. /**
  900. * @brief Set current thread's custom data.
  901. *
 * This routine sets the custom data for the current thread to @a value.
  903. *
  904. * Custom data is not used by the kernel itself, and is freely available
  905. * for a thread to use as it sees fit. It can be used as a framework
  906. * upon which to build thread-local storage.
  907. *
  908. * @param value New custom data value.
  909. *
  910. * @return N/A
  911. *
  912. */
  913. __syscall void k_thread_custom_data_set(void *value);
  914. /**
  915. * @brief Get current thread's custom data.
  916. *
  917. * This routine returns the custom data for the current thread.
  918. *
  919. * @return Current custom data value.
  920. */
  921. __syscall void *k_thread_custom_data_get(void);
  922. /**
  923. * @brief Set current thread name
  924. *
  925. * Set the name of the thread to be used when @kconfig{CONFIG_THREAD_MONITOR}
  926. * is enabled for tracing and debugging.
  927. *
  928. * @param thread Thread to set name, or NULL to set the current thread
  929. * @param str Name string
  930. * @retval 0 on success
  931. * @retval -EFAULT Memory access error with supplied string
  932. * @retval -ENOSYS Thread name configuration option not enabled
  933. * @retval -EINVAL Thread name too long
  934. */
  935. __syscall int k_thread_name_set(k_tid_t thread, const char *str);
  936. /**
  937. * @brief Get thread name
  938. *
  939. * Get the name of a thread
  940. *
  941. * @param thread Thread ID
  942. * @retval Thread name, or NULL if configuration not enabled
  943. */
  944. const char *k_thread_name_get(k_tid_t thread);
  945. /**
  946. * @brief Copy the thread name into a supplied buffer
  947. *
  948. * @param thread Thread to obtain name information
  949. * @param buf Destination buffer
  950. * @param size Destination buffer size
  951. * @retval -ENOSPC Destination buffer too small
  952. * @retval -EFAULT Memory access error
  953. * @retval -ENOSYS Thread name feature not enabled
  954. * @retval 0 Success
  955. */
  956. __syscall int k_thread_name_copy(k_tid_t thread, char *buf,
  957. size_t size);
  958. /**
  959. * @brief Get thread state string
  960. *
  961. * Get the human friendly thread state string
  962. *
  963. * @param thread_id Thread ID
  964. * @retval Thread state string, empty if no state flag is set
  965. */
  966. const char *k_thread_state_str(k_tid_t thread_id);
  967. /**
  968. * @}
  969. */
  970. /**
  971. * @addtogroup clock_apis
  972. * @{
  973. */
  974. /**
  975. * @brief Generate null timeout delay.
  976. *
  977. * This macro generates a timeout delay that instructs a kernel API
  978. * not to wait if the requested operation cannot be performed immediately.
  979. *
  980. * @return Timeout delay value.
  981. */
  982. #define K_NO_WAIT Z_TIMEOUT_NO_WAIT
  983. /**
  984. * @brief Generate timeout delay from nanoseconds.
  985. *
  986. * This macro generates a timeout delay that instructs a kernel API to
  987. * wait up to @a t nanoseconds to perform the requested operation.
  988. * Note that timer precision is limited to the tick rate, not the
  989. * requested value.
  990. *
  991. * @param t Duration in nanoseconds.
  992. *
  993. * @return Timeout delay value.
  994. */
  995. #define K_NSEC(t) Z_TIMEOUT_NS(t)
  996. /**
  997. * @brief Generate timeout delay from microseconds.
  998. *
  999. * This macro generates a timeout delay that instructs a kernel API
  1000. * to wait up to @a t microseconds to perform the requested operation.
  1001. * Note that timer precision is limited to the tick rate, not the
  1002. * requested value.
  1003. *
  1004. * @param t Duration in microseconds.
  1005. *
  1006. * @return Timeout delay value.
  1007. */
  1008. #define K_USEC(t) Z_TIMEOUT_US(t)
  1009. /**
  1010. * @brief Generate timeout delay from cycles.
  1011. *
  1012. * This macro generates a timeout delay that instructs a kernel API
  1013. * to wait up to @a t cycles to perform the requested operation.
  1014. *
  1015. * @param t Duration in cycles.
  1016. *
  1017. * @return Timeout delay value.
  1018. */
  1019. #define K_CYC(t) Z_TIMEOUT_CYC(t)
/**
 * @brief Generate timeout delay from system ticks.
 *
 * This macro generates a timeout delay that instructs a kernel API
 * to wait up to @a t ticks to perform the requested operation.
 *
 * @param t Duration in system ticks.
 *
 * @return Timeout delay value.
 */
#define K_TICKS(t) Z_TIMEOUT_TICKS(t)
/**
 * @brief Generate timeout delay from milliseconds.
 *
 * This macro generates a timeout delay that instructs a kernel API
 * to wait up to @a ms milliseconds to perform the requested operation.
 *
 * @param ms Duration in milliseconds.
 *
 * @return Timeout delay value.
 */
#define K_MSEC(ms) Z_TIMEOUT_MS(ms)
/**
 * @brief Generate timeout delay from seconds.
 *
 * This macro generates a timeout delay that instructs a kernel API
 * to wait up to @a s seconds to perform the requested operation.
 *
 * @param s Duration in seconds.
 *
 * @return Timeout delay value.
 */
#define K_SECONDS(s) K_MSEC((s) * MSEC_PER_SEC)
/**
 * @brief Generate timeout delay from minutes.
 *
 * This macro generates a timeout delay that instructs a kernel API
 * to wait up to @a m minutes to perform the requested operation.
 *
 * @param m Duration in minutes.
 *
 * @return Timeout delay value.
 */
#define K_MINUTES(m) K_SECONDS((m) * 60)
/**
 * @brief Generate timeout delay from hours.
 *
 * This macro generates a timeout delay that instructs a kernel API
 * to wait up to @a h hours to perform the requested operation.
 *
 * @param h Duration in hours.
 *
 * @return Timeout delay value.
 */
#define K_HOURS(h) K_MINUTES((h) * 60)
/**
 * @brief Generate infinite timeout delay.
 *
 * This macro generates a timeout delay that instructs a kernel API
 * to wait as long as necessary to perform the requested operation.
 *
 * @return Timeout delay value.
 */
#define K_FOREVER Z_FOREVER
#ifdef CONFIG_TIMEOUT_64BIT
/**
 * @brief Generates an absolute/uptime timeout value from system ticks
 *
 * This macro generates a timeout delay that represents an expiration
 * at the absolute uptime value specified, in system ticks. That is, the
 * timeout will expire immediately after the system uptime reaches the
 * specified tick count.
 *
 * @param t Tick uptime value
 * @return Timeout delay value
 */
#define K_TIMEOUT_ABS_TICKS(t) \
	Z_TIMEOUT_TICKS(Z_TICK_ABS((k_ticks_t)MAX(t, 0)))
/**
 * @brief Generates an absolute/uptime timeout value from milliseconds
 *
 * This macro generates a timeout delay that represents an expiration
 * at the absolute uptime value specified, in milliseconds. That is,
 * the timeout will expire immediately after the system uptime reaches
 * the specified time. Note that timer precision is limited by the
 * system tick rate and not the requested timeout value.
 *
 * @param t Millisecond uptime value
 * @return Timeout delay value
 */
#define K_TIMEOUT_ABS_MS(t) K_TIMEOUT_ABS_TICKS(k_ms_to_ticks_ceil64(t))
/**
 * @brief Generates an absolute/uptime timeout value from microseconds
 *
 * This macro generates a timeout delay that represents an expiration
 * at the absolute uptime value specified, in microseconds. That is,
 * the timeout will expire immediately after the system uptime reaches
 * the specified time. Note that timer precision is limited by the
 * system tick rate and not the requested timeout value.
 *
 * @param t Microsecond uptime value
 * @return Timeout delay value
 */
#define K_TIMEOUT_ABS_US(t) K_TIMEOUT_ABS_TICKS(k_us_to_ticks_ceil64(t))
/**
 * @brief Generates an absolute/uptime timeout value from nanoseconds
 *
 * This macro generates a timeout delay that represents an expiration
 * at the absolute uptime value specified, in nanoseconds. That is,
 * the timeout will expire immediately after the system uptime reaches
 * the specified time. Note that timer precision is limited by the
 * system tick rate and not the requested timeout value.
 *
 * @param t Nanosecond uptime value
 * @return Timeout delay value
 */
#define K_TIMEOUT_ABS_NS(t) K_TIMEOUT_ABS_TICKS(k_ns_to_ticks_ceil64(t))
/**
 * @brief Generates an absolute/uptime timeout value from system cycles
 *
 * This macro generates a timeout delay that represents an expiration
 * at the absolute uptime value specified, in cycles. That is, the
 * timeout will expire immediately after the system uptime reaches the
 * specified time. Note that timer precision is limited by the system
 * tick rate and not the requested timeout value.
 *
 * @param t Cycle uptime value
 * @return Timeout delay value
 */
#define K_TIMEOUT_ABS_CYC(t) K_TIMEOUT_ABS_TICKS(k_cyc_to_ticks_ceil64(t))
#endif
  1149. /**
  1150. * @}
  1151. */
  1152. /**
  1153. * @cond INTERNAL_HIDDEN
  1154. */
/*
 * Kernel timer object. Applications manipulate it only through the
 * k_timer_*() APIs; the fields below are kernel-internal.
 */
struct k_timer {
	/*
	 * _timeout structure must be first here if we want to use
	 * dynamic timer allocation. timeout.node is used in the double-linked
	 * list of free timers
	 */
	struct _timeout timeout;
	/* wait queue for the (single) thread waiting on this timer */
	_wait_q_t wait_q;
	/* runs in ISR context */
	void (*expiry_fn)(struct k_timer *timer);
	/* runs in the context of the thread that calls k_timer_stop() */
	void (*stop_fn)(struct k_timer *timer);
	/* timer period */
	k_timeout_t period;
	/* timer status */
	uint32_t status;
	/* user-specific data, also used to support legacy features */
	void *user_data;
};
  1175. #define Z_TIMER_INITIALIZER(obj, expiry, stop) \
  1176. { \
  1177. .timeout = { \
  1178. .node = {},\
  1179. .fn = z_timer_expiration_handler, \
  1180. .dticks = 0, \
  1181. }, \
  1182. .wait_q = Z_WAIT_Q_INIT(&obj.wait_q), \
  1183. .expiry_fn = expiry, \
  1184. .stop_fn = stop, \
  1185. .status = 0, \
  1186. .user_data = 0, \
  1187. }
  1188. /**
  1189. * INTERNAL_HIDDEN @endcond
  1190. */
  1191. /**
  1192. * @defgroup timer_apis Timer APIs
  1193. * @ingroup kernel_apis
  1194. * @{
  1195. */
/**
 * @typedef k_timer_expiry_t
 * @brief Timer expiry function type.
 *
 * A timer's expiry function is executed by the system clock interrupt handler
 * each time the timer expires. The expiry function is optional, and is only
 * invoked if the timer has been initialized with one.
 *
 * @param timer Address of timer.
 *
 * @return N/A
 */
typedef void (*k_timer_expiry_t)(struct k_timer *timer);
/**
 * @typedef k_timer_stop_t
 * @brief Timer stop function type.
 *
 * A timer's stop function is executed if the timer is stopped prematurely.
 * The function runs in the context of the call that stops the timer. As
 * k_timer_stop() can be invoked from an ISR, the stop function must be
 * callable from interrupt context (isr-ok).
 *
 * The stop function is optional, and is only invoked if the timer has been
 * initialized with one.
 *
 * @param timer Address of timer.
 *
 * @return N/A
 */
typedef void (*k_timer_stop_t)(struct k_timer *timer);
/**
 * @brief Statically define and initialize a timer.
 *
 * The timer can be accessed outside the module where it is defined using:
 *
 * @code extern struct k_timer <name>; @endcode
 *
 * @param name Name of the timer variable.
 * @param expiry_fn Function to invoke each time the timer expires.
 * @param stop_fn Function to invoke if the timer is stopped while running.
 */
#define K_TIMER_DEFINE(name, expiry_fn, stop_fn) \
	STRUCT_SECTION_ITERABLE(k_timer, name) = \
		Z_TIMER_INITIALIZER(name, expiry_fn, stop_fn)
/**
 * @brief Initialize a timer.
 *
 * This routine initializes a timer, prior to its first use.
 *
 * @param timer Address of timer.
 * @param expiry_fn Function to invoke each time the timer expires,
 *                  or NULL for none (see k_timer_expiry_t).
 * @param stop_fn Function to invoke if the timer is stopped while running,
 *                or NULL for none (see k_timer_stop_t).
 *
 * @return N/A
 */
extern void k_timer_init(struct k_timer *timer,
			 k_timer_expiry_t expiry_fn,
			 k_timer_stop_t stop_fn);
/**
 * @brief Start a timer.
 *
 * This routine starts a timer, and resets its status to zero. The timer
 * begins counting down using the specified duration and period values.
 *
 * Attempting to start a timer that is already running is permitted.
 * The timer's status is reset to zero and the timer begins counting down
 * using the new duration and period values.
 *
 * @param timer Address of timer.
 * @param duration Initial timer duration.
 * @param period Timer period.
 *
 * @return N/A
 */
__syscall void k_timer_start(struct k_timer *timer,
			     k_timeout_t duration, k_timeout_t period);
/**
 * @brief Stop a timer.
 *
 * This routine stops a running timer prematurely. The timer's stop function,
 * if one exists, is invoked by the caller.
 *
 * Attempting to stop a timer that is not running is permitted, but has no
 * effect on the timer.
 *
 * @note The stop handler has to be callable from ISRs if @a k_timer_stop is to
 * be called from ISRs.
 *
 * @funcprops \isr_ok
 *
 * @param timer Address of timer.
 *
 * @return N/A
 */
__syscall void k_timer_stop(struct k_timer *timer);
/**
 * @brief Read timer status.
 *
 * This routine reads the timer's status, which indicates the number of times
 * it has expired since its status was last read.
 *
 * Calling this routine resets the timer's status to zero.
 *
 * @param timer Address of timer.
 *
 * @return Timer status.
 */
__syscall uint32_t k_timer_status_get(struct k_timer *timer);
/**
 * @brief Synchronize thread to timer expiration.
 *
 * This routine blocks the calling thread until the timer's status is non-zero
 * (indicating that it has expired at least once since it was last examined)
 * or the timer is stopped. If the timer status is already non-zero,
 * or the timer is already stopped, the caller continues without waiting.
 *
 * Calling this routine resets the timer's status to zero.
 *
 * This routine must not be used by interrupt handlers, since they are not
 * allowed to block.
 *
 * @param timer Address of timer.
 *
 * @return Timer status.
 */
__syscall uint32_t k_timer_status_sync(struct k_timer *timer);
#ifdef CONFIG_SYS_CLOCK_EXISTS
/**
 * @brief Get next expiration time of a timer, in system ticks
 *
 * This routine returns the future system uptime reached at the next
 * time of expiration of the timer, in units of system ticks. If the
 * timer is not running, current system time is returned.
 *
 * @param timer The timer object
 * @return Uptime of expiration, in ticks
 */
__syscall k_ticks_t k_timer_expires_ticks(const struct k_timer *timer);
static inline k_ticks_t z_impl_k_timer_expires_ticks(
	const struct k_timer *timer)
{
	return z_timeout_expires(&timer->timeout);
}
/**
 * @brief Get time remaining before a timer next expires, in system ticks
 *
 * This routine computes the time remaining before a running timer
 * next expires, in units of system ticks. If the timer is not
 * running, it returns zero.
 *
 * @param timer The timer object
 * @return Remaining time until expiration, in ticks
 */
__syscall k_ticks_t k_timer_remaining_ticks(const struct k_timer *timer);
static inline k_ticks_t z_impl_k_timer_remaining_ticks(
	const struct k_timer *timer)
{
	return z_timeout_remaining(&timer->timeout);
}
  1352. /**
  1353. * @brief Get time remaining before a timer next expires.
  1354. *
  1355. * This routine computes the (approximate) time remaining before a running
  1356. * timer next expires. If the timer is not running, it returns zero.
  1357. *
  1358. * @param timer Address of timer.
  1359. *
  1360. * @return Remaining time (in milliseconds).
  1361. */
  1362. static inline uint32_t k_timer_remaining_get(struct k_timer *timer)
  1363. {
  1364. return k_ticks_to_ms_floor32(k_timer_remaining_ticks(timer));
  1365. }
  1366. #endif /* CONFIG_SYS_CLOCK_EXISTS */
/**
 * @brief Associate user-specific data with a timer.
 *
 * This routine records the @a user_data with the @a timer, to be retrieved
 * later by k_timer_user_data_get().
 *
 * It can be used e.g. in a timer handler shared across multiple subsystems to
 * retrieve data specific to the subsystem this timer is associated with.
 *
 * @param timer Address of timer.
 * @param user_data User data to associate with the timer.
 *
 * @return N/A
 */
__syscall void k_timer_user_data_set(struct k_timer *timer, void *user_data);
/**
 * @internal
 */
static inline void z_impl_k_timer_user_data_set(struct k_timer *timer,
						void *user_data)
{
	timer->user_data = user_data;
}
/**
 * @brief Retrieve the user-specific data from a timer.
 *
 * @param timer Address of timer.
 *
 * @return The user data.
 */
__syscall void *k_timer_user_data_get(const struct k_timer *timer);
static inline void *z_impl_k_timer_user_data_get(const struct k_timer *timer)
{
	return timer->user_data;
}
  1402. /** @} */
  1403. /**
  1404. * @addtogroup clock_apis
  1405. * @ingroup kernel_apis
  1406. * @{
  1407. */
/**
 * @brief Get system uptime, in system ticks.
 *
 * This routine returns the elapsed time since the system booted, in
 * ticks (c.f. @kconfig{CONFIG_SYS_CLOCK_TICKS_PER_SEC}), which is the
 * fundamental unit of resolution of kernel timekeeping.
 *
 * @return Current uptime in ticks.
 */
__syscall int64_t k_uptime_ticks(void);
  1418. /**
  1419. * @brief Get system uptime.
  1420. *
  1421. * This routine returns the elapsed time since the system booted,
  1422. * in milliseconds.
  1423. *
  1424. * @note
  1425. * While this function returns time in milliseconds, it does
  1426. * not mean it has millisecond resolution. The actual resolution depends on
  1427. * @kconfig{CONFIG_SYS_CLOCK_TICKS_PER_SEC} config option.
  1428. *
  1429. * @return Current uptime in milliseconds.
  1430. */
  1431. static inline int64_t k_uptime_get(void)
  1432. {
  1433. return k_ticks_to_ms_floor64(k_uptime_ticks());
  1434. }
  1435. /**
  1436. * @brief Get system uptime (32-bit version).
  1437. *
  1438. * This routine returns the lower 32 bits of the system uptime in
  1439. * milliseconds.
  1440. *
  1441. * Because correct conversion requires full precision of the system
  1442. * clock there is no benefit to using this over k_uptime_get() unless
  1443. * you know the application will never run long enough for the system
  1444. * clock to approach 2^32 ticks. Calls to this function may involve
  1445. * interrupt blocking and 64-bit math.
  1446. *
  1447. * @note
  1448. * While this function returns time in milliseconds, it does
  1449. * not mean it has millisecond resolution. The actual resolution depends on
  1450. * @kconfig{CONFIG_SYS_CLOCK_TICKS_PER_SEC} config option
  1451. *
  1452. * @return The low 32 bits of the current uptime, in milliseconds.
  1453. */
  1454. static inline uint32_t k_uptime_get_32(void)
  1455. {
  1456. return (uint32_t)k_uptime_get();
  1457. }
  1458. /**
  1459. * @brief Get elapsed time.
  1460. *
  1461. * This routine computes the elapsed time between the current system uptime
  1462. * and an earlier reference time, in milliseconds.
  1463. *
  1464. * @param reftime Pointer to a reference time, which is updated to the current
  1465. * uptime upon return.
  1466. *
  1467. * @return Elapsed time.
  1468. */
  1469. static inline int64_t k_uptime_delta(int64_t *reftime)
  1470. {
  1471. int64_t uptime, delta;
  1472. uptime = k_uptime_get();
  1473. delta = uptime - *reftime;
  1474. *reftime = uptime;
  1475. return delta;
  1476. }
/**
 * @brief Read the hardware clock.
 *
 * This routine returns the current time, as measured by the system's hardware
 * clock.
 *
 * @return Current hardware clock up-counter (in cycles).
 */
static inline uint32_t k_cycle_get_32(void)
{
	return arch_k_cycle_get_32();
}
  1489. /**
  1490. * @}
  1491. */
  1492. /**
  1493. * @cond INTERNAL_HIDDEN
  1494. */
/* Kernel queue object; use only through the k_queue_*() APIs. */
struct k_queue {
	sys_sflist_t data_q;      /* flagged singly-linked list of data items */
	struct k_spinlock lock;   /* protects the list */
	_wait_q_t wait_q;         /* threads blocked in k_queue_get() */
	_POLL_EVENT;
};
#define Z_QUEUE_INITIALIZER(obj) \
	{ \
	.data_q = SYS_SFLIST_STATIC_INIT(&obj.data_q), \
	.lock = { }, \
	.wait_q = Z_WAIT_Q_INIT(&obj.wait_q), \
	_POLL_EVENT_OBJ_INIT(obj) \
	}
extern void *z_queue_node_peek(sys_sfnode_t *node, bool needs_free);
  1509. /**
  1510. * INTERNAL_HIDDEN @endcond
  1511. */
  1512. /**
  1513. * @defgroup queue_apis Queue APIs
  1514. * @ingroup kernel_apis
  1515. * @{
  1516. */
/**
 * @brief Initialize a queue.
 *
 * This routine initializes a queue object, prior to its first use.
 *
 * @param queue Address of the queue.
 *
 * @return N/A
 */
__syscall void k_queue_init(struct k_queue *queue);
/**
 * @brief Cancel waiting on a queue.
 *
 * This routine causes the first thread pending on @a queue, if any, to
 * return from k_queue_get() call with NULL value (as if timeout expired).
 * If the queue is being waited on by k_poll(), it will return with
 * -EINTR and K_POLL_STATE_CANCELLED state (and per above, subsequent
 * k_queue_get() will return NULL).
 *
 * @funcprops \isr_ok
 *
 * @param queue Address of the queue.
 *
 * @return N/A
 */
__syscall void k_queue_cancel_wait(struct k_queue *queue);
/**
 * @brief Append an element to the end of a queue.
 *
 * This routine appends a data item to @a queue. A queue data item must be
 * aligned on a word boundary, and the first word of the item is reserved
 * for the kernel's use.
 *
 * @funcprops \isr_ok
 *
 * @param queue Address of the queue.
 * @param data Address of the data item.
 *
 * @return N/A
 */
extern void k_queue_append(struct k_queue *queue, void *data);
/**
 * @brief Append an element to a queue.
 *
 * This routine appends a data item to @a queue. There is an implicit memory
 * allocation to create an additional temporary bookkeeping data structure from
 * the calling thread's resource pool, which is automatically freed when the
 * item is removed. The data itself is not copied.
 *
 * @funcprops \isr_ok
 *
 * @param queue Address of the queue.
 * @param data Address of the data item.
 *
 * @retval 0 on success
 * @retval -ENOMEM if there isn't sufficient RAM in the caller's resource pool
 */
__syscall int32_t k_queue_alloc_append(struct k_queue *queue, void *data);
/**
 * @brief Prepend an element to a queue.
 *
 * This routine prepends a data item to @a queue. A queue data item must be
 * aligned on a word boundary, and the first word of the item is reserved
 * for the kernel's use.
 *
 * @funcprops \isr_ok
 *
 * @param queue Address of the queue.
 * @param data Address of the data item.
 *
 * @return N/A
 */
extern void k_queue_prepend(struct k_queue *queue, void *data);
/**
 * @brief Prepend an element to a queue.
 *
 * This routine prepends a data item to @a queue. There is an implicit memory
 * allocation to create an additional temporary bookkeeping data structure from
 * the calling thread's resource pool, which is automatically freed when the
 * item is removed. The data itself is not copied.
 *
 * @funcprops \isr_ok
 *
 * @param queue Address of the queue.
 * @param data Address of the data item.
 *
 * @retval 0 on success
 * @retval -ENOMEM if there isn't sufficient RAM in the caller's resource pool
 */
__syscall int32_t k_queue_alloc_prepend(struct k_queue *queue, void *data);
/**
 * @brief Inserts an element to a queue.
 *
 * This routine inserts a data item to @a queue after the previous item. A
 * queue data item must be aligned on a word boundary, and the first word of
 * the item is reserved for the kernel's use.
 *
 * @funcprops \isr_ok
 *
 * @param queue Address of the queue.
 * @param prev Address of the previous data item.
 * @param data Address of the data item.
 *
 * @return N/A
 */
extern void k_queue_insert(struct k_queue *queue, void *prev, void *data);
/**
 * @brief Atomically append a list of elements to a queue.
 *
 * This routine adds a list of data items to @a queue in one operation.
 * The data items must be in a singly-linked list, with the first word
 * in each data item pointing to the next data item; the list must be
 * NULL-terminated.
 *
 * @funcprops \isr_ok
 *
 * @param queue Address of the queue.
 * @param head Pointer to first node in singly-linked list.
 * @param tail Pointer to last node in singly-linked list.
 *
 * @retval 0 on success
 * @retval -EINVAL on invalid supplied data
 *
 */
extern int k_queue_append_list(struct k_queue *queue, void *head, void *tail);
/**
 * @brief Atomically add a list of elements to a queue.
 *
 * This routine adds a list of data items to @a queue in one operation.
 * The data items must be in a singly-linked list implemented using a
 * sys_slist_t object. Upon completion, the original list is empty.
 *
 * @funcprops \isr_ok
 *
 * @param queue Address of the queue.
 * @param list Pointer to sys_slist_t object.
 *
 * @retval 0 on success
 * @retval -EINVAL on invalid data
 */
extern int k_queue_merge_slist(struct k_queue *queue, sys_slist_t *list);
/**
 * @brief Get an element from a queue.
 *
 * This routine removes the first data item from @a queue. The first word of
 * the data item is reserved for the kernel's use.
 *
 * @note @a timeout must be set to K_NO_WAIT if called from ISR.
 *
 * @funcprops \isr_ok
 *
 * @param queue Address of the queue.
 * @param timeout Non-negative waiting period to obtain a data item
 *                or one of the special values K_NO_WAIT and
 *                K_FOREVER.
 *
 * @return Address of the data item if successful; NULL if returned
 * without waiting, or waiting period timed out.
 */
__syscall void *k_queue_get(struct k_queue *queue, k_timeout_t timeout);
/**
 * @brief Remove an element from a queue.
 *
 * This routine removes a data item from @a queue. The first word of the
 * data item is reserved for the kernel's use. Removing elements from k_queue
 * relies on sys_slist_find_and_remove, which is not a constant time operation.
 *
 * @note @a timeout must be set to K_NO_WAIT if called from ISR.
 *
 * @funcprops \isr_ok
 *
 * @param queue Address of the queue.
 * @param data Address of the data item.
 *
 * @return true if data item was removed
 */
bool k_queue_remove(struct k_queue *queue, void *data);
/**
 * @brief Append an element to a queue only if it's not present already.
 *
 * This routine appends a data item to @a queue. The first word of the data
 * item is reserved for the kernel's use. Appending elements to k_queue
 * relies on sys_slist_is_node_in_list, which is not a constant time operation.
 *
 * @funcprops \isr_ok
 *
 * @param queue Address of the queue.
 * @param data Address of the data item.
 *
 * @return true if data item was added, false if not
 */
bool k_queue_unique_append(struct k_queue *queue, void *data);
/**
 * @brief Query a queue to see if it has data available.
 *
 * Note that the data might be already gone by the time this function returns
 * if other threads are also trying to read from the queue.
 *
 * @funcprops \isr_ok
 *
 * @param queue Address of the queue.
 *
 * @return Non-zero if the queue is empty.
 * @return 0 if data is available.
 */
__syscall int k_queue_is_empty(struct k_queue *queue);
static inline int z_impl_k_queue_is_empty(struct k_queue *queue)
{
	return (int)sys_sflist_is_empty(&queue->data_q);
}
/**
 * @brief Peek element at the head of queue.
 *
 * Return element from the head of the queue without removing it.
 *
 * @param queue Address of the queue.
 *
 * @return Head element, or NULL if queue is empty.
 */
__syscall void *k_queue_peek_head(struct k_queue *queue);
/**
 * @brief Peek element at the tail of queue.
 *
 * Return element from the tail of the queue without removing it.
 *
 * @param queue Address of the queue.
 *
 * @return Tail element, or NULL if queue is empty.
 */
__syscall void *k_queue_peek_tail(struct k_queue *queue);
/**
 * @brief Statically define and initialize a queue.
 *
 * The queue can be accessed outside the module where it is defined using:
 *
 * @code extern struct k_queue <name>; @endcode
 *
 * @param name Name of the queue.
 */
#define K_QUEUE_DEFINE(name) \
	STRUCT_SECTION_ITERABLE(k_queue, name) = \
		Z_QUEUE_INITIALIZER(name)
  1759. /** @} */
#ifdef CONFIG_USERSPACE
/**
 * @brief futex structure
 *
 * A k_futex is a lightweight mutual exclusion primitive designed
 * to minimize kernel involvement. Uncontended operation relies
 * only on atomic access to shared memory. k_futex are tracked as
 * kernel objects and can live in user memory so that any access
 * bypasses the kernel object permission management mechanism.
 */
struct k_futex {
	atomic_t val;
};
/**
 * @brief futex kernel data structure
 *
 * z_futex_data are the helper data structure for k_futex to complete
 * futex contended operation on kernel side, structure z_futex_data
 * of every futex object is invisible in user mode.
 */
struct z_futex_data {
	_wait_q_t wait_q;
	struct k_spinlock lock;
};
#define Z_FUTEX_DATA_INITIALIZER(obj) \
	{ \
	.wait_q = Z_WAIT_Q_INIT(&obj.wait_q) \
	}
/**
 * @defgroup futex_apis FUTEX APIs
 * @ingroup kernel_apis
 * @{
 */
/**
 * @brief Pend the current thread on a futex
 *
 * Tests that the supplied futex contains the expected value, and if so,
 * goes to sleep until some other thread calls k_futex_wake() on it.
 *
 * @param futex Address of the futex.
 * @param expected Expected value of the futex; if it is different, the caller
 *                 will not wait on it.
 * @param timeout Non-negative waiting period on the futex, or
 *                one of the special values K_NO_WAIT or K_FOREVER.
 * @retval -EACCES Caller does not have read access to futex address.
 * @retval -EAGAIN If the futex value did not match the expected parameter.
 * @retval -EINVAL Futex parameter address not recognized by the kernel.
 * @retval -ETIMEDOUT Thread woke up due to timeout and not a futex wakeup.
 * @retval 0 if the caller went to sleep and was woken up. The caller
 *         should check the futex's value on wakeup to determine if it needs
 *         to block again.
 */
__syscall int k_futex_wait(struct k_futex *futex, int expected,
			   k_timeout_t timeout);
/**
 * @brief Wake one/all threads pending on a futex
 *
 * Wake up the highest priority thread pending on the supplied futex, or
 * wake up all the threads pending on the supplied futex; the behavior
 * depends on @a wake_all.
 *
 * @param futex Futex to wake up pending threads.
 * @param wake_all If true, wake up all pending threads; if false,
 *                 wake up the highest priority thread.
 * @retval -EACCES Caller does not have access to the futex address.
 * @retval -EINVAL Futex parameter address not recognized by the kernel.
 * @return Number of threads that were woken up on success.
 */
__syscall int k_futex_wake(struct k_futex *futex, bool wake_all);
/** @} */
#endif
/* FIFO queue: a thin wrapper around struct k_queue (first-in, first-out). */
struct k_fifo {
	struct k_queue _queue;
};

/**
 * @cond INTERNAL_HIDDEN
 */
#define Z_FIFO_INITIALIZER(obj) \
	{ \
	._queue = Z_QUEUE_INITIALIZER(obj._queue) \
	}
  1841. /**
  1842. * INTERNAL_HIDDEN @endcond
  1843. */
  1844. /**
  1845. * @defgroup fifo_apis FIFO APIs
  1846. * @ingroup kernel_apis
  1847. * @{
  1848. */
/**
 * @brief Initialize a FIFO queue.
 *
 * This routine initializes a FIFO queue, prior to its first use.
 *
 * @param fifo Address of the FIFO queue.
 *
 * @return N/A
 */
#define k_fifo_init(fifo) \
	({ \
	SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_fifo, init, fifo); \
	k_queue_init(&(fifo)->_queue); \
	SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_fifo, init, fifo); \
	})
/**
 * @brief Cancel waiting on a FIFO queue.
 *
 * This routine causes the first thread pending on @a fifo, if any, to
 * return from k_fifo_get() call with NULL value (as if timeout
 * expired).
 *
 * @funcprops \isr_ok
 *
 * @param fifo Address of the FIFO queue.
 *
 * @return N/A
 */
#define k_fifo_cancel_wait(fifo) \
	({ \
	SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_fifo, cancel_wait, fifo); \
	k_queue_cancel_wait(&(fifo)->_queue); \
	SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_fifo, cancel_wait, fifo); \
	})
/**
 * @brief Add an element to a FIFO queue.
 *
 * This routine adds a data item to @a fifo. A FIFO data item must be
 * aligned on a word boundary, and the first word of the item is reserved
 * for the kernel's use.
 *
 * @funcprops \isr_ok
 *
 * @param fifo Address of the FIFO queue.
 * @param data Address of the data item.
 *
 * @return N/A
 */
#define k_fifo_put(fifo, data) \
	({ \
	SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_fifo, put, fifo, data); \
	k_queue_append(&(fifo)->_queue, data); \
	SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_fifo, put, fifo, data); \
	})
/**
 * @brief Add an element to a FIFO queue.
 *
 * This routine adds a data item to @a fifo. There is an implicit memory
 * allocation to create an additional temporary bookkeeping data structure from
 * the calling thread's resource pool, which is automatically freed when the
 * item is removed. The data itself is not copied.
 *
 * @funcprops \isr_ok
 *
 * @param fifo Address of the FIFO queue.
 * @param data Address of the data item.
 *
 * @retval 0 on success
 * @retval -ENOMEM if there isn't sufficient RAM in the caller's resource pool
 */
#define k_fifo_alloc_put(fifo, data) \
	({ \
	SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_fifo, alloc_put, fifo, data); \
	int ret = k_queue_alloc_append(&(fifo)->_queue, data); \
	SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_fifo, alloc_put, fifo, data, ret); \
	ret; \
	})
  1926. /**
  1927. * @brief Atomically add a list of elements to a FIFO.
  1928. *
  1929. * This routine adds a list of data items to @a fifo in one operation.
  1930. * The data items must be in a singly-linked list, with the first word of
  1931. * each data item pointing to the next data item; the list must be
  1932. * NULL-terminated.
  1933. *
  1934. * @funcprops \isr_ok
  1935. *
  1936. * @param fifo Address of the FIFO queue.
  1937. * @param head Pointer to first node in singly-linked list.
  1938. * @param tail Pointer to last node in singly-linked list.
  1939. *
  1940. * @return N/A
  1941. */
/* Traced wrapper over k_queue_append_list(); appends the NULL-terminated
 * singly-linked list [head..tail] in one operation.
 */
#define k_fifo_put_list(fifo, head, tail) \
	({ \
	SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_fifo, put_list, fifo, head, tail); \
	k_queue_append_list(&(fifo)->_queue, head, tail); \
	SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_fifo, put_list, fifo, head, tail); \
	})
  1948. /**
  1949. * @brief Atomically add a list of elements to a FIFO queue.
  1950. *
  1951. * This routine adds a list of data items to @a fifo in one operation.
  1952. * The data items must be in a singly-linked list implemented using a
  1953. * sys_slist_t object. Upon completion, the sys_slist_t object is invalid
  1954. * and must be re-initialized via sys_slist_init().
  1955. *
  1956. * @funcprops \isr_ok
  1957. *
  1958. * @param fifo Address of the FIFO queue.
  1959. * @param list Pointer to sys_slist_t object.
  1960. *
  1961. * @return N/A
  1962. */
/* Traced wrapper over k_queue_merge_slist(); per the contract above, the
 * source sys_slist_t is consumed and must be re-initialized before reuse.
 */
#define k_fifo_put_slist(fifo, list) \
	({ \
	SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_fifo, put_slist, fifo, list); \
	k_queue_merge_slist(&(fifo)->_queue, list); \
	SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_fifo, put_slist, fifo, list); \
	})
  1969. /**
  1970. * @brief Get an element from a FIFO queue.
  1971. *
  1972. * This routine removes a data item from @a fifo in a "first in, first out"
  1973. * manner. The first word of the data item is reserved for the kernel's use.
  1974. *
  1975. * @note @a timeout must be set to K_NO_WAIT if called from ISR.
  1976. *
  1977. * @funcprops \isr_ok
  1978. *
  1979. * @param fifo Address of the FIFO queue.
  1980. * @param timeout Waiting period to obtain a data item,
  1981. * or one of the special values K_NO_WAIT and K_FOREVER.
  1982. *
  1983. * @return Address of the data item if successful; NULL if returned
  1984. * without waiting, or waiting period timed out.
  1985. */
/* Traced wrapper over k_queue_get(); the statement expression yields the
 * dequeued item pointer (NULL on timeout / no wait).
 */
#define k_fifo_get(fifo, timeout) \
	({ \
	SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_fifo, get, fifo, timeout); \
	void *ret = k_queue_get(&(fifo)->_queue, timeout); \
	SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_fifo, get, fifo, timeout, ret); \
	ret; \
	})
  1993. /**
  1994. * @brief Query a FIFO queue to see if it has data available.
  1995. *
  1996. * Note that the data might be already gone by the time this function returns
* if other threads are also trying to read from the FIFO.
  1998. *
  1999. * @funcprops \isr_ok
  2000. *
  2001. * @param fifo Address of the FIFO queue.
  2002. *
  2003. * @return Non-zero if the FIFO queue is empty.
  2004. * @return 0 if data is available.
  2005. */
/* Thin alias over k_queue_is_empty(); unlike the other k_fifo wrappers it
 * emits no tracing hooks.
 */
#define k_fifo_is_empty(fifo) \
	k_queue_is_empty(&(fifo)->_queue)
  2008. /**
  2009. * @brief Peek element at the head of a FIFO queue.
  2010. *
  2011. * Return element from the head of FIFO queue without removing it. A usecase
  2012. * for this is if elements of the FIFO object are themselves containers. Then
  2013. * on each iteration of processing, a head container will be peeked,
  2014. * and some data processed out of it, and only if the container is empty,
* it will be completely removed from the FIFO queue.
  2016. *
  2017. * @param fifo Address of the FIFO queue.
  2018. *
  2019. * @return Head element, or NULL if the FIFO queue is empty.
  2020. */
/* Traced wrapper over k_queue_peek_head(); returns the head element without
 * removing it (NULL if empty).
 */
#define k_fifo_peek_head(fifo) \
	({ \
	SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_fifo, peek_head, fifo); \
	void *ret = k_queue_peek_head(&(fifo)->_queue); \
	SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_fifo, peek_head, fifo, ret); \
	ret; \
	})
  2028. /**
  2029. * @brief Peek element at the tail of FIFO queue.
  2030. *
  2031. * Return element from the tail of FIFO queue (without removing it). A usecase
  2032. * for this is if elements of the FIFO queue are themselves containers. Then
  2033. * it may be useful to add more data to the last container in a FIFO queue.
  2034. *
  2035. * @param fifo Address of the FIFO queue.
  2036. *
* @return Tail element, or NULL if the FIFO queue is empty.
  2038. */
/* Traced wrapper over k_queue_peek_tail(); returns the tail element without
 * removing it (NULL if empty).
 */
#define k_fifo_peek_tail(fifo) \
	({ \
	SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_fifo, peek_tail, fifo); \
	void *ret = k_queue_peek_tail(&(fifo)->_queue); \
	SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_fifo, peek_tail, fifo, ret); \
	ret; \
	})
  2046. /**
  2047. * @brief Statically define and initialize a FIFO queue.
  2048. *
  2049. * The FIFO queue can be accessed outside the module where it is defined using:
  2050. *
  2051. * @code extern struct k_fifo <name>; @endcode
  2052. *
  2053. * @param name Name of the FIFO queue.
  2054. */
/* The object is placed in the k_queue iterable section (ALTERNATE) because a
 * k_fifo is a thin wrapper around a k_queue.
 */
#define K_FIFO_DEFINE(name) \
	STRUCT_SECTION_ITERABLE_ALTERNATE(k_queue, k_fifo, name) = \
		Z_FIFO_INITIALIZER(name)
  2058. /** @} */
/* LIFO queue object: a k_queue whose put path uses k_queue_prepend()
 * (see k_lifo_put below), giving last-in/first-out ordering.
 */
struct k_lifo {
	struct k_queue _queue; /* underlying generic queue */
};
  2062. /**
  2063. * @cond INTERNAL_HIDDEN
  2064. */
/* Static initializer used by K_LIFO_DEFINE(); delegates to the k_queue
 * initializer for the embedded queue.
 */
#define Z_LIFO_INITIALIZER(obj) \
	{ \
	._queue = Z_QUEUE_INITIALIZER(obj._queue) \
	}
  2069. /**
  2070. * INTERNAL_HIDDEN @endcond
  2071. */
  2072. /**
  2073. * @defgroup lifo_apis LIFO APIs
  2074. * @ingroup kernel_apis
  2075. * @{
  2076. */
  2077. /**
  2078. * @brief Initialize a LIFO queue.
  2079. *
  2080. * This routine initializes a LIFO queue object, prior to its first use.
  2081. *
  2082. * @param lifo Address of the LIFO queue.
  2083. *
  2084. * @return N/A
  2085. */
/* Traced wrapper over k_queue_init() for the embedded queue; @a lifo is
 * expanded more than once.
 */
#define k_lifo_init(lifo) \
	({ \
	SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_lifo, init, lifo); \
	k_queue_init(&(lifo)->_queue); \
	SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_lifo, init, lifo); \
	})
  2092. /**
  2093. * @brief Add an element to a LIFO queue.
  2094. *
  2095. * This routine adds a data item to @a lifo. A LIFO queue data item must be
  2096. * aligned on a word boundary, and the first word of the item is
  2097. * reserved for the kernel's use.
  2098. *
  2099. * @funcprops \isr_ok
  2100. *
  2101. * @param lifo Address of the LIFO queue.
  2102. * @param data Address of the data item.
  2103. *
  2104. * @return N/A
  2105. */
/* Traced wrapper: prepends (rather than appends) to the embedded queue,
 * which is what makes this object a LIFO.
 */
#define k_lifo_put(lifo, data) \
	({ \
	SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_lifo, put, lifo, data); \
	k_queue_prepend(&(lifo)->_queue, data); \
	SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_lifo, put, lifo, data); \
	})
  2112. /**
  2113. * @brief Add an element to a LIFO queue.
  2114. *
  2115. * This routine adds a data item to @a lifo. There is an implicit memory
  2116. * allocation to create an additional temporary bookkeeping data structure from
  2117. * the calling thread's resource pool, which is automatically freed when the
  2118. * item is removed. The data itself is not copied.
  2119. *
  2120. * @funcprops \isr_ok
  2121. *
  2122. * @param lifo Address of the LIFO.
  2123. * @param data Address of the data item.
  2124. *
  2125. * @retval 0 on success
  2126. * @retval -ENOMEM if there isn't sufficient RAM in the caller's resource pool
  2127. */
/* Traced wrapper over k_queue_alloc_prepend(); the trailing `ret;` yields
 * the int result (0 or -ENOMEM) from the statement expression.
 */
#define k_lifo_alloc_put(lifo, data) \
	({ \
	SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_lifo, alloc_put, lifo, data); \
	int ret = k_queue_alloc_prepend(&(lifo)->_queue, data); \
	SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_lifo, alloc_put, lifo, data, ret); \
	ret; \
	})
  2135. /**
  2136. * @brief Get an element from a LIFO queue.
  2137. *
  2138. * This routine removes a data item from @a LIFO in a "last in, first out"
  2139. * manner. The first word of the data item is reserved for the kernel's use.
  2140. *
  2141. * @note @a timeout must be set to K_NO_WAIT if called from ISR.
  2142. *
  2143. * @funcprops \isr_ok
  2144. *
  2145. * @param lifo Address of the LIFO queue.
  2146. * @param timeout Waiting period to obtain a data item,
  2147. * or one of the special values K_NO_WAIT and K_FOREVER.
  2148. *
  2149. * @return Address of the data item if successful; NULL if returned
  2150. * without waiting, or waiting period timed out.
  2151. */
/* Traced wrapper over k_queue_get(); identical retrieval path to k_fifo_get —
 * LIFO ordering comes entirely from the prepend-based put.
 */
#define k_lifo_get(lifo, timeout) \
	({ \
	SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_lifo, get, lifo, timeout); \
	void *ret = k_queue_get(&(lifo)->_queue, timeout); \
	SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_lifo, get, lifo, timeout, ret); \
	ret; \
	})
  2159. /**
  2160. * @brief Statically define and initialize a LIFO queue.
  2161. *
  2162. * The LIFO queue can be accessed outside the module where it is defined using:
  2163. *
  2164. * @code extern struct k_lifo <name>; @endcode
  2165. *
* @param name Name of the LIFO queue.
  2167. */
/* Placed in the k_queue iterable section (ALTERNATE), mirroring
 * K_FIFO_DEFINE.
 */
#define K_LIFO_DEFINE(name) \
	STRUCT_SECTION_ITERABLE_ALTERNATE(k_queue, k_lifo, name) = \
		Z_LIFO_INITIALIZER(name)
  2171. /** @} */
  2172. /**
  2173. * @cond INTERNAL_HIDDEN
  2174. */
#define K_STACK_FLAG_ALLOC ((uint8_t)1) /* Buffer was allocated */
/* Element type stored in a k_stack; pointer-sized so pointers round-trip. */
typedef uintptr_t stack_data_t;
struct k_stack {
	_wait_q_t wait_q;                /* threads waiting in k_stack_pop() */
	struct k_spinlock lock;          /* protects the buffer cursors */
	stack_data_t *base, *next, *top; /* buffer start, push/pop cursor,
					  * one past the end (see
					  * Z_STACK_INITIALIZER)
					  */
	uint8_t flags;                   /* K_STACK_FLAG_ALLOC if the buffer
					  * came from k_stack_alloc_init()
					  */
};
/* Static initializer for a k_stack over a caller-provided buffer. Fields not
 * listed (lock, flags) are zero-initialized, so K_STACK_FLAG_ALLOC is clear.
 */
#define Z_STACK_INITIALIZER(obj, stack_buffer, stack_num_entries) \
	{ \
	.wait_q = Z_WAIT_Q_INIT(&obj.wait_q), \
	.base = stack_buffer, \
	.next = stack_buffer, \
	.top = stack_buffer + stack_num_entries, \
	}
  2190. /**
  2191. * INTERNAL_HIDDEN @endcond
  2192. */
  2193. /**
  2194. * @defgroup stack_apis Stack APIs
  2195. * @ingroup kernel_apis
  2196. * @{
  2197. */
  2198. /**
  2199. * @brief Initialize a stack.
  2200. *
  2201. * This routine initializes a stack object, prior to its first use.
  2202. *
  2203. * @param stack Address of the stack.
  2204. * @param buffer Address of array used to hold stacked values.
  2205. * @param num_entries Maximum number of values that can be stacked.
  2206. *
  2207. * @return N/A
  2208. */
  2209. void k_stack_init(struct k_stack *stack,
  2210. stack_data_t *buffer, uint32_t num_entries);
  2211. /**
  2212. * @brief Initialize a stack.
  2213. *
  2214. * This routine initializes a stack object, prior to its first use. Internal
  2215. * buffers will be allocated from the calling thread's resource pool.
  2216. * This memory will be released if k_stack_cleanup() is called, or
  2217. * userspace is enabled and the stack object loses all references to it.
  2218. *
  2219. * @param stack Address of the stack.
  2220. * @param num_entries Maximum number of values that can be stacked.
  2221. *
* @retval 0 on success
* @retval -ENOMEM if memory couldn't be allocated
  2223. */
  2224. __syscall int32_t k_stack_alloc_init(struct k_stack *stack,
  2225. uint32_t num_entries);
  2226. /**
  2227. * @brief Release a stack's allocated buffer
  2228. *
  2229. * If a stack object was given a dynamically allocated buffer via
  2230. * k_stack_alloc_init(), this will free it. This function does nothing
  2231. * if the buffer wasn't dynamically allocated.
  2232. *
  2233. * @param stack Address of the stack.
  2234. * @retval 0 on success
  2235. * @retval -EAGAIN when object is still in use
  2236. */
  2237. int k_stack_cleanup(struct k_stack *stack);
  2238. /**
  2239. * @brief Push an element onto a stack.
  2240. *
  2241. * This routine adds a stack_data_t value @a data to @a stack.
  2242. *
  2243. * @funcprops \isr_ok
  2244. *
  2245. * @param stack Address of the stack.
  2246. * @param data Value to push onto the stack.
  2247. *
  2248. * @retval 0 on success
  2249. * @retval -ENOMEM if stack is full
  2250. */
  2251. __syscall int k_stack_push(struct k_stack *stack, stack_data_t data);
  2252. /**
  2253. * @brief Pop an element from a stack.
  2254. *
  2255. * This routine removes a stack_data_t value from @a stack in a "last in,
  2256. * first out" manner and stores the value in @a data.
  2257. *
  2258. * @note @a timeout must be set to K_NO_WAIT if called from ISR.
  2259. *
  2260. * @funcprops \isr_ok
  2261. *
  2262. * @param stack Address of the stack.
  2263. * @param data Address of area to hold the value popped from the stack.
  2264. * @param timeout Waiting period to obtain a value,
  2265. * or one of the special values K_NO_WAIT and
  2266. * K_FOREVER.
  2267. *
  2268. * @retval 0 Element popped from stack.
  2269. * @retval -EBUSY Returned without waiting.
  2270. * @retval -EAGAIN Waiting period timed out.
  2271. */
  2272. __syscall int k_stack_pop(struct k_stack *stack, stack_data_t *data,
  2273. k_timeout_t timeout);
  2274. /**
  2275. * @brief Statically define and initialize a stack
  2276. *
  2277. * The stack can be accessed outside the module where it is defined using:
  2278. *
  2279. * @code extern struct k_stack <name>; @endcode
  2280. *
  2281. * @param name Name of the stack.
  2282. * @param stack_num_entries Maximum number of values that can be stacked.
  2283. */
/* Defines both the backing buffer (__noinit: contents not zeroed at boot)
 * and the k_stack object initialized over it.
 */
#define K_STACK_DEFINE(name, stack_num_entries) \
	stack_data_t __noinit \
		_k_stack_buf_##name[stack_num_entries]; \
	STRUCT_SECTION_ITERABLE(k_stack, name) = \
		Z_STACK_INITIALIZER(name, _k_stack_buf_##name, \
				    stack_num_entries)
  2290. /** @} */
  2291. /**
  2292. * @cond INTERNAL_HIDDEN
  2293. */
  2294. struct k_work;
  2295. struct k_work_q;
  2296. struct k_work_queue_config;
  2297. struct k_delayed_work;
  2298. extern struct k_work_q k_sys_work_q;
  2299. /**
  2300. * INTERNAL_HIDDEN @endcond
  2301. */
  2302. /**
  2303. * @defgroup mutex_apis Mutex APIs
  2304. * @ingroup kernel_apis
  2305. * @{
  2306. */
  2307. /**
  2308. * Mutex Structure
  2309. * @ingroup mutex_apis
  2310. */
struct k_mutex {
	/** Mutex wait queue */
	_wait_q_t wait_q;
	/** Mutex owner */
	struct k_thread *owner;
	/** Current lock count (the mutex is recursive: the owner may relock,
	 * incrementing this — see k_mutex_lock()/k_mutex_unlock() docs)
	 */
	uint32_t lock_count;
	/** Original thread priority, saved for priority-inheritance
	 * restoration — NOTE(review): the restore path lives in the kernel
	 * implementation, not visible in this header
	 */
	int owner_orig_prio;
};
  2321. /**
  2322. * @cond INTERNAL_HIDDEN
  2323. */
/* Static initializer: unowned and unlocked; owner_orig_prio starts at the
 * lowest application priority until an owner's real priority is recorded.
 */
#define Z_MUTEX_INITIALIZER(obj) \
	{ \
	.wait_q = Z_WAIT_Q_INIT(&obj.wait_q), \
	.owner = NULL, \
	.lock_count = 0, \
	.owner_orig_prio = K_LOWEST_APPLICATION_THREAD_PRIO, \
	}
  2331. /**
  2332. * INTERNAL_HIDDEN @endcond
  2333. */
  2334. /**
  2335. * @brief Statically define and initialize a mutex.
  2336. *
  2337. * The mutex can be accessed outside the module where it is defined using:
  2338. *
  2339. * @code extern struct k_mutex <name>; @endcode
  2340. *
  2341. * @param name Name of the mutex.
  2342. */
/* Static definition; equivalent end state to calling k_mutex_init() on a
 * zeroed object.
 */
#define K_MUTEX_DEFINE(name) \
	STRUCT_SECTION_ITERABLE(k_mutex, name) = \
		Z_MUTEX_INITIALIZER(name)
  2346. /**
  2347. * @brief Initialize a mutex.
  2348. *
  2349. * This routine initializes a mutex object, prior to its first use.
  2350. *
  2351. * Upon completion, the mutex is available and does not have an owner.
  2352. *
  2353. * @param mutex Address of the mutex.
  2354. *
  2355. * @retval 0 Mutex object created
  2356. *
  2357. */
  2358. __syscall int k_mutex_init(struct k_mutex *mutex);
  2359. /**
  2360. * @brief Lock a mutex.
  2361. *
  2362. * This routine locks @a mutex. If the mutex is locked by another thread,
  2363. * the calling thread waits until the mutex becomes available or until
  2364. * a timeout occurs.
  2365. *
  2366. * A thread is permitted to lock a mutex it has already locked. The operation
  2367. * completes immediately and the lock count is increased by 1.
  2368. *
  2369. * Mutexes may not be locked in ISRs.
  2370. *
  2371. * @param mutex Address of the mutex.
  2372. * @param timeout Waiting period to lock the mutex,
  2373. * or one of the special values K_NO_WAIT and
  2374. * K_FOREVER.
  2375. *
  2376. * @retval 0 Mutex locked.
  2377. * @retval -EBUSY Returned without waiting.
  2378. * @retval -EAGAIN Waiting period timed out.
  2379. */
  2380. __syscall int k_mutex_lock(struct k_mutex *mutex, k_timeout_t timeout);
  2381. /**
  2382. * @brief Unlock a mutex.
  2383. *
  2384. * This routine unlocks @a mutex. The mutex must already be locked by the
  2385. * calling thread.
  2386. *
  2387. * The mutex cannot be claimed by another thread until it has been unlocked by
  2388. * the calling thread as many times as it was previously locked by that
  2389. * thread.
  2390. *
  2391. * Mutexes may not be unlocked in ISRs, as mutexes must only be manipulated
  2392. * in thread context due to ownership and priority inheritance semantics.
  2393. *
  2394. * @param mutex Address of the mutex.
  2395. *
  2396. * @retval 0 Mutex unlocked.
  2397. * @retval -EPERM The current thread does not own the mutex
  2398. * @retval -EINVAL The mutex is not locked
  2399. *
  2400. */
  2401. __syscall int k_mutex_unlock(struct k_mutex *mutex);
  2402. /**
  2403. * @}
  2404. */
/* Condition variable object: just a wait queue of blocked threads. The
 * associated mutex is supplied per call to k_condvar_wait().
 */
struct k_condvar {
	_wait_q_t wait_q; /* threads blocked in k_condvar_wait() */
};
/* Static initializer used by K_CONDVAR_DEFINE(). */
#define Z_CONDVAR_INITIALIZER(obj) \
	{ \
	.wait_q = Z_WAIT_Q_INIT(&obj.wait_q), \
	}
  2412. /**
  2413. * @defgroup condvar_apis Condition Variables APIs
  2414. * @ingroup kernel_apis
  2415. * @{
  2416. */
  2417. /**
  2418. * @brief Initialize a condition variable
  2419. *
  2420. * @param condvar pointer to a @p k_condvar structure
  2421. * @retval 0 Condition variable created successfully
  2422. */
  2423. __syscall int k_condvar_init(struct k_condvar *condvar);
  2424. /**
  2425. * @brief Signals one thread that is pending on the condition variable
  2426. *
  2427. * @param condvar pointer to a @p k_condvar structure
  2428. * @retval 0 On success
  2429. */
  2430. __syscall int k_condvar_signal(struct k_condvar *condvar);
  2431. /**
  2432. * @brief Unblock all threads that are pending on the condition
  2433. * variable
  2434. *
  2435. * @param condvar pointer to a @p k_condvar structure
  2436. * @return An integer with number of woken threads on success
  2437. */
  2438. __syscall int k_condvar_broadcast(struct k_condvar *condvar);
  2439. /**
  2440. * @brief Waits on the condition variable releasing the mutex lock
  2441. *
* Atomically releases the currently owned mutex, blocks the current thread
  2443. * waiting on the condition variable specified by @a condvar,
  2444. * and finally acquires the mutex again.
  2445. *
  2446. * The waiting thread unblocks only after another thread calls
  2447. * k_condvar_signal, or k_condvar_broadcast with the same condition variable.
  2448. *
  2449. * @param condvar pointer to a @p k_condvar structure
  2450. * @param mutex Address of the mutex.
  2451. * @param timeout Waiting period for the condition variable
  2452. * or one of the special values K_NO_WAIT and K_FOREVER.
  2453. * @retval 0 On success
  2454. * @retval -EAGAIN Waiting period timed out.
  2455. */
  2456. __syscall int k_condvar_wait(struct k_condvar *condvar, struct k_mutex *mutex,
  2457. k_timeout_t timeout);
  2458. /**
  2459. * @brief Statically define and initialize a condition variable.
  2460. *
  2461. * The condition variable can be accessed outside the module where it is
  2462. * defined using:
  2463. *
  2464. * @code extern struct k_condvar <name>; @endcode
  2465. *
  2466. * @param name Name of the condition variable.
  2467. */
/* Static definition; equivalent end state to calling k_condvar_init() on a
 * zeroed object.
 */
#define K_CONDVAR_DEFINE(name) \
	STRUCT_SECTION_ITERABLE(k_condvar, name) = \
		Z_CONDVAR_INITIALIZER(name)
  2471. /**
  2472. * @}
  2473. */
  2474. /**
  2475. * @cond INTERNAL_HIDDEN
  2476. */
struct k_sem {
	_wait_q_t wait_q;    /* threads blocked in k_sem_take() */
	unsigned int count;  /* current count (read by k_sem_count_get()) */
	unsigned int limit;  /* maximum permitted count */
	_POLL_EVENT;         /* NOTE(review): poll-event hook, conditionally
			      * expanded elsewhere — see _POLL_EVENT_OBJ_INIT
			      */
};
/* Static initializer used by K_SEM_DEFINE(). */
#define Z_SEM_INITIALIZER(obj, initial_count, count_limit) \
	{ \
	.wait_q = Z_WAIT_Q_INIT(&obj.wait_q), \
	.count = initial_count, \
	.limit = count_limit, \
	_POLL_EVENT_OBJ_INIT(obj) \
	}
  2490. /**
  2491. * INTERNAL_HIDDEN @endcond
  2492. */
  2493. /**
  2494. * @defgroup semaphore_apis Semaphore APIs
  2495. * @ingroup kernel_apis
  2496. * @{
  2497. */
  2498. /**
  2499. * @brief Maximum limit value allowed for a semaphore.
  2500. *
  2501. * This is intended for use when a semaphore does not have
  2502. * an explicit maximum limit, and instead is just used for
  2503. * counting purposes.
  2504. *
  2505. */
  2506. #define K_SEM_MAX_LIMIT UINT_MAX
  2507. /**
  2508. * @brief Initialize a semaphore.
  2509. *
  2510. * This routine initializes a semaphore object, prior to its first use.
  2511. *
  2512. * @param sem Address of the semaphore.
  2513. * @param initial_count Initial semaphore count.
  2514. * @param limit Maximum permitted semaphore count.
  2515. *
  2516. * @see K_SEM_MAX_LIMIT
  2517. *
  2518. * @retval 0 Semaphore created successfully
  2519. * @retval -EINVAL Invalid values
  2520. *
  2521. */
  2522. __syscall int k_sem_init(struct k_sem *sem, unsigned int initial_count,
  2523. unsigned int limit);
  2524. /**
  2525. * @brief Take a semaphore.
  2526. *
  2527. * This routine takes @a sem.
  2528. *
  2529. * @note @a timeout must be set to K_NO_WAIT if called from ISR.
  2530. *
  2531. * @funcprops \isr_ok
  2532. *
  2533. * @param sem Address of the semaphore.
  2534. * @param timeout Waiting period to take the semaphore,
  2535. * or one of the special values K_NO_WAIT and K_FOREVER.
  2536. *
  2537. * @retval 0 Semaphore taken.
  2538. * @retval -EBUSY Returned without waiting.
  2539. * @retval -EAGAIN Waiting period timed out,
  2540. * or the semaphore was reset during the waiting period.
  2541. */
  2542. __syscall int k_sem_take(struct k_sem *sem, k_timeout_t timeout);
  2543. /**
  2544. * @brief Give a semaphore.
  2545. *
  2546. * This routine gives @a sem, unless the semaphore is already at its maximum
  2547. * permitted count.
  2548. *
  2549. * @funcprops \isr_ok
  2550. *
  2551. * @param sem Address of the semaphore.
  2552. *
  2553. * @return N/A
  2554. */
  2555. __syscall void k_sem_give(struct k_sem *sem);
  2556. /**
  2557. * @brief Resets a semaphore's count to zero.
  2558. *
  2559. * This routine sets the count of @a sem to zero.
  2560. * Any outstanding semaphore takes will be aborted
  2561. * with -EAGAIN.
  2562. *
  2563. * @param sem Address of the semaphore.
  2564. *
  2565. * @return N/A
  2566. */
  2567. __syscall void k_sem_reset(struct k_sem *sem);
  2568. /**
  2569. * @brief Get a semaphore's count.
  2570. *
  2571. * This routine returns the current count of @a sem.
  2572. *
  2573. * @param sem Address of the semaphore.
  2574. *
  2575. * @return Current semaphore count.
  2576. */
  2577. __syscall unsigned int k_sem_count_get(struct k_sem *sem);
  2578. /**
  2579. * @internal
  2580. */
  2581. static inline unsigned int z_impl_k_sem_count_get(struct k_sem *sem)
  2582. {
  2583. return sem->count;
  2584. }
  2585. /**
  2586. * @brief Statically define and initialize a semaphore.
  2587. *
  2588. * The semaphore can be accessed outside the module where it is defined using:
  2589. *
  2590. * @code extern struct k_sem <name>; @endcode
  2591. *
  2592. * @param name Name of the semaphore.
  2593. * @param initial_count Initial semaphore count.
  2594. * @param count_limit Maximum permitted semaphore count.
  2595. */
/* Static definition plus a compile-time check mirroring the -EINVAL
 * conditions documented for k_sem_init(): non-zero limit, initial count not
 * exceeding the limit, limit within K_SEM_MAX_LIMIT.
 */
#define K_SEM_DEFINE(name, initial_count, count_limit) \
	STRUCT_SECTION_ITERABLE(k_sem, name) = \
		Z_SEM_INITIALIZER(name, initial_count, count_limit); \
	BUILD_ASSERT(((count_limit) != 0) && \
		     ((initial_count) <= (count_limit)) && \
		     ((count_limit) <= K_SEM_MAX_LIMIT));
  2602. /** @} */
  2603. /**
  2604. * @cond INTERNAL_HIDDEN
  2605. */
  2606. struct k_work_delayable;
  2607. struct k_work_sync;
  2608. /**
  2609. * INTERNAL_HIDDEN @endcond
  2610. */
  2611. /**
  2612. * @defgroup workqueue_apis Work Queue APIs
  2613. * @ingroup kernel_apis
  2614. * @{
  2615. */
  2616. /** @brief The signature for a work item handler function.
  2617. *
  2618. * The function will be invoked by the thread animating a work queue.
  2619. *
  2620. * @param work the work item that provided the handler.
  2621. */
  2622. typedef void (*k_work_handler_t)(struct k_work *work);
  2623. /** @brief Initialize a (non-delayable) work structure.
  2624. *
  2625. * This must be invoked before submitting a work structure for the first time.
  2626. * It need not be invoked again on the same work structure. It can be
  2627. * re-invoked to change the associated handler, but this must be done when the
  2628. * work item is idle.
  2629. *
  2630. * @funcprops \isr_ok
  2631. *
  2632. * @param work the work structure to be initialized.
  2633. *
  2634. * @param handler the handler to be invoked by the work item.
  2635. */
  2636. void k_work_init(struct k_work *work,
  2637. k_work_handler_t handler);
  2638. /** @brief Busy state flags from the work item.
  2639. *
  2640. * A zero return value indicates the work item appears to be idle.
  2641. *
  2642. * @note This is a live snapshot of state, which may change before the result
  2643. * is checked. Use locks where appropriate.
  2644. *
  2645. * @funcprops \isr_ok
  2646. *
  2647. * @param work pointer to the work item.
  2648. *
  2649. * @return a mask of flags K_WORK_DELAYED, K_WORK_QUEUED,
  2650. * K_WORK_RUNNING, and K_WORK_CANCELING.
  2651. */
  2652. int k_work_busy_get(const struct k_work *work);
  2653. /** @brief Test whether a work item is currently pending.
  2654. *
* Wrapper to determine whether a work item is in a non-idle state.
  2656. *
  2657. * @note This is a live snapshot of state, which may change before the result
  2658. * is checked. Use locks where appropriate.
  2659. *
  2660. * @funcprops \isr_ok
  2661. *
  2662. * @param work pointer to the work item.
  2663. *
  2664. * @return true if and only if k_work_busy_get() returns a non-zero value.
  2665. */
  2666. static inline bool k_work_is_pending(const struct k_work *work);
  2667. /** @brief Submit a work item to a queue.
  2668. *
  2669. * @param queue pointer to the work queue on which the item should run. If
  2670. * NULL the queue from the most recent submission will be used.
  2671. *
  2672. * @funcprops \isr_ok
  2673. *
  2674. * @param work pointer to the work item.
  2675. *
  2676. * @retval 0 if work was already submitted to a queue
  2677. * @retval 1 if work was not submitted and has been queued to @p queue
  2678. * @retval 2 if work was running and has been queued to the queue that was
  2679. * running it
  2680. * @retval -EBUSY
  2681. * * if work submission was rejected because the work item is cancelling; or
  2682. * * @p queue is draining; or
  2683. * * @p queue is plugged.
  2684. * @retval -EINVAL if @p queue is null and the work item has never been run.
  2685. * @retval -ENODEV if @p queue has not been started.
  2686. */
  2687. int k_work_submit_to_queue(struct k_work_q *queue,
  2688. struct k_work *work);
  2689. /** @brief Submit a work item to the system queue.
  2690. *
  2691. * @funcprops \isr_ok
  2692. *
  2693. * @param work pointer to the work item.
  2694. *
  2695. * @return as with k_work_submit_to_queue().
  2696. */
  2697. extern int k_work_submit(struct k_work *work);
  2698. /** @brief Wait for last-submitted instance to complete.
  2699. *
  2700. * Resubmissions may occur while waiting, including chained submissions (from
  2701. * within the handler).
  2702. *
  2703. * @note Be careful of caller and work queue thread relative priority. If
  2704. * this function sleeps it will not return until the work queue thread
  2705. * completes the tasks that allow this thread to resume.
  2706. *
  2707. * @note Behavior is undefined if this function is invoked on @p work from a
  2708. * work queue running @p work.
  2709. *
  2710. * @param work pointer to the work item.
  2711. *
  2712. * @param sync pointer to an opaque item containing state related to the
  2713. * pending cancellation. The object must persist until the call returns, and
  2714. * be accessible from both the caller thread and the work queue thread. The
  2715. * object must not be used for any other flush or cancel operation until this
  2716. * one completes. On architectures with CONFIG_KERNEL_COHERENCE the object
  2717. * must be allocated in coherent memory.
  2718. *
  2719. * @retval true if call had to wait for completion
  2720. * @retval false if work was already idle
  2721. */
  2722. bool k_work_flush(struct k_work *work,
  2723. struct k_work_sync *sync);
  2724. /** @brief Cancel a work item.
  2725. *
  2726. * This attempts to prevent a pending (non-delayable) work item from being
  2727. * processed by removing it from the work queue. If the item is being
  2728. * processed, the work item will continue to be processed, but resubmissions
  2729. * are rejected until cancellation completes.
  2730. *
  2731. * If this returns zero cancellation is complete, otherwise something
  2732. * (probably a work queue thread) is still referencing the item.
  2733. *
  2734. * See also k_work_cancel_sync().
  2735. *
  2736. * @funcprops \isr_ok
  2737. *
  2738. * @param work pointer to the work item.
  2739. *
  2740. * @return the k_work_busy_get() status indicating the state of the item after all
  2741. * cancellation steps performed by this call are completed.
  2742. */
  2743. int k_work_cancel(struct k_work *work);
  2744. /** @brief Cancel a work item and wait for it to complete.
  2745. *
  2746. * Same as k_work_cancel() but does not return until cancellation is complete.
  2747. * This can be invoked by a thread after k_work_cancel() to synchronize with a
  2748. * previous cancellation.
  2749. *
  2750. * On return the work structure will be idle unless something submits it after
  2751. * the cancellation was complete.
  2752. *
  2753. * @note Be careful of caller and work queue thread relative priority. If
  2754. * this function sleeps it will not return until the work queue thread
  2755. * completes the tasks that allow this thread to resume.
  2756. *
  2757. * @note Behavior is undefined if this function is invoked on @p work from a
  2758. * work queue running @p work.
  2759. *
  2760. * @param work pointer to the work item.
  2761. *
  2762. * @param sync pointer to an opaque item containing state related to the
  2763. * pending cancellation. The object must persist until the call returns, and
  2764. * be accessible from both the caller thread and the work queue thread. The
  2765. * object must not be used for any other flush or cancel operation until this
  2766. * one completes. On architectures with CONFIG_KERNEL_COHERENCE the object
  2767. * must be allocated in coherent memory.
  2768. *
  2769. * @retval true if work was pending (call had to wait for cancellation of a
  2770. * running handler to complete, or scheduled or submitted operations were
  2771. * cancelled);
  2772. * @retval false otherwise
  2773. */
  2774. bool k_work_cancel_sync(struct k_work *work, struct k_work_sync *sync);
  2775. /** @brief Initialize a work queue structure.
  2776. *
  2777. * This must be invoked before starting a work queue structure for the first time.
  2778. * It need not be invoked again on the same work queue structure.
  2779. *
  2780. * @funcprops \isr_ok
  2781. *
  2782. * @param queue the queue structure to be initialized.
  2783. */
  2784. void k_work_queue_init(struct k_work_q *queue);
  2785. /** @brief Initialize a work queue.
  2786. *
  2787. * This configures the work queue thread and starts it running. The function
  2788. * should not be re-invoked on a queue.
  2789. *
  2790. * @param queue pointer to the queue structure. It must be initialized
  2791. * in zeroed/bss memory or with @ref k_work_queue_init before
  2792. * use.
  2793. *
  2794. * @param stack pointer to the work thread stack area.
  2795. *
  2796. * @param stack_size size of the work thread stack area, in bytes.
  2797. *
  2798. * @param prio initial thread priority
  2799. *
  2800. * @param cfg optional additional configuration parameters. Pass @c
  2801. * NULL if not required, to use the defaults documented in
  2802. * k_work_queue_config.
  2803. */
  2804. void k_work_queue_start(struct k_work_q *queue,
  2805. k_thread_stack_t *stack, size_t stack_size,
  2806. int prio, const struct k_work_queue_config *cfg);
  2807. /** @brief Access the thread that animates a work queue.
  2808. *
  2809. * This is necessary to grant a work queue thread access to things the work
  2810. * items it will process are expected to use.
  2811. *
  2812. * @param queue pointer to the queue structure.
  2813. *
  2814. * @return the thread associated with the work queue.
  2815. */
  2816. static inline k_tid_t k_work_queue_thread_get(struct k_work_q *queue);
  2817. /** @brief Wait until the work queue has drained, optionally plugging it.
  2818. *
  2819. * This blocks submission to the work queue except when coming from queue
  2820. * thread, and blocks the caller until no more work items are available in the
  2821. * queue.
  2822. *
  2823. * If @p plug is true then submission will continue to be blocked after the
  2824. * drain operation completes until k_work_queue_unplug() is invoked.
  2825. *
  2826. * Note that work items that are delayed are not yet associated with their
  2827. * work queue. They must be cancelled externally if a goal is to ensure the
  2828. * work queue remains empty. The @p plug feature can be used to prevent
  2829. * delayed items from being submitted after the drain completes.
  2830. *
  2831. * @param queue pointer to the queue structure.
  2832. *
  2833. * @param plug if true the work queue will continue to block new submissions
  2834. * after all items have drained.
  2835. *
  2836. * @retval 1 if call had to wait for the drain to complete
  2837. * @retval 0 if call did not have to wait
  2838. * @retval negative if wait was interrupted or failed
  2839. */
  2840. int k_work_queue_drain(struct k_work_q *queue, bool plug);
  2841. /** @brief Release a work queue to accept new submissions.
  2842. *
  2843. * This releases the block on new submissions placed when k_work_queue_drain()
  2844. * is invoked with the @p plug option enabled. If this is invoked before the
  2845. * drain completes new items may be submitted as soon as the drain completes.
  2846. *
  2847. * @funcprops \isr_ok
  2848. *
  2849. * @param queue pointer to the queue structure.
  2850. *
  2851. * @retval 0 if successfully unplugged
  2852. * @retval -EALREADY if the work queue was not plugged.
  2853. */
  2854. int k_work_queue_unplug(struct k_work_q *queue);
  2855. /** @brief Initialize a delayable work structure.
  2856. *
  2857. * This must be invoked before scheduling a delayable work structure for the
  2858. * first time. It need not be invoked again on the same work structure. It
  2859. * can be re-invoked to change the associated handler, but this must be done
  2860. * when the work item is idle.
  2861. *
  2862. * @funcprops \isr_ok
  2863. *
  2864. * @param dwork the delayable work structure to be initialized.
  2865. *
  2866. * @param handler the handler to be invoked by the work item.
  2867. */
  2868. void k_work_init_delayable(struct k_work_delayable *dwork,
  2869. k_work_handler_t handler);
  2870. /**
  2871. * @brief Get the parent delayable work structure from a work pointer.
  2872. *
  2873. * This function is necessary when a @c k_work_handler_t function is passed to
  2874. * k_work_schedule_for_queue() and the handler needs to access data from the
  2875. * container of the containing `k_work_delayable`.
  2876. *
  2877. * @param work Address passed to the work handler
  2878. *
  2879. * @return Address of the containing @c k_work_delayable structure.
  2880. */
  2881. static inline struct k_work_delayable *
  2882. k_work_delayable_from_work(struct k_work *work);
  2883. /** @brief Busy state flags from the delayable work item.
  2884. *
  2885. * @funcprops \isr_ok
  2886. *
  2887. * @note This is a live snapshot of state, which may change before the result
  2888. * can be inspected. Use locks where appropriate.
  2889. *
  2890. * @param dwork pointer to the delayable work item.
  2891. *
  2892. * @return a mask of flags K_WORK_DELAYED, K_WORK_QUEUED, K_WORK_RUNNING, and
  2893. * K_WORK_CANCELING. A zero return value indicates the work item appears to
  2894. * be idle.
  2895. */
  2896. int k_work_delayable_busy_get(const struct k_work_delayable *dwork);
  2897. /** @brief Test whether a delayed work item is currently pending.
  2898. *
  2899. * Wrapper to determine whether a delayed work item is in a non-idle state.
  2900. *
  2901. * @note This is a live snapshot of state, which may change before the result
  2902. * can be inspected. Use locks where appropriate.
  2903. *
  2904. * @funcprops \isr_ok
  2905. *
  2906. * @param dwork pointer to the delayable work item.
  2907. *
  2908. * @return true if and only if k_work_delayable_busy_get() returns a non-zero
  2909. * value.
  2910. */
  2911. static inline bool k_work_delayable_is_pending(
  2912. const struct k_work_delayable *dwork);
  2913. /** @brief Get the absolute tick count at which a scheduled delayable work
  2914. * will be submitted.
  2915. *
  2916. * @note This is a live snapshot of state, which may change before the result
  2917. * can be inspected. Use locks where appropriate.
  2918. *
  2919. * @funcprops \isr_ok
  2920. *
  2921. * @param dwork pointer to the delayable work item.
  2922. *
  2923. * @return the tick count when the timer that will schedule the work item will
  2924. * expire, or the current tick count if the work is not scheduled.
  2925. */
  2926. static inline k_ticks_t k_work_delayable_expires_get(
  2927. const struct k_work_delayable *dwork);
  2928. /** @brief Get the number of ticks until a scheduled delayable work will be
  2929. * submitted.
  2930. *
  2931. * @note This is a live snapshot of state, which may change before the result
  2932. * can be inspected. Use locks where appropriate.
  2933. *
  2934. * @funcprops \isr_ok
  2935. *
  2936. * @param dwork pointer to the delayable work item.
  2937. *
  2938. * @return the number of ticks until the timer that will schedule the work
  2939. * item will expire, or zero if the item is not scheduled.
  2940. */
  2941. static inline k_ticks_t k_work_delayable_remaining_get(
  2942. const struct k_work_delayable *dwork);
  2943. /** @brief Submit an idle work item to a queue after a delay.
  2944. *
  2945. * Unlike k_work_reschedule_for_queue() this is a no-op if the work item is
  2946. * already scheduled or submitted, even if @p delay is @c K_NO_WAIT.
  2947. *
  2948. * @funcprops \isr_ok
  2949. *
  2950. * @param queue the queue on which the work item should be submitted after the
  2951. * delay.
  2952. *
  2953. * @param dwork pointer to the delayable work item.
  2954. *
  2955. * @param delay the time to wait before submitting the work item. If @c
  2956. * K_NO_WAIT and the work is not pending this is equivalent to
  2957. * k_work_submit_to_queue().
  2958. *
  2959. * @retval 0 if work was already scheduled or submitted.
  2960. * @retval 1 if work has been scheduled.
  2961. * @retval -EBUSY if @p delay is @c K_NO_WAIT and
  2962. * k_work_submit_to_queue() fails with this code.
  2963. * @retval -EINVAL if @p delay is @c K_NO_WAIT and
  2964. * k_work_submit_to_queue() fails with this code.
  2965. * @retval -ENODEV if @p delay is @c K_NO_WAIT and
  2966. * k_work_submit_to_queue() fails with this code.
  2967. */
  2968. int k_work_schedule_for_queue(struct k_work_q *queue,
  2969. struct k_work_delayable *dwork,
  2970. k_timeout_t delay);
  2971. /** @brief Submit an idle work item to the system work queue after a
  2972. * delay.
  2973. *
  2974. * This is a thin wrapper around k_work_schedule_for_queue(), with all the API
  2975. * characteristics of that function.
  2976. *
  2977. * @param dwork pointer to the delayable work item.
  2978. *
  2979. * @param delay the time to wait before submitting the work item. If @c
  2980. * K_NO_WAIT this is equivalent to k_work_submit_to_queue().
  2981. *
  2982. * @return as with k_work_schedule_for_queue().
  2983. */
  2984. extern int k_work_schedule(struct k_work_delayable *dwork,
  2985. k_timeout_t delay);
  2986. /** @brief Reschedule a work item to a queue after a delay.
  2987. *
  2988. * Unlike k_work_schedule_for_queue() this function can change the deadline of
  2989. * a scheduled work item, and will schedule a work item that isn't idle
  2990. * (e.g. is submitted or running). This function does not affect ("unsubmit")
  2991. * a work item that has been submitted to a queue.
  2992. *
  2993. * @funcprops \isr_ok
  2994. *
  2995. * @param queue the queue on which the work item should be submitted after the
  2996. * delay.
  2997. *
  2998. * @param dwork pointer to the delayable work item.
  2999. *
  3000. * @param delay the time to wait before submitting the work item. If @c
  3001. * K_NO_WAIT this is equivalent to k_work_submit_to_queue() after canceling
  3002. * any previous scheduled submission.
  3003. *
  3004. * @note If delay is @c K_NO_WAIT ("no delay") the return values are as with
  3005. * k_work_submit_to_queue().
  3006. *
  3007. * @retval 0 if delay is @c K_NO_WAIT and work was already on a queue
  3008. * @retval 1 if
  3009. * * delay is @c K_NO_WAIT and work was not submitted but has now been queued
  3010. * to @p queue; or
  3011. * * delay not @c K_NO_WAIT and work has been scheduled
  3012. * @retval 2 if delay is @c K_NO_WAIT and work was running and has been queued
  3013. * to the queue that was running it
  3014. * @retval -EBUSY if @p delay is @c K_NO_WAIT and
  3015. * k_work_submit_to_queue() fails with this code.
  3016. * @retval -EINVAL if @p delay is @c K_NO_WAIT and
  3017. * k_work_submit_to_queue() fails with this code.
  3018. * @retval -ENODEV if @p delay is @c K_NO_WAIT and
  3019. * k_work_submit_to_queue() fails with this code.
  3020. */
  3021. int k_work_reschedule_for_queue(struct k_work_q *queue,
  3022. struct k_work_delayable *dwork,
  3023. k_timeout_t delay);
  3024. /** @brief Reschedule a work item to the system work queue after a
  3025. * delay.
  3026. *
  3027. * This is a thin wrapper around k_work_reschedule_for_queue(), with all the
  3028. * API characteristics of that function.
  3029. *
  3030. * @param dwork pointer to the delayable work item.
  3031. *
  3032. * @param delay the time to wait before submitting the work item.
  3033. *
  3034. * @return as with k_work_reschedule_for_queue().
  3035. */
  3036. extern int k_work_reschedule(struct k_work_delayable *dwork,
  3037. k_timeout_t delay);
  3038. /** @brief Flush delayable work.
  3039. *
  3040. * If the work is scheduled, it is immediately submitted. Then the caller
  3041. * blocks until the work completes, as with k_work_flush().
  3042. *
  3043. * @note Be careful of caller and work queue thread relative priority. If
  3044. * this function sleeps it will not return until the work queue thread
  3045. * completes the tasks that allow this thread to resume.
  3046. *
  3047. * @note Behavior is undefined if this function is invoked on @p dwork from a
  3048. * work queue running @p dwork.
  3049. *
  3050. * @param dwork pointer to the delayable work item.
  3051. *
  3052. * @param sync pointer to an opaque item containing state related to the
  3053. * pending cancellation. The object must persist until the call returns, and
  3054. * be accessible from both the caller thread and the work queue thread. The
  3055. * object must not be used for any other flush or cancel operation until this
  3056. * one completes. On architectures with CONFIG_KERNEL_COHERENCE the object
  3057. * must be allocated in coherent memory.
  3058. *
  3059. * @retval true if call had to wait for completion
  3060. * @retval false if work was already idle
  3061. */
  3062. bool k_work_flush_delayable(struct k_work_delayable *dwork,
  3063. struct k_work_sync *sync);
  3064. /** @brief Cancel delayable work.
  3065. *
  3066. * Similar to k_work_cancel() but for delayable work. If the work is
  3067. * scheduled or submitted it is canceled. This function does not wait for the
  3068. * cancellation to complete.
  3069. *
  3070. * @note The work may still be running when this returns. Use
  3071. * k_work_flush_delayable() or k_work_cancel_delayable_sync() to ensure it is
  3072. * not running.
  3073. *
  3074. * @note Canceling delayable work does not prevent rescheduling it. It does
  3075. * prevent submitting it until the cancellation completes.
  3076. *
  3077. * @funcprops \isr_ok
  3078. *
  3079. * @param dwork pointer to the delayable work item.
  3080. *
  3081. * @return the k_work_delayable_busy_get() status indicating the state of the
  3082. * item after all cancellation steps performed by this call are completed.
  3083. */
  3084. int k_work_cancel_delayable(struct k_work_delayable *dwork);
  3085. /** @brief Cancel delayable work and wait.
  3086. *
  3087. * Like k_work_cancel_delayable() but waits until the work becomes idle.
  3088. *
  3089. * @note Canceling delayable work does not prevent rescheduling it. It does
  3090. * prevent submitting it until the cancellation completes.
  3091. *
  3092. * @note Be careful of caller and work queue thread relative priority. If
  3093. * this function sleeps it will not return until the work queue thread
  3094. * completes the tasks that allow this thread to resume.
  3095. *
  3096. * @note Behavior is undefined if this function is invoked on @p dwork from a
  3097. * work queue running @p dwork.
  3098. *
  3099. * @param dwork pointer to the delayable work item.
  3100. *
  3101. * @param sync pointer to an opaque item containing state related to the
  3102. * pending cancellation. The object must persist until the call returns, and
  3103. * be accessible from both the caller thread and the work queue thread. The
  3104. * object must not be used for any other flush or cancel operation until this
  3105. * one completes. On architectures with CONFIG_KERNEL_COHERENCE the object
  3106. * must be allocated in coherent memory.
  3107. *
  3108. * @retval true if work was not idle (call had to wait for cancellation of a
  3109. * running handler to complete, or scheduled or submitted operations were
  3110. * cancelled);
  3111. * @retval false otherwise
  3112. */
  3113. bool k_work_cancel_delayable_sync(struct k_work_delayable *dwork,
  3114. struct k_work_sync *sync);
enum {
	/**
	 * @cond INTERNAL_HIDDEN
	 */

	/* The atomic API is used for all work and queue flags fields to
	 * enforce sequential consistency in SMP environments.
	 */

	/* Bits that represent the work item states. At least nine of the
	 * combinations are distinct valid stable states.
	 */
	K_WORK_RUNNING_BIT = 0,
	K_WORK_CANCELING_BIT = 1,
	K_WORK_QUEUED_BIT = 2,
	K_WORK_DELAYED_BIT = 3,

	/* Mask covering all work item state bits. */
	K_WORK_MASK = BIT(K_WORK_DELAYED_BIT) | BIT(K_WORK_QUEUED_BIT)
		| BIT(K_WORK_RUNNING_BIT) | BIT(K_WORK_CANCELING_BIT),

	/* Static work flags */
	K_WORK_DELAYABLE_BIT = 8,
	K_WORK_DELAYABLE = BIT(K_WORK_DELAYABLE_BIT),

	/* Dynamic work queue flags.
	 *
	 * NOTE(review): queue flag bit positions overlap the work item bit
	 * positions above; this is safe because they live in separate flags
	 * fields (k_work::flags vs. k_work_q::flags).
	 */
	K_WORK_QUEUE_STARTED_BIT = 0,
	K_WORK_QUEUE_STARTED = BIT(K_WORK_QUEUE_STARTED_BIT),
	K_WORK_QUEUE_BUSY_BIT = 1,
	K_WORK_QUEUE_BUSY = BIT(K_WORK_QUEUE_BUSY_BIT),
	K_WORK_QUEUE_DRAIN_BIT = 2,
	K_WORK_QUEUE_DRAIN = BIT(K_WORK_QUEUE_DRAIN_BIT),
	K_WORK_QUEUE_PLUGGED_BIT = 3,
	K_WORK_QUEUE_PLUGGED = BIT(K_WORK_QUEUE_PLUGGED_BIT),

	/* Static work queue flags */
	K_WORK_QUEUE_NO_YIELD_BIT = 8,
	K_WORK_QUEUE_NO_YIELD = BIT(K_WORK_QUEUE_NO_YIELD_BIT),

	/**
	 * INTERNAL_HIDDEN @endcond
	 */

	/* Transient work flags */

	/** @brief Flag indicating a work item that is running under a work
	 * queue thread.
	 *
	 * Accessed via k_work_busy_get(). May co-occur with other flags.
	 */
	K_WORK_RUNNING = BIT(K_WORK_RUNNING_BIT),

	/** @brief Flag indicating a work item that is being canceled.
	 *
	 * Accessed via k_work_busy_get(). May co-occur with other flags.
	 */
	K_WORK_CANCELING = BIT(K_WORK_CANCELING_BIT),

	/** @brief Flag indicating a work item that has been submitted to a
	 * queue but has not started running.
	 *
	 * Accessed via k_work_busy_get(). May co-occur with other flags.
	 */
	K_WORK_QUEUED = BIT(K_WORK_QUEUED_BIT),

	/** @brief Flag indicating a delayed work item that is scheduled for
	 * submission to a queue.
	 *
	 * Accessed via k_work_busy_get(). May co-occur with other flags.
	 */
	K_WORK_DELAYED = BIT(K_WORK_DELAYED_BIT),
};
/** @brief A structure used to submit work. */
struct k_work {
	/* All fields are protected by the work module spinlock.  No fields
	 * are to be accessed except through kernel API.
	 */

	/* Node to link into k_work_q pending list. */
	sys_snode_t node;

	/* The function to be invoked by the work queue thread. */
	k_work_handler_t handler;

	/* The queue on which the work item was last submitted. */
	struct k_work_q *queue;

	/* State of the work item.
	 *
	 * The item can be DELAYED, QUEUED, and RUNNING simultaneously.
	 *
	 * It can be RUNNING and CANCELING simultaneously.
	 *
	 * Holds the K_WORK_* flag bits declared above.
	 */
	uint32_t flags;
};
/* Static initializer for a k_work item; used by K_WORK_DEFINE().
 * All fields other than the handler start zeroed (idle state).
 */
#define Z_WORK_INITIALIZER(work_handler) { \
	.handler = work_handler, \
}
/** @brief A structure used to submit work after a delay. */
struct k_work_delayable {
	/* The work item.  Must be first so that
	 * k_work_delayable_from_work() can recover the container from the
	 * handler's k_work pointer.
	 */
	struct k_work work;

	/* Timeout used to submit work after a delay. */
	struct _timeout timeout;

	/* The queue to which the work should be submitted. */
	struct k_work_q *queue;
};
/* Static initializer for a k_work_delayable; used by
 * K_WORK_DELAYABLE_DEFINE().  Statically marks the embedded work item
 * with the K_WORK_DELAYABLE flag.
 */
#define Z_WORK_DELAYABLE_INITIALIZER(work_handler) { \
	.work = { \
		.handler = work_handler, \
		.flags = K_WORK_DELAYABLE, \
	}, \
}
  3211. /**
  3212. * @brief Initialize a statically-defined delayable work item.
  3213. *
  3214. * This macro can be used to initialize a statically-defined delayable
  3215. * work item, prior to its first use. For example,
  3216. *
  3217. * @code static K_WORK_DELAYABLE_DEFINE(<dwork>, <work_handler>); @endcode
  3218. *
  3219. * Note that if the runtime dependencies support initialization with
  3220. * k_work_init_delayable() using that will eliminate the initialized
  3221. * object in ROM that is produced by this macro and copied in at
  3222. * system startup.
  3223. *
  3224. * @param work Symbol name for delayable work item object
  3225. * @param work_handler Function to invoke each time work item is processed.
  3226. */
  3227. #define K_WORK_DELAYABLE_DEFINE(work, work_handler) \
  3228. struct k_work_delayable work \
  3229. = Z_WORK_DELAYABLE_INITIALIZER(work_handler)
  3230. /**
  3231. * @cond INTERNAL_HIDDEN
  3232. */
/* Record used to wait for work to flush.
 *
 * The work item is inserted into the queue that will process (or is
 * processing) the item, and will be processed as soon as the item
 * completes.  When the flusher is processed the semaphore will be
 * signaled, releasing the thread waiting for the flush.
 */
struct z_work_flusher {
	/* Sentinel work item queued behind the item being flushed. */
	struct k_work work;
	/* Given when the sentinel runs; the flushing thread pends on it. */
	struct k_sem sem;
};
/* Record used to wait for work to complete a cancellation.
 *
 * The work item is inserted into a global queue of pending cancels.
 * When a cancelling work item goes idle any matching waiters are
 * removed from pending_cancels and are woken.
 */
struct z_work_canceller {
	/* Node to link into the global pending-cancels list. */
	sys_snode_t node;
	/* The work item whose cancellation is being awaited. */
	struct k_work *work;
	/* Given when the cancellation completes. */
	struct k_sem sem;
};
  3255. /**
  3256. * INTERNAL_HIDDEN @endcond
  3257. */
  3258. /** @brief A structure holding internal state for a pending synchronous
  3259. * operation on a work item or queue.
  3260. *
  3261. * Instances of this type are provided by the caller for invocation of
  3262. * k_work_flush(), k_work_cancel_sync() and sibling flush and cancel APIs. A
  3263. * referenced object must persist until the call returns, and be accessible
  3264. * from both the caller thread and the work queue thread.
  3265. *
  3266. * @note If CONFIG_KERNEL_COHERENCE is enabled the object must be allocated in
  3267. * coherent memory; see arch_mem_coherent(). The stack on these architectures
  3268. * is generally not coherent, so the object cannot be stack-allocated.
  Violations are detected by
  3269. * runtime assertion.
  3270. */
struct k_work_sync {
	/* A sync object serves one operation at a time, so flusher and
	 * canceller state can share storage.
	 */
	union {
		struct z_work_flusher flusher;
		struct z_work_canceller canceller;
	};
};
  3277. /** @brief A structure holding optional configuration items for a work
  3278. * queue.
  3279. *
  3280. * This structure, and values it references, are not retained by
  3281. * k_work_queue_start().
  3282. */
struct k_work_queue_config {
	/** The name to be given to the work queue thread.
	 *
	 * If left null the thread will not have a name.
	 */
	const char *name;

	/** Control whether the work queue thread should yield between
	 * items.
	 *
	 * Yielding between items helps guarantee the work queue
	 * thread does not starve other threads, including cooperative
	 * ones released by a work item.  This is the default behavior.
	 *
	 * Set this to @c true to prevent the work queue thread from
	 * yielding between items.  This may be appropriate when a
	 * sequence of items should complete without yielding
	 * control.
	 */
	bool no_yield;
};
  3303. /** @brief A structure used to hold work until it can be processed. */
/** @brief A structure used to hold work until it can be processed. */
struct k_work_q {
	/* The thread that animates the work. */
	struct k_thread thread;

	/* All the following fields must be accessed only while the
	 * work module spinlock is held.
	 */

	/* List of k_work items to be worked. */
	sys_slist_t pending;

	/* Wait queue for idle work thread. */
	_wait_q_t notifyq;

	/* Wait queue for threads waiting for the queue to drain. */
	_wait_q_t drainq;

	/* Flags describing queue state (K_WORK_QUEUE_* bits). */
	uint32_t flags;
};
  3319. /* Provide the implementation for inline functions declared above */
  3320. static inline bool k_work_is_pending(const struct k_work *work)
  3321. {
  3322. return k_work_busy_get(work) != 0;
  3323. }
  3324. static inline struct k_work_delayable *
  3325. k_work_delayable_from_work(struct k_work *work)
  3326. {
  3327. return CONTAINER_OF(work, struct k_work_delayable, work);
  3328. }
  3329. static inline bool k_work_delayable_is_pending(
  3330. const struct k_work_delayable *dwork)
  3331. {
  3332. return k_work_delayable_busy_get(dwork) != 0;
  3333. }
  3334. static inline k_ticks_t k_work_delayable_expires_get(
  3335. const struct k_work_delayable *dwork)
  3336. {
  3337. return z_timeout_expires(&dwork->timeout);
  3338. }
  3339. static inline k_ticks_t k_work_delayable_remaining_get(
  3340. const struct k_work_delayable *dwork)
  3341. {
  3342. return z_timeout_remaining(&dwork->timeout);
  3343. }
  3344. static inline k_tid_t k_work_queue_thread_get(struct k_work_q *queue)
  3345. {
  3346. return &queue->thread;
  3347. }
  3348. /* Legacy wrappers */
  3349. __deprecated
  3350. static inline bool k_work_pending(const struct k_work *work)
  3351. {
  3352. return k_work_is_pending(work);
  3353. }
/* Deprecated wrapper: equivalent to k_work_queue_start() with a default
 * (NULL) configuration.
 */
__deprecated
static inline void k_work_q_start(struct k_work_q *work_q,
				  k_thread_stack_t *stack,
				  size_t stack_size, int prio)
{
	k_work_queue_start(work_q, stack, stack_size, prio, NULL);
}
/* deprecated, remove when corresponding deprecated API is removed. */
struct k_delayed_work {
	/* Legacy wrapper carries only the replacement delayable item. */
	struct k_work_delayable work;
};

/* Static initializer for the deprecated k_delayed_work wrapper;
 * used by K_DELAYED_WORK_DEFINE().
 */
#define Z_DELAYED_WORK_INITIALIZER(work_handler) { \
	.work = Z_WORK_DELAYABLE_INITIALIZER(work_handler), \
}
/* Legacy counterparts of the k_work_delayable API (the wrapper struct
 * embeds a k_work_delayable); see the k_work_* equivalents above.
 */
void k_delayed_work_init(struct k_delayed_work *work,
			 k_work_handler_t handler);
int k_delayed_work_submit_to_queue(struct k_work_q *work_q,
				   struct k_delayed_work *work,
				   k_timeout_t delay);
int k_delayed_work_submit(struct k_delayed_work *work,
			  k_timeout_t delay);
int k_delayed_work_cancel(struct k_delayed_work *work);
  3376. __deprecated
  3377. static inline bool k_delayed_work_pending(struct k_delayed_work *work)
  3378. {
  3379. return k_work_delayable_is_pending(&work->work);
  3380. }
  3381. __deprecated
  3382. static inline int32_t k_delayed_work_remaining_get(struct k_delayed_work *work)
  3383. {
  3384. k_ticks_t rem = k_work_delayable_remaining_get(&work->work);
  3385. /* Probably should be ceil32, but was floor32 */
  3386. return k_ticks_to_ms_floor32(rem);
  3387. }
  3388. __deprecated
  3389. static inline k_ticks_t k_delayed_work_expires_ticks(
  3390. struct k_delayed_work *work)
  3391. {
  3392. return k_work_delayable_expires_get(&work->work);
  3393. }
  3394. __deprecated
  3395. static inline k_ticks_t k_delayed_work_remaining_ticks(
  3396. struct k_delayed_work *work)
  3397. {
  3398. return k_work_delayable_remaining_get(&work->work);
  3399. }
  3400. /** @} */
  3401. struct k_work_user;
  3402. /**
  3403. * @addtogroup workqueue_apis
  3404. * @{
  3405. */
  3406. /**
  3407. * @typedef k_work_user_handler_t
  3408. * @brief Work item handler function type for user work queues.
  3409. *
  3410. * A work item's handler function is executed by a user workqueue's thread
  3411. * when the work item is processed by the workqueue.
  3412. *
  3413. * @param work Address of the work item.
  3414. *
  3415. * @return N/A
  3416. */
  3417. typedef void (*k_work_user_handler_t)(struct k_work_user *work);
  3418. /**
  3419. * @cond INTERNAL_HIDDEN
  3420. */
struct k_work_user_q {
	/* Queue holding submitted user work items. */
	struct k_queue queue;
	/* Thread that consumes items from the queue. */
	struct k_thread thread;
};
enum {
	/* Bit position in k_work_user::flags. */
	K_WORK_USER_STATE_PENDING,	/* Work item pending state */
};
struct k_work_user {
	void *_reserved;		/* Used by k_queue implementation. */
	/* The function invoked when the item is processed. */
	k_work_user_handler_t handler;
	/* State bits, accessed with the atomic bit API. */
	atomic_t flags;
};
  3433. /**
  3434. * INTERNAL_HIDDEN @endcond
  3435. */
/* Static initializer for a k_work_user item; used by
 * K_WORK_USER_DEFINE() and k_work_user_init().
 */
#define Z_WORK_USER_INITIALIZER(work_handler) \
	{ \
	._reserved = NULL, \
	.handler = work_handler, \
	.flags = 0 \
	}
  3442. /**
  3443. * @brief Initialize a statically-defined user work item.
  3444. *
  3445. * This macro can be used to initialize a statically-defined user work
  3446. * item, prior to its first use. For example,
  3447. *
  3448. * @code static K_WORK_USER_DEFINE(<work>, <work_handler>); @endcode
  3449. *
  3450. * @param work Symbol name for work item object
  3451. * @param work_handler Function to invoke each time work item is processed.
  3452. */
  3453. #define K_WORK_USER_DEFINE(work, work_handler) \
  3454. struct k_work_user work = Z_WORK_USER_INITIALIZER(work_handler)
  3455. /**
  3456. * @brief Initialize a userspace work item.
  3457. *
  3458. * This routine initializes a user workqueue work item, prior to its
  3459. * first use.
  3460. *
  3461. * @param work Address of work item.
  3462. * @param handler Function to invoke each time work item is processed.
  3463. *
  3464. * @return N/A
  3465. */
  3466. static inline void k_work_user_init(struct k_work_user *work,
  3467. k_work_user_handler_t handler)
  3468. {
  3469. *work = (struct k_work_user)Z_WORK_USER_INITIALIZER(handler);
  3470. }
  3471. /**
  3472. * @brief Check if a userspace work item is pending.
  3473. *
  3474. * This routine indicates if user work item @a work is pending in a workqueue's
  3475. * queue.
  3476. *
  3477. * @note Checking if the work is pending gives no guarantee that the
  3478. * work will still be pending when this information is used. It is up to
  3479. * the caller to make sure that this information is used in a safe manner.
  3480. *
  3481. * @funcprops \isr_ok
  3482. *
  3483. * @param work Address of work item.
  3484. *
  3485. * @return true if work item is pending, or false if it is not pending.
  3486. */
  3487. static inline bool k_work_user_is_pending(struct k_work_user *work)
  3488. {
  3489. return atomic_test_bit(&work->flags, K_WORK_USER_STATE_PENDING);
  3490. }
  3491. /**
  3492. * @brief Submit a work item to a user mode workqueue
  3493. *
  3494. * Submits a work item to a workqueue that runs in user mode. A temporary
  3495. * memory allocation is made from the caller's resource pool which is freed
  3496. * once the worker thread consumes the k_work item. The workqueue
  3497. * thread must have memory access to the k_work item being submitted. The caller
  3498. * must have permission granted on the work_q parameter's queue object.
  3499. *
  3500. * @funcprops \isr_ok
  3501. *
  3502. * @param work_q Address of workqueue.
  3503. * @param work Address of work item.
  3504. *
  3505. * @retval -EBUSY if the work item was already in some workqueue
  3506. * @retval -ENOMEM if no memory for thread resource pool allocation
  3507. * @retval 0 Success
  3508. */
  3509. static inline int k_work_user_submit_to_queue(struct k_work_user_q *work_q,
  3510. struct k_work_user *work)
  3511. {
  3512. int ret = -EBUSY;
  3513. if (!atomic_test_and_set_bit(&work->flags,
  3514. K_WORK_USER_STATE_PENDING)) {
  3515. ret = k_queue_alloc_append(&work_q->queue, work);
  3516. /* Couldn't insert into the queue. Clear the pending bit
  3517. * so the work item can be submitted again
  3518. */
  3519. if (ret != 0) {
  3520. atomic_clear_bit(&work->flags,
  3521. K_WORK_USER_STATE_PENDING);
  3522. }
  3523. }
  3524. return ret;
  3525. }
  3526. /**
  3527. * @brief Start a workqueue in user mode
  3528. *
  3529. * This works identically to k_work_queue_start() except it is callable from
  3530. * user mode, and the worker thread created will run in user mode. The caller
  3531. * must have permissions granted on both the work_q parameter's thread and
  3532. * queue objects, and the same restrictions on priority apply as
  3533. * k_thread_create().
  3534. *
  3535. * @param work_q Address of workqueue.
  3536. * @param stack Pointer to work queue thread's stack space, as defined by
  3537. * K_THREAD_STACK_DEFINE()
  3538. * @param stack_size Size of the work queue thread's stack (in bytes), which
  3539. * should either be the same constant passed to
  3540. * K_THREAD_STACK_DEFINE() or the value of K_THREAD_STACK_SIZEOF().
  3541. * @param prio Priority of the work queue's thread.
  3542. * @param name optional thread name. If not null a copy is made into the
  3543. * thread's name buffer.
  3544. *
  3545. * @return N/A
  3546. */
  3547. extern void k_work_user_queue_start(struct k_work_user_q *work_q,
  3548. k_thread_stack_t *stack,
  3549. size_t stack_size, int prio,
  3550. const char *name);
  3551. /** @} */
  3552. /**
  3553. * @cond INTERNAL_HIDDEN
  3554. */
struct k_work_poll {
	/** Work item submitted to the workqueue once triggered */
	struct k_work work;
	/** Destination workqueue (presumably recorded on submission —
	 * confirm against k_work_poll_submit_to_queue())
	 */
	struct k_work_q *workq;
	/** Internal poller watching the event array */
	struct z_poller poller;
	/** Caller-supplied event array; must stay in persistent memory
	 * until the handler runs or the item is canceled (see the
	 * warning on k_work_poll_submit_to_queue())
	 */
	struct k_poll_event *events;
	/** Number of entries in @a events */
	int num_events;
	/** Handler invoked when the work item finally executes */
	k_work_handler_t real_handler;
	/** Timeout after which the work is scheduled even if no event
	 * triggered
	 */
	struct _timeout timeout;
	/** Result of the poll operation (internal use) */
	int poll_result;
};
  3565. /**
  3566. * INTERNAL_HIDDEN @endcond
  3567. */
  3568. /**
  3569. * @addtogroup workqueue_apis
  3570. * @{
  3571. */
  3572. /**
  3573. * @brief Initialize a statically-defined work item.
  3574. *
  3575. * This macro can be used to initialize a statically-defined workqueue work
  3576. * item, prior to its first use. For example,
  3577. *
  3578. * @code static K_WORK_DEFINE(<work>, <work_handler>); @endcode
  3579. *
  3580. * @param work Symbol name for work item object
  3581. * @param work_handler Function to invoke each time work item is processed.
  3582. */
/* Expands to a struct k_work definition initialized with work_handler. */
#define K_WORK_DEFINE(work, work_handler) \
	struct k_work work = Z_WORK_INITIALIZER(work_handler)
  3585. /**
  3586. * @brief Initialize a statically-defined delayed work item.
  3587. *
  3588. * This macro can be used to initialize a statically-defined workqueue
  3589. * delayed work item, prior to its first use. For example,
  3590. *
  3591. * @code static K_DELAYED_WORK_DEFINE(<work>, <work_handler>); @endcode
  3592. *
  3593. * @param work Symbol name for delayed work item object
  3594. * @param work_handler Function to invoke each time work item is processed.
  3595. */
/* Expands to a struct k_delayed_work definition initialized with
 * work_handler.
 */
#define K_DELAYED_WORK_DEFINE(work, work_handler) \
	struct k_delayed_work work = Z_DELAYED_WORK_INITIALIZER(work_handler)
  3598. /**
  3599. * @brief Initialize a triggered work item.
  3600. *
  3601. * This routine initializes a workqueue triggered work item, prior to
  3602. * its first use.
  3603. *
  3604. * @param work Address of triggered work item.
  3605. * @param handler Function to invoke each time work item is processed.
  3606. *
  3607. * @return N/A
  3608. */
  3609. extern void k_work_poll_init(struct k_work_poll *work,
  3610. k_work_handler_t handler);
  3611. /**
  3612. * @brief Submit a triggered work item.
  3613. *
  3614. * This routine schedules work item @a work to be processed by workqueue
  3615. * @a work_q when one of the given @a events is signaled. The routine
  3616. * initiates internal poller for the work item and then returns to the caller.
  3617. * Only when one of the watched events happen the work item is actually
  3618. * submitted to the workqueue and becomes pending.
  3619. *
  3620. * Submitting a previously submitted triggered work item that is still
 * waiting for the event cancels the existing submission and reschedules it
 * using the new event list. Note that this behavior is inherently subject
  3623. * to race conditions with the pre-existing triggered work item and work queue,
  3624. * so care must be taken to synchronize such resubmissions externally.
  3625. *
  3626. * @funcprops \isr_ok
  3627. *
  3628. * @warning
  3629. * Provided array of events as well as a triggered work item must be placed
  3630. * in persistent memory (valid until work handler execution or work
  3631. * cancellation) and cannot be modified after submission.
  3632. *
  3633. * @param work_q Address of workqueue.
 * @param work Address of triggered work item.
  3635. * @param events An array of events which trigger the work.
  3636. * @param num_events The number of events in the array.
  3637. * @param timeout Timeout after which the work will be scheduled
  3638. * for execution even if not triggered.
  3639. *
  3640. *
  3641. * @retval 0 Work item started watching for events.
  3642. * @retval -EINVAL Work item is being processed or has completed its work.
  3643. * @retval -EADDRINUSE Work item is pending on a different workqueue.
  3644. */
  3645. extern int k_work_poll_submit_to_queue(struct k_work_q *work_q,
  3646. struct k_work_poll *work,
  3647. struct k_poll_event *events,
  3648. int num_events,
  3649. k_timeout_t timeout);
  3650. /**
  3651. * @brief Submit a triggered work item to the system workqueue.
  3652. *
  3653. * This routine schedules work item @a work to be processed by system
  3654. * workqueue when one of the given @a events is signaled. The routine
  3655. * initiates internal poller for the work item and then returns to the caller.
  3656. * Only when one of the watched events happen the work item is actually
  3657. * submitted to the workqueue and becomes pending.
  3658. *
  3659. * Submitting a previously submitted triggered work item that is still
 * waiting for the event cancels the existing submission and reschedules it
 * using the new event list. Note that this behavior is inherently subject
  3662. * to race conditions with the pre-existing triggered work item and work queue,
  3663. * so care must be taken to synchronize such resubmissions externally.
  3664. *
  3665. * @funcprops \isr_ok
  3666. *
  3667. * @warning
  3668. * Provided array of events as well as a triggered work item must not be
  3669. * modified until the item has been processed by the workqueue.
  3670. *
 * @param work Address of triggered work item.
  3672. * @param events An array of events which trigger the work.
  3673. * @param num_events The number of events in the array.
  3674. * @param timeout Timeout after which the work will be scheduled
  3675. * for execution even if not triggered.
  3676. *
  3677. * @retval 0 Work item started watching for events.
  3678. * @retval -EINVAL Work item is being processed or has completed its work.
  3679. * @retval -EADDRINUSE Work item is pending on a different workqueue.
  3680. */
  3681. extern int k_work_poll_submit(struct k_work_poll *work,
  3682. struct k_poll_event *events,
  3683. int num_events,
  3684. k_timeout_t timeout);
  3685. /**
  3686. * @brief Cancel a triggered work item.
  3687. *
  3688. * This routine cancels the submission of triggered work item @a work.
  3689. * A triggered work item can only be canceled if no event triggered work
  3690. * submission.
  3691. *
  3692. * @funcprops \isr_ok
  3693. *
 * @param work Address of triggered work item.
  3695. *
  3696. * @retval 0 Work item canceled.
  3697. * @retval -EINVAL Work item is being processed or has completed its work.
  3698. */
  3699. extern int k_work_poll_cancel(struct k_work_poll *work);
  3700. /** @} */
  3701. /**
  3702. * @defgroup msgq_apis Message Queue APIs
  3703. * @ingroup kernel_apis
  3704. * @{
  3705. */
  3706. /**
  3707. * @brief Message Queue Structure
  3708. */
struct k_msgq {
	/** Message queue wait queue */
	_wait_q_t wait_q;
	/** Lock */
	struct k_spinlock lock;
	/** Message size */
	size_t msg_size;
	/** Maximal number of messages */
	uint32_t max_msgs;
	/** Start of message buffer */
	char *buffer_start;
	/** End of message buffer */
	char *buffer_end;
	/** Read pointer */
	char *read_ptr;
	/** Write pointer */
	char *write_ptr;
	/** Number of used messages */
	uint32_t used_msgs;
	_POLL_EVENT;
	/** Message queue flags (e.g. K_MSGQ_FLAG_ALLOC) */
	uint8_t flags;
};
  3732. /**
  3733. * @cond INTERNAL_HIDDEN
  3734. */
/* Static initializer for struct k_msgq. q_buffer must hold q_max_msgs
 * messages of q_msg_size bytes each; unlisted members (e.g. flags) are
 * implicitly zero-initialized.
 */
#define Z_MSGQ_INITIALIZER(obj, q_buffer, q_msg_size, q_max_msgs) \
	{ \
	.wait_q = Z_WAIT_Q_INIT(&obj.wait_q), \
	.msg_size = q_msg_size, \
	.max_msgs = q_max_msgs, \
	.buffer_start = q_buffer, \
	.buffer_end = q_buffer + (q_max_msgs * q_msg_size), \
	.read_ptr = q_buffer, \
	.write_ptr = q_buffer, \
	.used_msgs = 0, \
	_POLL_EVENT_OBJ_INIT(obj) \
	}
  3747. /**
  3748. * INTERNAL_HIDDEN @endcond
  3749. */
  3750. #define K_MSGQ_FLAG_ALLOC BIT(0)
  3751. /**
  3752. * @brief Message Queue Attributes
  3753. */
struct k_msgq_attrs {
	/** Size of each message (in bytes) */
	size_t msg_size;
	/** Maximal number of messages */
	uint32_t max_msgs;
	/** Number of messages currently in the queue */
	uint32_t used_msgs;
};
  3762. /**
  3763. * @brief Statically define and initialize a message queue.
  3764. *
  3765. * The message queue's ring buffer contains space for @a q_max_msgs messages,
  3766. * each of which is @a q_msg_size bytes long. The buffer is aligned to a
  3767. * @a q_align -byte boundary, which must be a power of 2. To ensure that each
  3768. * message is similarly aligned to this boundary, @a q_msg_size must also be
  3769. * a multiple of @a q_align.
  3770. *
  3771. * The message queue can be accessed outside the module where it is defined
  3772. * using:
  3773. *
  3774. * @code extern struct k_msgq <name>; @endcode
  3775. *
  3776. * @param q_name Name of the message queue.
  3777. * @param q_msg_size Message size (in bytes).
  3778. * @param q_max_msgs Maximum number of messages that can be queued.
  3779. * @param q_align Alignment of the message queue's ring buffer.
  3780. *
  3781. */
/* Reserves an uninitialized (__noinit), aligned ring buffer named
 * _k_fifo_buf_<q_name> plus a k_msgq placed in its iterable section.
 */
#define K_MSGQ_DEFINE(q_name, q_msg_size, q_max_msgs, q_align) \
	static char __noinit __aligned(q_align) \
		_k_fifo_buf_##q_name[(q_max_msgs) * (q_msg_size)]; \
	STRUCT_SECTION_ITERABLE(k_msgq, q_name) = \
		Z_MSGQ_INITIALIZER(q_name, _k_fifo_buf_##q_name, \
				   q_msg_size, q_max_msgs)
  3788. /**
  3789. * @brief Initialize a message queue.
  3790. *
  3791. * This routine initializes a message queue object, prior to its first use.
  3792. *
  3793. * The message queue's ring buffer must contain space for @a max_msgs messages,
  3794. * each of which is @a msg_size bytes long. The buffer must be aligned to an
  3795. * N-byte boundary, where N is a power of 2 (i.e. 1, 2, 4, ...). To ensure
  3796. * that each message is similarly aligned to this boundary, @a q_msg_size
  3797. * must also be a multiple of N.
  3798. *
  3799. * @param msgq Address of the message queue.
  3800. * @param buffer Pointer to ring buffer that holds queued messages.
  3801. * @param msg_size Message size (in bytes).
  3802. * @param max_msgs Maximum number of messages that can be queued.
  3803. *
  3804. * @return N/A
  3805. */
  3806. void k_msgq_init(struct k_msgq *msgq, char *buffer, size_t msg_size,
  3807. uint32_t max_msgs);
  3808. /**
  3809. * @brief Initialize a message queue.
  3810. *
  3811. * This routine initializes a message queue object, prior to its first use,
  3812. * allocating its internal ring buffer from the calling thread's resource
  3813. * pool.
  3814. *
  3815. * Memory allocated for the ring buffer can be released by calling
  3816. * k_msgq_cleanup(), or if userspace is enabled and the msgq object loses
  3817. * all of its references.
  3818. *
  3819. * @param msgq Address of the message queue.
  3820. * @param msg_size Message size (in bytes).
  3821. * @param max_msgs Maximum number of messages that can be queued.
  3822. *
  3823. * @return 0 on success, -ENOMEM if there was insufficient memory in the
  3824. * thread's resource pool, or -EINVAL if the size parameters cause
  3825. * an integer overflow.
  3826. */
  3827. __syscall int k_msgq_alloc_init(struct k_msgq *msgq, size_t msg_size,
  3828. uint32_t max_msgs);
  3829. /**
  3830. * @brief Release allocated buffer for a queue
  3831. *
  3832. * Releases memory allocated for the ring buffer.
  3833. *
  3834. * @param msgq message queue to cleanup
  3835. *
  3836. * @retval 0 on success
  3837. * @retval -EBUSY Queue not empty
  3838. */
  3839. int k_msgq_cleanup(struct k_msgq *msgq);
  3840. /**
  3841. * @brief Send a message to a message queue.
  3842. *
  3843. * This routine sends a message to message queue @a q.
  3844. *
  3845. * @note The message content is copied from @a data into @a msgq and the @a data
  3846. * pointer is not retained, so the message content will not be modified
  3847. * by this function.
  3848. *
  3849. * @funcprops \isr_ok
  3850. *
  3851. * @param msgq Address of the message queue.
  3852. * @param data Pointer to the message.
  3853. * @param timeout Non-negative waiting period to add the message,
  3854. * or one of the special values K_NO_WAIT and
  3855. * K_FOREVER.
  3856. *
  3857. * @retval 0 Message sent.
  3858. * @retval -ENOMSG Returned without waiting or queue purged.
  3859. * @retval -EAGAIN Waiting period timed out.
  3860. */
  3861. __syscall int k_msgq_put(struct k_msgq *msgq, const void *data, k_timeout_t timeout);
  3862. /**
  3863. * @brief Receive a message from a message queue.
  3864. *
  3865. * This routine receives a message from message queue @a q in a "first in,
  3866. * first out" manner.
  3867. *
  3868. * @note @a timeout must be set to K_NO_WAIT if called from ISR.
  3869. *
  3870. * @funcprops \isr_ok
  3871. *
  3872. * @param msgq Address of the message queue.
  3873. * @param data Address of area to hold the received message.
  3874. * @param timeout Waiting period to receive the message,
  3875. * or one of the special values K_NO_WAIT and
  3876. * K_FOREVER.
  3877. *
  3878. * @retval 0 Message received.
  3879. * @retval -ENOMSG Returned without waiting.
  3880. * @retval -EAGAIN Waiting period timed out.
  3881. */
  3882. __syscall int k_msgq_get(struct k_msgq *msgq, void *data, k_timeout_t timeout);
  3883. /**
  3884. * @brief Peek/read a message from a message queue.
  3885. *
  3886. * This routine reads a message from message queue @a q in a "first in,
  3887. * first out" manner and leaves the message in the queue.
  3888. *
  3889. * @funcprops \isr_ok
  3890. *
  3891. * @param msgq Address of the message queue.
  3892. * @param data Address of area to hold the message read from the queue.
  3893. *
  3894. * @retval 0 Message read.
  3895. * @retval -ENOMSG Returned when the queue has no message.
  3896. */
  3897. __syscall int k_msgq_peek(struct k_msgq *msgq, void *data);
  3898. /**
  3899. * @brief Purge a message queue.
  3900. *
  3901. * This routine discards all unreceived messages in a message queue's ring
  3902. * buffer. Any threads that are blocked waiting to send a message to the
  3903. * message queue are unblocked and see an -ENOMSG error code.
  3904. *
  3905. * @param msgq Address of the message queue.
  3906. *
  3907. * @return N/A
  3908. */
  3909. __syscall void k_msgq_purge(struct k_msgq *msgq);
  3910. /**
  3911. * @brief Get the amount of free space in a message queue.
  3912. *
  3913. * This routine returns the number of unused entries in a message queue's
  3914. * ring buffer.
  3915. *
  3916. * @param msgq Address of the message queue.
  3917. *
  3918. * @return Number of unused ring buffer entries.
  3919. */
  3920. __syscall uint32_t k_msgq_num_free_get(struct k_msgq *msgq);
  3921. /**
  3922. * @brief Get basic attributes of a message queue.
  3923. *
  3924. * This routine fetches basic attributes of message queue into attr argument.
  3925. *
  3926. * @param msgq Address of the message queue.
  3927. * @param attrs pointer to message queue attribute structure.
  3928. *
  3929. * @return N/A
  3930. */
  3931. __syscall void k_msgq_get_attrs(struct k_msgq *msgq,
  3932. struct k_msgq_attrs *attrs);
  3933. static inline uint32_t z_impl_k_msgq_num_free_get(struct k_msgq *msgq)
  3934. {
  3935. return msgq->max_msgs - msgq->used_msgs;
  3936. }
  3937. /**
  3938. * @brief Get the number of messages in a message queue.
  3939. *
  3940. * This routine returns the number of messages in a message queue's ring buffer.
  3941. *
  3942. * @param msgq Address of the message queue.
  3943. *
  3944. * @return Number of messages.
  3945. */
  3946. __syscall uint32_t k_msgq_num_used_get(struct k_msgq *msgq);
  3947. static inline uint32_t z_impl_k_msgq_num_used_get(struct k_msgq *msgq)
  3948. {
  3949. return msgq->used_msgs;
  3950. }
  3951. /** @} */
  3952. /**
  3953. * @defgroup mailbox_apis Mailbox APIs
  3954. * @ingroup kernel_apis
  3955. * @{
  3956. */
  3957. /**
  3958. * @brief Mailbox Message Structure
  3959. *
  3960. */
struct k_mbox_msg {
	/** internal use only - needed for legacy API support */
	uint32_t _mailbox;
	/** size of message (in bytes) */
	size_t size;
	/** application-defined information value */
	uint32_t info;
	/** sender's message data buffer */
	void *tx_data;
	/** internal use only - needed for legacy API support */
	void *_rx_data;
	/** message data block descriptor */
	struct k_mem_block tx_block;
	/** source thread id (presumably the sender to receive from) */
	k_tid_t rx_source_thread;
	/** target thread id (presumably the intended receiver) */
	k_tid_t tx_target_thread;
	/** internal use only - thread waiting on send (may be a dummy) */
	k_tid_t _syncing_thread;
#if (CONFIG_NUM_MBOX_ASYNC_MSGS > 0)
	/** internal use only - semaphore used during asynchronous send */
	struct k_sem *_async_sem;
#endif
};
  3985. /**
  3986. * @brief Mailbox Structure
  3987. *
  3988. */
struct k_mbox {
	/** Transmit messages queue */
	_wait_q_t tx_msg_queue;
	/** Receive message queue */
	_wait_q_t rx_msg_queue;
	/** Lock */
	struct k_spinlock lock;
	_POLL_EVENT;
};
  3997. /**
  3998. * @cond INTERNAL_HIDDEN
  3999. */
/* Static initializer for struct k_mbox; the spinlock and any unlisted
 * members are implicitly zero-initialized.
 */
#define Z_MBOX_INITIALIZER(obj) \
	{ \
	.tx_msg_queue = Z_WAIT_Q_INIT(&obj.tx_msg_queue), \
	.rx_msg_queue = Z_WAIT_Q_INIT(&obj.rx_msg_queue), \
	_POLL_EVENT_OBJ_INIT(obj) \
	}
  4006. /**
  4007. * INTERNAL_HIDDEN @endcond
  4008. */
  4009. /**
  4010. * @brief Statically define and initialize a mailbox.
  4011. *
  4012. * The mailbox is to be accessed outside the module where it is defined using:
  4013. *
  4014. * @code extern struct k_mbox <name>; @endcode
  4015. *
  4016. * @param name Name of the mailbox.
  4017. */
  4018. #define K_MBOX_DEFINE(name) \
  4019. STRUCT_SECTION_ITERABLE(k_mbox, name) = \
  4020. Z_MBOX_INITIALIZER(name) \
  4021. /**
  4022. * @brief Initialize a mailbox.
  4023. *
  4024. * This routine initializes a mailbox object, prior to its first use.
  4025. *
  4026. * @param mbox Address of the mailbox.
  4027. *
  4028. * @return N/A
  4029. */
  4030. extern void k_mbox_init(struct k_mbox *mbox);
  4031. /**
  4032. * @brief Send a mailbox message in a synchronous manner.
  4033. *
  4034. * This routine sends a message to @a mbox and waits for a receiver to both
  4035. * receive and process it. The message data may be in a buffer, in a memory
  4036. * pool block, or non-existent (i.e. an empty message).
  4037. *
  4038. * @param mbox Address of the mailbox.
  4039. * @param tx_msg Address of the transmit message descriptor.
  4040. * @param timeout Waiting period for the message to be received,
  4041. * or one of the special values K_NO_WAIT
  4042. * and K_FOREVER. Once the message has been received,
  4043. * this routine waits as long as necessary for the message
  4044. * to be completely processed.
  4045. *
  4046. * @retval 0 Message sent.
  4047. * @retval -ENOMSG Returned without waiting.
  4048. * @retval -EAGAIN Waiting period timed out.
  4049. */
  4050. extern int k_mbox_put(struct k_mbox *mbox, struct k_mbox_msg *tx_msg,
  4051. k_timeout_t timeout);
  4052. /**
  4053. * @brief Send a mailbox message in an asynchronous manner.
  4054. *
  4055. * This routine sends a message to @a mbox without waiting for a receiver
  4056. * to process it. The message data may be in a buffer, in a memory pool block,
  4057. * or non-existent (i.e. an empty message). Optionally, the semaphore @a sem
  4058. * will be given when the message has been both received and completely
  4059. * processed by the receiver.
  4060. *
  4061. * @param mbox Address of the mailbox.
  4062. * @param tx_msg Address of the transmit message descriptor.
  4063. * @param sem Address of a semaphore, or NULL if none is needed.
  4064. *
  4065. * @return N/A
  4066. */
  4067. extern void k_mbox_async_put(struct k_mbox *mbox, struct k_mbox_msg *tx_msg,
  4068. struct k_sem *sem);
  4069. /**
  4070. * @brief Receive a mailbox message.
  4071. *
  4072. * This routine receives a message from @a mbox, then optionally retrieves
  4073. * its data and disposes of the message.
  4074. *
  4075. * @param mbox Address of the mailbox.
  4076. * @param rx_msg Address of the receive message descriptor.
  4077. * @param buffer Address of the buffer to receive data, or NULL to defer data
  4078. * retrieval and message disposal until later.
  4079. * @param timeout Waiting period for a message to be received,
  4080. * or one of the special values K_NO_WAIT and K_FOREVER.
  4081. *
  4082. * @retval 0 Message received.
  4083. * @retval -ENOMSG Returned without waiting.
  4084. * @retval -EAGAIN Waiting period timed out.
  4085. */
  4086. extern int k_mbox_get(struct k_mbox *mbox, struct k_mbox_msg *rx_msg,
  4087. void *buffer, k_timeout_t timeout);
  4088. /**
  4089. * @brief Retrieve mailbox message data into a buffer.
  4090. *
  4091. * This routine completes the processing of a received message by retrieving
  4092. * its data into a buffer, then disposing of the message.
  4093. *
  4094. * Alternatively, this routine can be used to dispose of a received message
  4095. * without retrieving its data.
  4096. *
  4097. * @param rx_msg Address of the receive message descriptor.
  4098. * @param buffer Address of the buffer to receive data, or NULL to discard
  4099. * the data.
  4100. *
  4101. * @return N/A
  4102. */
  4103. extern void k_mbox_data_get(struct k_mbox_msg *rx_msg, void *buffer);
  4104. /**
 * @brief Clear all messages in a mailbox.
 *
 * This routine discards every message currently held in @a mbox. If more
 * asynchronous messages have been sent than the maximum number of
 * asynchronous messages allowed, the messages sent earlier are dropped.
 *
 * @param mbox Address of the mailbox.
 *
 * @return N/A
  4113. */
  4114. extern void k_mbox_clear_msg(struct k_mbox *mbox);
  4115. /**
 * @brief Check for new messages in a mailbox.
 *
 * This routine returns how many messages are pending in @a mbox for the
 * given target thread.
 *
 * @param mbox Address of the mailbox.
 * @param target_thread Thread id of the target thread.
 *
 * @retval Number of new messages in the mailbox.
  4124. */
  4125. extern int k_mbox_get_pending_msg_cnt(struct k_mbox *mbox, k_tid_t target_thread);
  4126. /** @} */
  4127. /**
  4128. * @defgroup pipe_apis Pipe APIs
  4129. * @ingroup kernel_apis
  4130. * @{
  4131. */
  4132. /** Pipe Structure */
struct k_pipe {
	unsigned char *buffer; /**< Pipe buffer: may be NULL */
	size_t size; /**< Buffer size */
	size_t bytes_used; /**< # bytes used in buffer */
	size_t read_index; /**< Where in buffer to read from */
	size_t write_index; /**< Where in buffer to write */
	struct k_spinlock lock; /**< Synchronization lock */
	struct {
		_wait_q_t readers; /**< Reader wait queue */
		_wait_q_t writers; /**< Writer wait queue */
	} wait_q; /**< Wait queue */
	uint8_t flags; /**< Flags (e.g. K_PIPE_FLAG_ALLOC) */
};
  4146. /**
  4147. * @cond INTERNAL_HIDDEN
  4148. */
#define K_PIPE_FLAG_ALLOC BIT(0) /**< Buffer was allocated */
/* Static initializer for struct k_pipe; pipe_buffer may be NULL for an
 * unbuffered pipe (see k_pipe_init()).
 */
#define Z_PIPE_INITIALIZER(obj, pipe_buffer, pipe_buffer_size) \
	{ \
	.buffer = pipe_buffer, \
	.size = pipe_buffer_size, \
	.bytes_used = 0, \
	.read_index = 0, \
	.write_index = 0, \
	.lock = {}, \
	.wait_q = { \
		.readers = Z_WAIT_Q_INIT(&obj.wait_q.readers), \
		.writers = Z_WAIT_Q_INIT(&obj.wait_q.writers) \
	}, \
	.flags = 0 \
	}
  4164. /**
  4165. * INTERNAL_HIDDEN @endcond
  4166. */
  4167. /**
  4168. * @brief Statically define and initialize a pipe.
  4169. *
  4170. * The pipe can be accessed outside the module where it is defined using:
  4171. *
  4172. * @code extern struct k_pipe <name>; @endcode
  4173. *
  4174. * @param name Name of the pipe.
  4175. * @param pipe_buffer_size Size of the pipe's ring buffer (in bytes),
  4176. * or zero if no ring buffer is used.
  4177. * @param pipe_align Alignment of the pipe's ring buffer (power of 2).
  4178. *
  4179. */
/* Reserves an uninitialized (__noinit), aligned ring buffer named
 * _k_pipe_buf_<name> plus a k_pipe placed in its iterable section.
 */
#define K_PIPE_DEFINE(name, pipe_buffer_size, pipe_align) \
	static unsigned char __noinit __aligned(pipe_align) \
		_k_pipe_buf_##name[pipe_buffer_size]; \
	STRUCT_SECTION_ITERABLE(k_pipe, name) = \
		Z_PIPE_INITIALIZER(name, _k_pipe_buf_##name, pipe_buffer_size)
  4185. /**
  4186. * @brief Initialize a pipe.
  4187. *
  4188. * This routine initializes a pipe object, prior to its first use.
  4189. *
  4190. * @param pipe Address of the pipe.
  4191. * @param buffer Address of the pipe's ring buffer, or NULL if no ring buffer
  4192. * is used.
  4193. * @param size Size of the pipe's ring buffer (in bytes), or zero if no ring
  4194. * buffer is used.
  4195. *
  4196. * @return N/A
  4197. */
  4198. void k_pipe_init(struct k_pipe *pipe, unsigned char *buffer, size_t size);
  4199. /**
  4200. * @brief Release a pipe's allocated buffer
  4201. *
  4202. * If a pipe object was given a dynamically allocated buffer via
  4203. * k_pipe_alloc_init(), this will free it. This function does nothing
  4204. * if the buffer wasn't dynamically allocated.
  4205. *
  4206. * @param pipe Address of the pipe.
  4207. * @retval 0 on success
  4208. * @retval -EAGAIN nothing to cleanup
  4209. */
  4210. int k_pipe_cleanup(struct k_pipe *pipe);
  4211. /**
  4212. * @brief Initialize a pipe and allocate a buffer for it
  4213. *
  4214. * Storage for the buffer region will be allocated from the calling thread's
  4215. * resource pool. This memory will be released if k_pipe_cleanup() is called,
  4216. * or userspace is enabled and the pipe object loses all references to it.
  4217. *
  4218. * This function should only be called on uninitialized pipe objects.
  4219. *
  4220. * @param pipe Address of the pipe.
  4221. * @param size Size of the pipe's ring buffer (in bytes), or zero if no ring
  4222. * buffer is used.
  4223. * @retval 0 on success
  4224. * @retval -ENOMEM if memory couldn't be allocated
  4225. */
  4226. __syscall int k_pipe_alloc_init(struct k_pipe *pipe, size_t size);
  4227. /**
  4228. * @brief Write data to a pipe.
  4229. *
  4230. * This routine writes up to @a bytes_to_write bytes of data to @a pipe.
  4231. *
  4232. * @param pipe Address of the pipe.
  4233. * @param data Address of data to write.
  4234. * @param bytes_to_write Size of data (in bytes).
  4235. * @param bytes_written Address of area to hold the number of bytes written.
  4236. * @param min_xfer Minimum number of bytes to write.
  4237. * @param timeout Waiting period to wait for the data to be written,
  4238. * or one of the special values K_NO_WAIT and K_FOREVER.
  4239. *
  4240. * @retval 0 At least @a min_xfer bytes of data were written.
  4241. * @retval -EIO Returned without waiting; zero data bytes were written.
  4242. * @retval -EAGAIN Waiting period timed out; between zero and @a min_xfer
  4243. * minus one data bytes were written.
  4244. */
  4245. __syscall int k_pipe_put(struct k_pipe *pipe, void *data,
  4246. size_t bytes_to_write, size_t *bytes_written,
  4247. size_t min_xfer, k_timeout_t timeout);
  4248. /**
  4249. * @brief Read data from a pipe.
  4250. *
  4251. * This routine reads up to @a bytes_to_read bytes of data from @a pipe.
  4252. *
  4253. * @param pipe Address of the pipe.
  4254. * @param data Address to place the data read from pipe.
  4255. * @param bytes_to_read Maximum number of data bytes to read.
  4256. * @param bytes_read Address of area to hold the number of bytes read.
  4257. * @param min_xfer Minimum number of data bytes to read.
  4258. * @param timeout Waiting period to wait for the data to be read,
  4259. * or one of the special values K_NO_WAIT and K_FOREVER.
  4260. *
  4261. * @retval 0 At least @a min_xfer bytes of data were read.
  4262. * @retval -EINVAL invalid parameters supplied
  4263. * @retval -EIO Returned without waiting; zero data bytes were read.
  4264. * @retval -EAGAIN Waiting period timed out; between zero and @a min_xfer
  4265. * minus one data bytes were read.
  4266. */
  4267. __syscall int k_pipe_get(struct k_pipe *pipe, void *data,
  4268. size_t bytes_to_read, size_t *bytes_read,
  4269. size_t min_xfer, k_timeout_t timeout);
  4270. /**
  4271. * @brief Query the number of bytes that may be read from @a pipe.
  4272. *
  4273. * @param pipe Address of the pipe.
  4274. *
  4275. * @retval a number n such that 0 <= n <= @ref k_pipe.size; the
  4276. * result is zero for unbuffered pipes.
  4277. */
  4278. __syscall size_t k_pipe_read_avail(struct k_pipe *pipe);
  4279. /**
  4280. * @brief Query the number of bytes that may be written to @a pipe
  4281. *
  4282. * @param pipe Address of the pipe.
  4283. *
  4284. * @retval a number n such that 0 <= n <= @ref k_pipe.size; the
  4285. * result is zero for unbuffered pipes.
  4286. */
  4287. __syscall size_t k_pipe_write_avail(struct k_pipe *pipe);
  4288. /** @} */
  4289. /**
  4290. * @cond INTERNAL_HIDDEN
  4291. */
struct k_mem_slab {
	/** Wait queue for threads blocked on allocation */
	_wait_q_t wait_q;
	/** Lock */
	struct k_spinlock lock;
	/** Total number of blocks in the slab */
	uint32_t num_blocks;
	/** Size of each block (in bytes) */
	size_t block_size;
	/** Backing buffer holding all the blocks */
	char *buffer;
	/** Free block list (presumably populated during slab init —
	 * confirm in k_mem_slab_init())
	 */
	char *free_list;
	/** Number of blocks currently allocated */
	uint32_t num_used;
#ifdef CONFIG_MEM_SLAB_TRACE_MAX_UTILIZATION
	/** High-water mark of num_used */
	uint32_t max_used;
#endif
};
/* Static initializer for struct k_mem_slab. free_list starts NULL
 * (presumably built when the slab is initialized); max_used, when
 * configured, is implicitly zero-initialized.
 */
#define Z_MEM_SLAB_INITIALIZER(obj, slab_buffer, slab_block_size, \
			       slab_num_blocks) \
	{ \
	.wait_q = Z_WAIT_Q_INIT(&obj.wait_q), \
	.lock = {}, \
	.num_blocks = slab_num_blocks, \
	.block_size = slab_block_size, \
	.buffer = slab_buffer, \
	.free_list = NULL, \
	.num_used = 0, \
	}
  4315. /**
  4316. * INTERNAL_HIDDEN @endcond
  4317. */
  4318. /**
  4319. * @defgroup mem_slab_apis Memory Slab APIs
  4320. * @ingroup kernel_apis
  4321. * @{
  4322. */
  4323. /**
  4324. * @brief Statically define and initialize a memory slab.
  4325. *
  4326. * The memory slab's buffer contains @a slab_num_blocks memory blocks
  4327. * that are @a slab_block_size bytes long. The buffer is aligned to a
  4328. * @a slab_align -byte boundary. To ensure that each memory block is similarly
  4329. * aligned to this boundary, @a slab_block_size must also be a multiple of
  4330. * @a slab_align.
  4331. *
  4332. * The memory slab can be accessed outside the module where it is defined
  4333. * using:
  4334. *
  4335. * @code extern struct k_mem_slab <name>; @endcode
  4336. *
  4337. * @param name Name of the memory slab.
  4338. * @param slab_block_size Size of each memory block (in bytes).
  4339. * @param slab_num_blocks Number memory blocks.
  4340. * @param slab_align Alignment of the memory slab's buffer (power of 2).
  4341. */
/* Reserves an uninitialized buffer for slab_num_blocks blocks plus a
 * k_mem_slab placed in its iterable section. Block size and alignment
 * are rounded up by WB_UP() — NOTE(review): presumed word-boundary
 * rounding; confirm against the util macro definitions.
 */
#define K_MEM_SLAB_DEFINE(name, slab_block_size, slab_num_blocks, slab_align) \
	char __noinit_named(k_mem_slab_buf_##name) \
	   __aligned(WB_UP(slab_align)) \
	   _k_mem_slab_buf_##name[(slab_num_blocks) * WB_UP(slab_block_size)]; \
	STRUCT_SECTION_ITERABLE(k_mem_slab, name) = \
		Z_MEM_SLAB_INITIALIZER(name, _k_mem_slab_buf_##name, \
				       WB_UP(slab_block_size), slab_num_blocks)
  4349. /**
  4350. * @brief Initialize a memory slab.
  4351. *
  4352. * Initializes a memory slab, prior to its first use.
  4353. *
  4354. * The memory slab's buffer contains @a slab_num_blocks memory blocks
  4355. * that are @a slab_block_size bytes long. The buffer must be aligned to an
  4356. * N-byte boundary matching a word boundary, where N is a power of 2
  4357. * (i.e. 4 on 32-bit systems, 8, 16, ...).
  4358. * To ensure that each memory block is similarly aligned to this boundary,
  4359. * @a slab_block_size must also be a multiple of N.
  4360. *
  4361. * @param slab Address of the memory slab.
  4362. * @param buffer Pointer to buffer used for the memory blocks.
  4363. * @param block_size Size of each memory block (in bytes).
  4364. * @param num_blocks Number of memory blocks.
  4365. *
  4366. * @retval 0 on success
  4367. * @retval -EINVAL invalid data supplied
  4368. *
  4369. */
  4370. extern int k_mem_slab_init(struct k_mem_slab *slab, void *buffer,
  4371. size_t block_size, uint32_t num_blocks);
  4372. /**
  4373. * @brief Allocate memory from a memory slab.
  4374. *
  4375. * This routine allocates a memory block from a memory slab.
  4376. *
  4377. * @note @a timeout must be set to K_NO_WAIT if called from ISR.
  4378. * @note When CONFIG_MULTITHREADING=n any @a timeout is treated as K_NO_WAIT.
  4379. *
  4380. * @funcprops \isr_ok
  4381. *
  4382. * @param slab Address of the memory slab.
  4383. * @param mem Pointer to block address area.
  4384. * @param timeout Non-negative waiting period to wait for operation to complete.
  4385. * Use K_NO_WAIT to return without waiting,
  4386. * or K_FOREVER to wait as long as necessary.
  4387. *
  4388. * @retval 0 Memory allocated. The block address area pointed at by @a mem
  4389. * is set to the starting address of the memory block.
  4390. * @retval -ENOMEM Returned without waiting.
  4391. * @retval -EAGAIN Waiting period timed out.
  4392. * @retval -EINVAL Invalid data supplied
  4393. */
  4394. extern int k_mem_slab_alloc(struct k_mem_slab *slab, void **mem,
  4395. k_timeout_t timeout);
  4396. /**
  4397. * @brief Free memory allocated from a memory slab.
  4398. *
  4399. * This routine releases a previously allocated memory block back to its
  4400. * associated memory slab.
  4401. *
  4402. * @param slab Address of the memory slab.
  4403. * @param mem Pointer to block address area (as set by k_mem_slab_alloc()).
  4404. *
  4405. * @return N/A
  4406. */
  4407. extern void k_mem_slab_free(struct k_mem_slab *slab, void **mem);
  4408. /**
  4409. * @brief Get the number of used blocks in a memory slab.
  4410. *
  4411. * This routine gets the number of memory blocks that are currently
  4412. * allocated in @a slab.
  4413. *
  4414. * @param slab Address of the memory slab.
  4415. *
  4416. * @return Number of allocated memory blocks.
  4417. */
  4418. static inline uint32_t k_mem_slab_num_used_get(struct k_mem_slab *slab)
  4419. {
  4420. return slab->num_used;
  4421. }
  4422. /**
  4423. * @brief Get the number of maximum used blocks so far in a memory slab.
  4424. *
  4425. * This routine gets the maximum number of memory blocks that were
  4426. * allocated in @a slab.
  4427. *
  4428. * @param slab Address of the memory slab.
  4429. *
  4430. * @return Maximum number of allocated memory blocks.
  4431. */
  4432. static inline uint32_t k_mem_slab_max_used_get(struct k_mem_slab *slab)
  4433. {
  4434. #ifdef CONFIG_MEM_SLAB_TRACE_MAX_UTILIZATION
  4435. return slab->max_used;
  4436. #else
  4437. ARG_UNUSED(slab);
  4438. return 0;
  4439. #endif
  4440. }
  4441. /**
  4442. * @brief Get the number of unused blocks in a memory slab.
  4443. *
  4444. * This routine gets the number of memory blocks that are currently
  4445. * unallocated in @a slab.
  4446. *
  4447. * @param slab Address of the memory slab.
  4448. *
  4449. * @return Number of unallocated memory blocks.
  4450. */
  4451. static inline uint32_t k_mem_slab_num_free_get(struct k_mem_slab *slab)
  4452. {
  4453. return slab->num_blocks - slab->num_used;
  4454. }
  4455. /** @} */
  4456. /**
  4457. * @addtogroup heap_apis
  4458. * @{
  4459. */
/* kernel synchronized heap struct */
struct k_heap {
	struct sys_heap heap;   /* underlying (unsynchronized) heap state */
	_wait_q_t wait_q;       /* threads blocked waiting for memory to be freed */
	struct k_spinlock lock; /* guards the heap and the wait queue */
};
  4466. /**
  4467. * @brief Initialize a k_heap
  4468. *
  4469. * This constructs a synchronized k_heap object over a memory region
  4470. * specified by the user. Note that while any alignment and size can
  4471. * be passed as valid parameters, internal alignment restrictions
  4472. * inside the inner sys_heap mean that not all bytes may be usable as
  4473. * allocated memory.
  4474. *
  4475. * @param h Heap struct to initialize
  4476. * @param mem Pointer to memory.
  4477. * @param bytes Size of memory region, in bytes
  4478. */
  4479. void k_heap_init(struct k_heap *h, void *mem, size_t bytes);
  4480. /** @brief Allocate aligned memory from a k_heap
  4481. *
  4482. * Behaves in all ways like k_heap_alloc(), except that the returned
  4483. * memory (if available) will have a starting address in memory which
  4484. * is a multiple of the specified power-of-two alignment value in
  4485. * bytes. The resulting memory can be returned to the heap using
  4486. * k_heap_free().
  4487. *
  4488. * @note @a timeout must be set to K_NO_WAIT if called from ISR.
  4489. * @note When CONFIG_MULTITHREADING=n any @a timeout is treated as K_NO_WAIT.
  4490. *
  4491. * @funcprops \isr_ok
  4492. *
  4493. * @param h Heap from which to allocate
  4494. * @param align Alignment in bytes, must be a power of two
  4495. * @param bytes Number of bytes requested
  4496. * @param timeout How long to wait, or K_NO_WAIT
  4497. * @return Pointer to memory the caller can now use
  4498. */
  4499. void *k_heap_aligned_alloc(struct k_heap *h, size_t align, size_t bytes,
  4500. k_timeout_t timeout);
  4501. /**
  4502. * @brief Allocate memory from a k_heap
  4503. *
  4504. * Allocates and returns a memory buffer from the memory region owned
  4505. * by the heap. If no memory is available immediately, the call will
  4506. * block for the specified timeout (constructed via the standard
  4507. * timeout API, or K_NO_WAIT or K_FOREVER) waiting for memory to be
  4508. * freed. If the allocation cannot be performed by the expiration of
  4509. * the timeout, NULL will be returned.
  4510. *
  4511. * @note @a timeout must be set to K_NO_WAIT if called from ISR.
  4512. * @note When CONFIG_MULTITHREADING=n any @a timeout is treated as K_NO_WAIT.
  4513. *
  4514. * @funcprops \isr_ok
  4515. *
  4516. * @param h Heap from which to allocate
  4517. * @param bytes Desired size of block to allocate
  4518. * @param timeout How long to wait, or K_NO_WAIT
  4519. * @return A pointer to valid heap memory, or NULL
  4520. */
  4521. void *k_heap_alloc(struct k_heap *h, size_t bytes,
  4522. k_timeout_t timeout);
  4523. /**
  4524. * @brief Free memory allocated by k_heap_alloc()
  4525. *
  4526. * Returns the specified memory block, which must have been returned
  4527. * from k_heap_alloc(), to the heap for use by other callers. Passing
  4528. * a NULL block is legal, and has no effect.
  4529. *
  4530. * @param h Heap to which to return the memory
  4531. * @param mem A valid memory block, or NULL
  4532. */
  4533. void k_heap_free(struct k_heap *h, void *mem);
  4534. /* Hand-calculated minimum heap sizes needed to return a successful
  4535. * 1-byte allocation. See details in lib/os/heap.[ch]
  4536. */
  4537. #define Z_HEAP_MIN_SIZE (sizeof(void *) > 4 ? 56 : 44)
  4538. /**
  4539. * @brief Define a static k_heap in the specified linker section
  4540. *
  4541. * This macro defines and initializes a static memory region and
  4542. * k_heap of the requested size in the specified linker section.
  4543. * After kernel start, &name can be used as if k_heap_init() had
  4544. * been called.
  4545. *
  4546. * Note that this macro enforces a minimum size on the memory region
  4547. * to accommodate metadata requirements. Very small heaps will be
  4548. * padded to fit.
  4549. *
  4550. * @param name Symbol name for the struct k_heap object
  4551. * @param bytes Size of memory region, in bytes
  4552. * @param in_section __attribute__((section(name))
  4553. */
#define Z_HEAP_DEFINE_IN_SECT(name, bytes, in_section) \
	/* Backing storage, padded up to the minimum workable heap size. */ \
	char in_section \
	     __aligned(8) /* CHUNK_UNIT */ \
	     kheap_##name[MAX(bytes, Z_HEAP_MIN_SIZE)]; \
	/* The k_heap object; init_mem/init_bytes are consumed at kernel \
	 * start to perform the equivalent of k_heap_init(). \
	 */ \
	STRUCT_SECTION_ITERABLE(k_heap, name) = { \
		.heap = { \
			.init_mem = kheap_##name, \
			.init_bytes = MAX(bytes, Z_HEAP_MIN_SIZE), \
		}, \
	}
  4564. /**
  4565. * @brief Define a static k_heap
  4566. *
  4567. * This macro defines and initializes a static memory region and
  4568. * k_heap of the requested size. After kernel start, &name can be
  4569. * used as if k_heap_init() had been called.
  4570. *
  4571. * Note that this macro enforces a minimum size on the memory region
  4572. * to accommodate metadata requirements. Very small heaps will be
  4573. * padded to fit.
  4574. *
  4575. * @param name Symbol name for the struct k_heap object
  4576. * @param bytes Size of memory region, in bytes
  4577. */
/* Implementation note: expands to Z_HEAP_DEFINE_IN_SECT() with the
 * backing buffer placed in a __noinit section named kheap_buf_<name>.
 */
#define K_HEAP_DEFINE(name, bytes) \
	Z_HEAP_DEFINE_IN_SECT(name, bytes, \
			      __noinit_named(kheap_buf_##name))
  4581. /**
  4582. * @brief Define a static k_heap in uncached memory
  4583. *
  4584. * This macro defines and initializes a static memory region and
 * k_heap of the requested size in uncached memory. After kernel
  4586. * start, &name can be used as if k_heap_init() had been called.
  4587. *
  4588. * Note that this macro enforces a minimum size on the memory region
  4589. * to accommodate metadata requirements. Very small heaps will be
  4590. * padded to fit.
  4591. *
  4592. * @param name Symbol name for the struct k_heap object
  4593. * @param bytes Size of memory region, in bytes
  4594. */
/* Implementation note: expands to Z_HEAP_DEFINE_IN_SECT() with the
 * backing buffer placed in the __nocache (uncached memory) section.
 */
#define K_HEAP_DEFINE_NOCACHE(name, bytes) \
	Z_HEAP_DEFINE_IN_SECT(name, bytes, __nocache)
  4597. /**
  4598. * @}
  4599. */
  4600. /**
  4601. * @defgroup heap_apis Heap APIs
  4602. * @ingroup kernel_apis
  4603. * @{
  4604. */
  4605. /**
  4606. * @brief Allocate memory from the heap with a specified alignment.
  4607. *
  4608. * This routine provides semantics similar to aligned_alloc(); memory is
  4609. * allocated from the heap with a specified alignment. However, one minor
  4610. * difference is that k_aligned_alloc() accepts any non-zero @p size,
 * whereas aligned_alloc() only accepts a @p size that is an integral
  4612. * multiple of @p align.
  4613. *
  4614. * Above, aligned_alloc() refers to:
  4615. * C11 standard (ISO/IEC 9899:2011): 7.22.3.1
  4616. * The aligned_alloc function (p: 347-348)
  4617. *
  4618. * @param align Alignment of memory requested (in bytes).
  4619. * @param size Amount of memory requested (in bytes).
  4620. *
  4621. * @return Address of the allocated memory if successful; otherwise NULL.
  4622. */
  4623. extern void *k_aligned_alloc(size_t align, size_t size);
  4624. /**
  4625. * @brief Allocate memory from the heap.
  4626. *
  4627. * This routine provides traditional malloc() semantics. Memory is
  4628. * allocated from the heap memory pool.
  4629. *
  4630. * @param size Amount of memory requested (in bytes).
  4631. *
  4632. * @return Address of the allocated memory if successful; otherwise NULL.
  4633. */
  4634. extern void *k_malloc(size_t size);
  4635. /**
  4636. * @brief Free memory allocated from heap.
  4637. *
  4638. * This routine provides traditional free() semantics. The memory being
  4639. * returned must have been allocated from the heap memory pool or
  4640. * k_mem_pool_malloc().
  4641. *
  4642. * If @a ptr is NULL, no operation is performed.
  4643. *
  4644. * @param ptr Pointer to previously allocated memory.
  4645. *
  4646. * @return N/A
  4647. */
  4648. extern void k_free(void *ptr);
  4649. /**
  4650. * @brief Allocate memory from heap, array style
  4651. *
  4652. * This routine provides traditional calloc() semantics. Memory is
  4653. * allocated from the heap memory pool and zeroed.
  4654. *
  4655. * @param nmemb Number of elements in the requested array
  4656. * @param size Size of each array element (in bytes).
  4657. *
  4658. * @return Address of the allocated memory if successful; otherwise NULL.
  4659. */
  4660. extern void *k_calloc(size_t nmemb, size_t size);
  4661. /** @} */
/* polling API - PRIVATE */

#ifdef CONFIG_POLL
/* Clear an object's poll_event field at init time; compiles to a no-op
 * when polling support is configured out.
 */
#define _INIT_OBJ_POLL_EVENT(obj) do { (obj)->poll_event = NULL; } while (false)
#else
#define _INIT_OBJ_POLL_EVENT(obj) do { } while (false)
#endif
/* private - types bit positions (converted to K_POLL_TYPE_xxx bit masks
 * by Z_POLL_TYPE_BIT())
 */
enum _poll_types_bits {
	/* can be used to ignore an event */
	_POLL_TYPE_IGNORE,

	/* to be signaled by k_poll_signal_raise() */
	_POLL_TYPE_SIGNAL,

	/* semaphore availability */
	_POLL_TYPE_SEM_AVAILABLE,

	/* queue/FIFO/LIFO data availability */
	_POLL_TYPE_DATA_AVAILABLE,

	/* msgq data availability */
	_POLL_TYPE_MSGQ_DATA_AVAILABLE,

	/* mbox data availability */
	_POLL_TYPE_MBOX_DATA_AVAILABLE,

	/* number of poll types; keep last */
	_POLL_NUM_TYPES
};
  4684. #define Z_POLL_TYPE_BIT(type) (1U << ((type) - 1U))
/* private - states bit positions (converted to K_POLL_STATE_xxx bit
 * masks by Z_POLL_STATE_BIT())
 */
enum _poll_states_bits {
	/* default state when creating event */
	_POLL_STATE_NOT_READY,

	/* signaled by k_poll_signal_raise() */
	_POLL_STATE_SIGNALED,

	/* semaphore is available */
	_POLL_STATE_SEM_AVAILABLE,

	/* data is available to read on queue/FIFO/LIFO */
	_POLL_STATE_DATA_AVAILABLE,

	/* queue/FIFO/LIFO wait was cancelled */
	_POLL_STATE_CANCELLED,

	/* data is available to read on a message queue */
	_POLL_STATE_MSGQ_DATA_AVAILABLE,

	/* data is available to read on mbox */
	_POLL_STATE_MBOX_DATA_AVAILABLE,

	/* number of poll states; keep last */
	_POLL_NUM_STATES
};
  4703. #define Z_POLL_STATE_BIT(state) (1U << ((state) - 1U))
/* Bits left over in the 32-bit bitfield word of struct k_poll_event
 * once the tag, type, state and mode fields are accounted for.
 */
#define _POLL_EVENT_NUM_UNUSED_BITS \
	(32 - (0 \
	       + 8 /* tag */ \
	       + _POLL_NUM_TYPES \
	       + _POLL_NUM_STATES \
	       + 1 /* modes */ \
	      ))
  4711. /* end of polling API - PRIVATE */
  4712. /**
  4713. * @defgroup poll_apis Async polling APIs
  4714. * @ingroup kernel_apis
  4715. * @{
  4716. */
/* Public polling API */

/* public - values for k_poll_event.type bitfield; values for the same
 * object may be bitwise-ORed together in a single event
 */
#define K_POLL_TYPE_IGNORE 0
#define K_POLL_TYPE_SIGNAL Z_POLL_TYPE_BIT(_POLL_TYPE_SIGNAL)
#define K_POLL_TYPE_SEM_AVAILABLE Z_POLL_TYPE_BIT(_POLL_TYPE_SEM_AVAILABLE)
#define K_POLL_TYPE_DATA_AVAILABLE Z_POLL_TYPE_BIT(_POLL_TYPE_DATA_AVAILABLE)
/* alias: FIFO data availability uses the generic queue/FIFO/LIFO bit */
#define K_POLL_TYPE_FIFO_DATA_AVAILABLE K_POLL_TYPE_DATA_AVAILABLE
#define K_POLL_TYPE_MSGQ_DATA_AVAILABLE Z_POLL_TYPE_BIT(_POLL_TYPE_MSGQ_DATA_AVAILABLE)
#define K_POLL_TYPE_MBOX_DATA_AVAILABLE Z_POLL_TYPE_BIT(_POLL_TYPE_MBOX_DATA_AVAILABLE)

/* public - polling modes */
enum k_poll_modes {
	/* polling thread does not take ownership of objects when available */
	K_POLL_MODE_NOTIFY_ONLY = 0,

	/* number of modes; keep last */
	K_POLL_NUM_MODES
};

/* public - values for k_poll_event.state bitfield */
#define K_POLL_STATE_NOT_READY 0
#define K_POLL_STATE_SIGNALED Z_POLL_STATE_BIT(_POLL_STATE_SIGNALED)
#define K_POLL_STATE_SEM_AVAILABLE Z_POLL_STATE_BIT(_POLL_STATE_SEM_AVAILABLE)
#define K_POLL_STATE_DATA_AVAILABLE Z_POLL_STATE_BIT(_POLL_STATE_DATA_AVAILABLE)
/* alias: FIFO data availability uses the generic queue/FIFO/LIFO bit */
#define K_POLL_STATE_FIFO_DATA_AVAILABLE K_POLL_STATE_DATA_AVAILABLE
#define K_POLL_STATE_MSGQ_DATA_AVAILABLE Z_POLL_STATE_BIT(_POLL_STATE_MSGQ_DATA_AVAILABLE)
#define K_POLL_STATE_CANCELLED Z_POLL_STATE_BIT(_POLL_STATE_CANCELLED)
#define K_POLL_STATE_MBOX_DATA_AVAILABLE Z_POLL_STATE_BIT(_POLL_STATE_MBOX_DATA_AVAILABLE)
/* public - poll signal object */
struct k_poll_signal {
	/** PRIVATE - DO NOT TOUCH: poll events registered on this signal */
	sys_dlist_t poll_events;

	/**
	 * 1 if the event has been signaled, 0 otherwise. Stays set to 1 until
	 * user resets it to 0.
	 */
	unsigned int signaled;

	/** custom result value passed to k_poll_signal_raise() if needed */
	int result;
};
/* Static initializer for a struct k_poll_signal: empty event list,
 * not signaled, result cleared.
 */
#define K_POLL_SIGNAL_INITIALIZER(obj) \
	{ \
	.poll_events = SYS_DLIST_STATIC_INIT(&obj.poll_events), \
	.signaled = 0, \
	.result = 0, \
	}
/**
 * @brief Poll Event
 *
 * The tag/type/state/mode bitfields below share a single 32-bit word;
 * see _POLL_EVENT_NUM_UNUSED_BITS for the leftover-bit accounting.
 */
struct k_poll_event {
	/** PRIVATE - DO NOT TOUCH */
	sys_dnode_t _node;

	/** PRIVATE - DO NOT TOUCH */
	struct z_poller *poller;

	/** optional user-specified tag, opaque, untouched by the API */
	uint32_t tag:8;

	/** bitfield of event types (bitwise-ORed K_POLL_TYPE_xxx values) */
	uint32_t type:_POLL_NUM_TYPES;

	/** bitfield of event states (bitwise-ORed K_POLL_STATE_xxx values) */
	uint32_t state:_POLL_NUM_STATES;

	/** mode of operation, from enum k_poll_modes */
	uint32_t mode:1;

	/** unused bits in 32-bit word */
	uint32_t unused:_POLL_EVENT_NUM_UNUSED_BITS;

	/** per-type data; the active member is implied by the @a type field */
	union {
		void *obj;
		struct k_poll_signal *signal;
		struct k_sem *sem;
		struct k_fifo *fifo;
		struct k_queue *queue;
		struct k_msgq *msgq;
		struct k_mbox *mbox;
	};
};
/* Initializer for a struct k_poll_event without a user tag (the tag
 * field is left zero-initialized).
 */
#define K_POLL_EVENT_INITIALIZER(_event_type, _event_mode, _event_obj) \
	{ \
	.poller = NULL, \
	.type = _event_type, \
	.state = K_POLL_STATE_NOT_READY, \
	.mode = _event_mode, \
	.unused = 0, \
	{ \
		.obj = _event_obj, \
	}, \
	}
/* Same as K_POLL_EVENT_INITIALIZER(), but also sets the user tag
 * (.poller is left to static zero-initialization here).
 */
#define K_POLL_EVENT_STATIC_INITIALIZER(_event_type, _event_mode, _event_obj, \
					event_tag) \
	{ \
	.tag = event_tag, \
	.type = _event_type, \
	.state = K_POLL_STATE_NOT_READY, \
	.mode = _event_mode, \
	.unused = 0, \
	{ \
		.obj = _event_obj, \
	}, \
	}
  4812. /**
  4813. * @brief Initialize one struct k_poll_event instance
  4814. *
 * After this routine is called on a poll event, the event is ready to be
  4816. * placed in an event array to be passed to k_poll().
  4817. *
  4818. * @param event The event to initialize.
  4819. * @param type A bitfield of the types of event, from the K_POLL_TYPE_xxx
  4820. * values. Only values that apply to the same object being polled
  4821. * can be used together. Choosing K_POLL_TYPE_IGNORE disables the
  4822. * event.
  4823. * @param mode Future. Use K_POLL_MODE_NOTIFY_ONLY.
  4824. * @param obj Kernel object or poll signal.
  4825. *
  4826. * @return N/A
  4827. */
  4828. extern void k_poll_event_init(struct k_poll_event *event, uint32_t type,
  4829. int mode, void *obj);
  4830. /**
  4831. * @brief Wait for one or many of multiple poll events to occur
  4832. *
  4833. * This routine allows a thread to wait concurrently for one or many of
  4834. * multiple poll events to have occurred. Such events can be a kernel object
  4835. * being available, like a semaphore, or a poll signal event.
  4836. *
  4837. * When an event notifies that a kernel object is available, the kernel object
  4838. * is not "given" to the thread calling k_poll(): it merely signals the fact
  4839. * that the object was available when the k_poll() call was in effect. Also,
  4840. * all threads trying to acquire an object the regular way, i.e. by pending on
  4841. * the object, have precedence over the thread polling on the object. This
  4842. * means that the polling thread will never get the poll event on an object
  4843. * until the object becomes available and its pend queue is empty. For this
  4844. * reason, the k_poll() call is more effective when the objects being polled
  4845. * only have one thread, the polling thread, trying to acquire them.
  4846. *
  4847. * When k_poll() returns 0, the caller should loop on all the events that were
  4848. * passed to k_poll() and check the state field for the values that were
  4849. * expected and take the associated actions.
  4850. *
  4851. * Before being reused for another call to k_poll(), the user has to reset the
  4852. * state field to K_POLL_STATE_NOT_READY.
  4853. *
  4854. * When called from user mode, a temporary memory allocation is required from
  4855. * the caller's resource pool.
  4856. *
  4857. * @param events An array of events to be polled for.
  4858. * @param num_events The number of events in the array.
  4859. * @param timeout Waiting period for an event to be ready,
  4860. * or one of the special values K_NO_WAIT and K_FOREVER.
  4861. *
  4862. * @retval 0 One or more events are ready.
  4863. * @retval -EAGAIN Waiting period timed out.
  4864. * @retval -EINTR Polling has been interrupted, e.g. with
  4865. * k_queue_cancel_wait(). All output events are still set and valid,
  4866. * cancelled event(s) will be set to K_POLL_STATE_CANCELLED. In other
  4867. * words, -EINTR status means that at least one of output events is
  4868. * K_POLL_STATE_CANCELLED.
  4869. * @retval -ENOMEM Thread resource pool insufficient memory (user mode only)
  4870. * @retval -EINVAL Bad parameters (user mode only)
  4871. */
  4872. __syscall int k_poll(struct k_poll_event *events, int num_events,
  4873. k_timeout_t timeout);
  4874. /**
  4875. * @brief Initialize a poll signal object.
  4876. *
  4877. * Ready a poll signal object to be signaled via k_poll_signal_raise().
  4878. *
  4879. * @param sig A poll signal.
  4880. *
  4881. * @return N/A
  4882. */
  4883. __syscall void k_poll_signal_init(struct k_poll_signal *sig);
/**
  4885. * @brief Reset a poll signal object's state to unsignaled.
  4886. *
  4887. * @param sig A poll signal object
  4888. */
  4889. __syscall void k_poll_signal_reset(struct k_poll_signal *sig);
  4890. /**
  4891. * @brief Fetch the signaled state and result value of a poll signal
  4892. *
  4893. * @param sig A poll signal object
  4894. * @param signaled An integer buffer which will be written nonzero if the
  4895. * object was signaled
  4896. * @param result An integer destination buffer which will be written with the
  4897. * result value if the object was signaled, or an undefined
  4898. * value if it was not.
  4899. */
  4900. __syscall void k_poll_signal_check(struct k_poll_signal *sig,
  4901. unsigned int *signaled, int *result);
  4902. /**
  4903. * @brief Signal a poll signal object.
  4904. *
  4905. * This routine makes ready a poll signal, which is basically a poll event of
  4906. * type K_POLL_TYPE_SIGNAL. If a thread was polling on that event, it will be
  4907. * made ready to run. A @a result value can be specified.
  4908. *
  4909. * The poll signal contains a 'signaled' field that, when set by
  4910. * k_poll_signal_raise(), stays set until the user sets it back to 0 with
  4911. * k_poll_signal_reset(). It thus has to be reset by the user before being
  4912. * passed again to k_poll() or k_poll() will consider it being signaled, and
  4913. * will return immediately.
  4914. *
  4915. * @note The result is stored and the 'signaled' field is set even if
  4916. * this function returns an error indicating that an expiring poll was
  4917. * not notified. The next k_poll() will detect the missed raise.
  4918. *
  4919. * @param sig A poll signal.
  4920. * @param result The value to store in the result field of the signal.
  4921. *
  4922. * @retval 0 The signal was delivered successfully.
  4923. * @retval -EAGAIN The polling thread's timeout is in the process of expiring.
  4924. */
  4925. __syscall int k_poll_signal_raise(struct k_poll_signal *sig, int result);
  4926. /**
  4927. * @internal
  4928. */
  4929. extern void z_handle_obj_poll_events(sys_dlist_t *events, uint32_t state);
  4930. extern void z_handle_obj_poll_thread_events(sys_dlist_t *events, uint32_t state, k_tid_t tid);
  4931. /** @} */
  4932. /**
  4933. * @defgroup cpu_idle_apis CPU Idling APIs
  4934. * @ingroup kernel_apis
  4935. * @{
  4936. */
  4937. /**
  4938. * @brief Make the CPU idle.
  4939. *
  4940. * This function makes the CPU idle until an event wakes it up.
  4941. *
  4942. * In a regular system, the idle thread should be the only thread responsible
  4943. * for making the CPU idle and triggering any type of power management.
  4944. * However, in some more constrained systems, such as a single-threaded system,
  4945. * the only thread would be responsible for this if needed.
  4946. *
  4947. * @note In some architectures, before returning, the function unmasks interrupts
  4948. * unconditionally.
  4949. *
  4950. * @return N/A
  4951. */
static inline void k_cpu_idle(void)
{
	/* Defer to the architecture-specific idle implementation. */
	arch_cpu_idle();
}
  4956. /**
  4957. * @brief Make the CPU idle in an atomic fashion.
  4958. *
  4959. * Similar to k_cpu_idle(), but must be called with interrupts locked.
  4960. *
  4961. * Enabling interrupts and entering a low-power mode will be atomic,
  4962. * i.e. there will be no period of time where interrupts are enabled before
  4963. * the processor enters a low-power mode.
  4964. *
  4965. * After waking up from the low-power mode, the interrupt lockout state will
  4966. * be restored as if by irq_unlock(key).
  4967. *
  4968. * @param key Interrupt locking key obtained from irq_lock().
  4969. *
  4970. * @return N/A
  4971. */
static inline void k_cpu_atomic_idle(unsigned int key)
{
	/* Defer to the architecture layer; @a key comes from irq_lock(). */
	arch_cpu_atomic_idle(key);
}
  4976. /**
  4977. * @}
  4978. */
/**
 * @internal
 */
#ifdef ARCH_EXCEPT
/* This architecture has direct support for triggering a CPU exception */
#define z_except_reason(reason)	ARCH_EXCEPT(reason)
#else

/* Unless CONFIG_ASSERT_NO_FILE_INFO is set, report where the oops
 * happened before invoking the fatal error handler.
 */
#if !defined(CONFIG_ASSERT_NO_FILE_INFO)
#define __EXCEPT_LOC() __ASSERT_PRINT("@ %s:%d\n", __FILE__, __LINE__)
#else
#define __EXCEPT_LOC()
#endif

/* NOTE: This is the implementation for arches that do not implement
 * ARCH_EXCEPT() to generate a real CPU exception.
 *
 * We won't have a real exception frame to determine the PC value when
 * the oops occurred, so print file and line number before we jump into
 * the fatal error handler.
 */
#define z_except_reason(reason) do { \
		__EXCEPT_LOC(); \
		z_fatal_error(reason, NULL); \
	} while (false)

#endif /* ARCH_EXCEPT */
/**
 * @brief Fatally terminate a thread
 *
 * This should be called when a thread has encountered an unrecoverable
 * runtime condition and needs to terminate. What this ultimately
 * means is determined by the _fatal_error_handler() implementation, which
 * will be called with reason code K_ERR_KERNEL_OOPS.
 *
 * If this is called from ISR context, the default system fatal error handler
 * will treat it as an unrecoverable system error, just like k_panic().
 */
#define k_oops()	z_except_reason(K_ERR_KERNEL_OOPS)
/**
 * @brief Fatally terminate the system
 *
 * This should be called when the Zephyr kernel has encountered an
 * unrecoverable runtime condition and needs to terminate. What this ultimately
 * means is determined by the _fatal_error_handler() implementation, which
 * will be called with reason code K_ERR_KERNEL_PANIC.
 */
#define k_panic()	z_except_reason(K_ERR_KERNEL_PANIC)
  5024. /*
  5025. * private APIs that are utilized by one or more public APIs
  5026. */
  5027. /**
  5028. * @internal
  5029. */
  5030. extern void z_init_thread_base(struct _thread_base *thread_base,
  5031. int priority, uint32_t initial_state,
  5032. unsigned int options);
#ifdef CONFIG_MULTITHREADING
/**
 * @internal
 */
extern void z_init_static_threads(void);
#else
/**
 * @internal Without CONFIG_MULTITHREADING there are no static threads
 * to set up, so this compiles to a no-op.
 */
#define z_init_static_threads() do { } while (false)
#endif
  5044. /**
  5045. * @internal
  5046. */
  5047. extern bool z_is_thread_essential(void);
  5048. #ifdef CONFIG_SMP
  5049. void z_smp_thread_init(void *arg, struct k_thread *thread);
  5050. void z_smp_thread_swap(void);
  5051. #endif
  5052. /**
  5053. * @internal
  5054. */
  5055. extern void z_timer_expiration_handler(struct _timeout *t);
  5056. #ifdef CONFIG_PRINTK
  5057. /**
  5058. * @brief Emit a character buffer to the console device
  5059. *
  5060. * @param c String of characters to print
  5061. * @param n The length of the string
  5062. *
  5063. */
  5064. __syscall void k_str_out(char *c, size_t n);
  5065. #endif
  5066. /**
  5067. * @brief Disable preservation of floating point context information.
  5068. *
  5069. * This routine informs the kernel that the specified thread
  5070. * will no longer be using the floating point registers.
  5071. *
  5072. * @warning
  5073. * Some architectures apply restrictions on how the disabling of floating
  5074. * point preservation may be requested, see arch_float_disable.
  5075. *
  5076. * @warning
  5077. * This routine should only be used to disable floating point support for
  5078. * a thread that currently has such support enabled.
  5079. *
  5080. * @param thread ID of thread.
  5081. *
  5082. * @retval 0 On success.
  5083. * @retval -ENOTSUP If the floating point disabling is not implemented.
  5084. * -EINVAL If the floating point disabling could not be performed.
  5085. */
  5086. __syscall int k_float_disable(struct k_thread *thread);
  5087. /**
  5088. * @brief Enable preservation of floating point context information.
  5089. *
  5090. * This routine informs the kernel that the specified thread
  5091. * will use the floating point registers.
  5092. * Invoking this routine initializes the thread's floating point context info
  5093. * to that of an FPU that has been reset. The next time the thread is scheduled
  5094. * by z_swap() it will either inherit an FPU that is guaranteed to be in a
  5095. * "sane" state (if the most recent user of the FPU was cooperatively swapped
  5096. * out) or the thread's own floating point context will be loaded (if the most
  5097. * recent user of the FPU was preempted, or if this thread is the first user
  5098. * of the FPU). Thereafter, the kernel will protect the thread's FP context
  5099. * so that it is not altered during a preemptive context switch.
  5100. *
  5101. * The @a options parameter indicates which floating point register sets will
  5102. * be used by the specified thread.
  5103. *
  5104. * For x86 options:
  5105. *
  5106. * - K_FP_REGS indicates x87 FPU and MMX registers only
  5107. * - K_SSE_REGS indicates SSE registers (and also x87 FPU and MMX registers)
  5108. *
  5109. * @warning
  5110. * Some architectures apply restrictions on how the enabling of floating
  5111. * point preservation may be requested, see arch_float_enable.
  5112. *
  5113. * @warning
  5114. * This routine should only be used to enable floating point support for
 * a thread that currently has such support disabled.
  5116. *
  5117. * @param thread ID of thread.
  5118. * @param options architecture dependent options
  5119. *
  5120. * @retval 0 On success.
  5121. * @retval -ENOTSUP If the floating point enabling is not implemented.
  5122. * -EINVAL If the floating point enabling could not be performed.
  5123. */
  5124. __syscall int k_float_enable(struct k_thread *thread, unsigned int options);
  5125. #ifdef CONFIG_THREAD_RUNTIME_STATS
  5126. /**
  5127. * @brief Get the runtime statistics of a thread
  5128. *
  5129. * @param thread ID of thread.
  5130. * @param stats Pointer to struct to copy statistics into.
  5131. * @return -EINVAL if null pointers, otherwise 0
  5132. */
  5133. int k_thread_runtime_stats_get(k_tid_t thread,
  5134. k_thread_runtime_stats_t *stats);
  5135. /**
  5136. * @brief Get the runtime statistics of all threads
  5137. *
  5138. * @param stats Pointer to struct to copy statistics into.
  5139. * @return -EINVAL if null pointers, otherwise 0
  5140. */
  5141. int k_thread_runtime_stats_all_get(k_thread_runtime_stats_t *stats);
  5142. #endif
  5143. #ifdef __cplusplus
  5144. }
  5145. #endif
  5146. #include <tracing/tracing.h>
  5147. #include <syscalls/kernel.h>
  5148. #endif /* !_ASMLANGUAGE */
  5149. #endif /* ZEPHYR_INCLUDE_KERNEL_H_ */