twisterlib.py 156 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177
71778177917801781178217831784178517861787178817891790179117921793179417951796179717981799180018011802180318041805180618071808180918101811181218131814181518161817181818191820182118221823182418251826182718281829183018311832183318341835183618371838183918401841184218431844184518461847184818491850185118521853185418551856185718581859186018611862186318641865186618671868186918701871187218731874187518761877187818791880188118821883188418851886188718881889189018911892189318941895189618971898189919001901190219031904190519061907190819091910191119121913191419151916191719181919192019211922192319241925192619271928192919301931193219331934193519361937193819391940194119421943194419451946194719481949195019511952195319541955195619571958195919601961196219631964196519661967196819691970197119721973197419751976197719781979198019811982198319841985198619871988198919901991199219931994199519961997199819992000200120022003200420052006200720082009201020112012201320142015201620172018201920202021202220232024202520262027202820292030203120322033203420352036203720382039204020412042204320442045204620472048204920502051205220532054205520562057205820592060206120622063206420652066206720682069207020712072207320742075207620772078207920802081208220832084208520862087208820892090209120922093209420952096209720982099210021012102210321042105210621072108210921102111211221132114211521162117211821192120212121222123212421252126212721282129213021312132213321342135213621372138213921402141214221432144214521462147214821492150215121522153215421552156215721582159216021612162216321642165216621672168216921702171217221732174217521762177217821792180218121822183218421852186218721882189219021912192219321942195219621972198219922002201220222032204220522062207220822092210221122122213221422152216221722182219222022212222222322242225222622272228222922302231223222332234223522362237223822392240224122422243224422452246224722482249225022512252225322542255225622572258225922602261226222632264226522662267226822692270227122722273227422752276227
72278227922802281228222832284228522862287228822892290229122922293229422952296229722982299230023012302230323042305230623072308230923102311231223132314231523162317231823192320232123222323232423252326232723282329233023312332233323342335233623372338233923402341234223432344234523462347234823492350235123522353235423552356235723582359236023612362236323642365236623672368236923702371237223732374237523762377237823792380238123822383238423852386238723882389239023912392239323942395239623972398239924002401240224032404240524062407240824092410241124122413241424152416241724182419242024212422242324242425242624272428242924302431243224332434243524362437243824392440244124422443244424452446244724482449245024512452245324542455245624572458245924602461246224632464246524662467246824692470247124722473247424752476247724782479248024812482248324842485248624872488248924902491249224932494249524962497249824992500250125022503250425052506250725082509251025112512251325142515251625172518251925202521252225232524252525262527252825292530253125322533253425352536253725382539254025412542254325442545254625472548254925502551255225532554255525562557255825592560256125622563256425652566256725682569257025712572257325742575257625772578257925802581258225832584258525862587258825892590259125922593259425952596259725982599260026012602260326042605260626072608260926102611261226132614261526162617261826192620262126222623262426252626262726282629263026312632263326342635263626372638263926402641264226432644264526462647264826492650265126522653265426552656265726582659266026612662266326642665266626672668266926702671267226732674267526762677267826792680268126822683268426852686268726882689269026912692269326942695269626972698269927002701270227032704270527062707270827092710271127122713271427152716271727182719272027212722272327242725272627272728272927302731273227332734273527362737273827392740274127422743274427452746274727482749275027512752275327542755275627572758275927602761276227632764276527662767276827692770277127722773277427752776277
72778277927802781278227832784278527862787278827892790279127922793279427952796279727982799280028012802280328042805280628072808280928102811281228132814281528162817281828192820282128222823282428252826282728282829283028312832283328342835283628372838283928402841284228432844284528462847284828492850285128522853285428552856285728582859286028612862286328642865286628672868286928702871287228732874287528762877287828792880288128822883288428852886288728882889289028912892289328942895289628972898289929002901290229032904290529062907290829092910291129122913291429152916291729182919292029212922292329242925292629272928292929302931293229332934293529362937293829392940294129422943294429452946294729482949295029512952295329542955295629572958295929602961296229632964296529662967296829692970297129722973297429752976297729782979298029812982298329842985298629872988298929902991299229932994299529962997299829993000300130023003300430053006300730083009301030113012301330143015301630173018301930203021302230233024302530263027302830293030303130323033303430353036303730383039304030413042304330443045304630473048304930503051305230533054305530563057305830593060306130623063306430653066306730683069307030713072307330743075307630773078307930803081308230833084308530863087308830893090309130923093309430953096309730983099310031013102310331043105310631073108310931103111311231133114311531163117311831193120312131223123312431253126312731283129313031313132313331343135313631373138313931403141314231433144314531463147314831493150315131523153315431553156315731583159316031613162316331643165316631673168316931703171317231733174317531763177317831793180318131823183318431853186318731883189319031913192319331943195319631973198319932003201320232033204320532063207320832093210321132123213321432153216321732183219322032213222322332243225322632273228322932303231323232333234323532363237323832393240324132423243324432453246324732483249325032513252325332543255325632573258325932603261326232633264326532663267326832693270327132723273327432753276327
73278327932803281328232833284328532863287328832893290329132923293329432953296329732983299330033013302330333043305330633073308330933103311331233133314331533163317331833193320332133223323332433253326332733283329333033313332333333343335333633373338333933403341334233433344334533463347334833493350335133523353335433553356335733583359336033613362336333643365336633673368336933703371337233733374337533763377337833793380338133823383338433853386338733883389339033913392339333943395339633973398339934003401340234033404340534063407340834093410341134123413341434153416341734183419342034213422342334243425342634273428342934303431343234333434343534363437343834393440344134423443344434453446344734483449345034513452345334543455345634573458345934603461346234633464346534663467346834693470347134723473347434753476347734783479348034813482348334843485348634873488348934903491349234933494349534963497349834993500350135023503350435053506350735083509351035113512351335143515351635173518351935203521352235233524352535263527352835293530353135323533353435353536353735383539354035413542354335443545354635473548354935503551355235533554355535563557355835593560356135623563356435653566356735683569357035713572357335743575357635773578357935803581358235833584358535863587358835893590359135923593359435953596359735983599360036013602360336043605360636073608360936103611361236133614361536163617361836193620362136223623362436253626362736283629363036313632363336343635363636373638363936403641364236433644364536463647364836493650365136523653365436553656365736583659366036613662366336643665366636673668366936703671367236733674367536763677367836793680368136823683368436853686368736883689369036913692369336943695369636973698369937003701370237033704370537063707370837093710371137123713371437153716371737183719372037213722372337243725372637273728372937303731373237333734373537363737373837393740374137423743374437453746374737483749375037513752375337543755375637573758375937603761376237633764376537663767376837693770377137723773377437753776377
7377837793780378137823783378437853786378737883789379037913792379337943795379637973798379938003801380238033804380538063807380838093810381138123813381438153816381738183819382038213822382338243825382638273828382938303831383238333834383538363837383838393840384138423843384438453846384738483849385038513852385338543855385638573858385938603861386238633864386538663867386838693870387138723873387438753876387738783879388038813882388338843885388638873888388938903891389238933894389538963897389838993900390139023903390439053906390739083909391039113912391339143915391639173918391939203921392239233924392539263927392839293930393139323933393439353936393739383939394039413942394339443945394639473948394939503951395239533954395539563957395839593960396139623963396439653966396739683969397039713972397339743975397639773978397939803981398239833984398539863987398839893990399139923993399439953996399739983999400040014002400340044005400640074008400940104011401240134014401540164017401840194020402140224023402440254026402740284029403040314032403340344035403640374038403940404041404240434044404540464047404840494050405140524053405440554056405740584059406040614062406340644065406640674068406940704071407240734074407540764077407840794080408140824083408440854086408740884089409040914092409340944095409640974098409941004101410241034104410541064107410841094110411141124113411441154116411741184119412041214122412341244125412641274128412941304131413241334134413541364137413841394140414141424143414441454146414741484149415041514152415341544155415641574158415941604161416241634164416541664167416841694170417141724173417441754176417741784179418041814182418341844185418641874188418941904191419241934194419541964197419841994200420142024203420442054206
  1. #!/usr/bin/env python3
  2. # vim: set syntax=python ts=4 :
  3. #
  4. # Copyright (c) 2018 Intel Corporation
  5. # SPDX-License-Identifier: Apache-2.0
  6. import os
  7. import contextlib
  8. import string
  9. import mmap
  10. import sys
  11. import re
  12. import subprocess
  13. import select
  14. import shutil
  15. import shlex
  16. import signal
  17. import threading
  18. import concurrent.futures
  19. from collections import OrderedDict
  20. import queue
  21. import time
  22. import csv
  23. import glob
  24. import concurrent
  25. import xml.etree.ElementTree as ET
  26. import logging
  27. import pty
  28. from pathlib import Path
  29. from distutils.spawn import find_executable
  30. from colorama import Fore
  31. import pickle
  32. import platform
  33. import yaml
  34. import json
  35. from multiprocessing import Lock, Process, Value
  36. try:
  37. # Use the C LibYAML parser if available, rather than the Python parser.
  38. # It's much faster.
  39. from yaml import CSafeLoader as SafeLoader
  40. from yaml import CDumper as Dumper
  41. except ImportError:
  42. from yaml import SafeLoader, Dumper
  43. try:
  44. import serial
  45. except ImportError:
  46. print("Install pyserial python module with pip to use --device-testing option.")
  47. try:
  48. from tabulate import tabulate
  49. except ImportError:
  50. print("Install tabulate python module with pip to use --device-testing option.")
  51. try:
  52. import psutil
  53. except ImportError:
  54. print("Install psutil python module with pip to run in Qemu.")
# Twister requires ZEPHYR_BASE to locate scripts, DTS tooling and support
# files; refuse to run without it.
ZEPHYR_BASE = os.getenv("ZEPHYR_BASE")
if not ZEPHYR_BASE:
    sys.exit("$ZEPHYR_BASE environment variable undefined")

# This is needed to load edt.pickle files.
sys.path.insert(0, os.path.join(ZEPHYR_BASE, "scripts", "dts",
                                "python-devicetree", "src"))
from devicetree import edtlib  # pylint: disable=unused-import

# Use this for internal comparisons; that's what canonicalization is
# for. Don't use it when invoking other components of the build system
# to avoid confusing and hard to trace inconsistencies in error messages
# and logs, generated Makefiles, etc. compared to when users invoke these
# components directly.
# Note "normalization" is different from canonicalization, see os.path.
canonical_zephyr_base = os.path.realpath(ZEPHYR_BASE)

# Make the in-tree helper modules (scl, expr_parser) importable.
sys.path.insert(0, os.path.join(ZEPHYR_BASE, "scripts/"))

import scl
import expr_parser

# Module-wide logger; handlers and final level are configured by the
# twister entry point.
logger = logging.getLogger('twister')
logger.setLevel(logging.DEBUG)
  74. class ExecutionCounter(object):
  75. def __init__(self, total=0):
  76. self._done = Value('i', 0)
  77. self._passed = Value('i', 0)
  78. self._skipped_configs = Value('i', 0)
  79. self._skipped_runtime = Value('i', 0)
  80. self._skipped_cases = Value('i', 0)
  81. self._error = Value('i', 0)
  82. self._failed = Value('i', 0)
  83. self._total = Value('i', total)
  84. self._cases = Value('i', 0)
  85. self.lock = Lock()
  86. @property
  87. def cases(self):
  88. with self._cases.get_lock():
  89. return self._cases.value
  90. @cases.setter
  91. def cases(self, value):
  92. with self._cases.get_lock():
  93. self._cases.value = value
  94. @property
  95. def skipped_cases(self):
  96. with self._skipped_cases.get_lock():
  97. return self._skipped_cases.value
  98. @skipped_cases.setter
  99. def skipped_cases(self, value):
  100. with self._skipped_cases.get_lock():
  101. self._skipped_cases.value = value
  102. @property
  103. def error(self):
  104. with self._error.get_lock():
  105. return self._error.value
  106. @error.setter
  107. def error(self, value):
  108. with self._error.get_lock():
  109. self._error.value = value
  110. @property
  111. def done(self):
  112. with self._done.get_lock():
  113. return self._done.value
  114. @done.setter
  115. def done(self, value):
  116. with self._done.get_lock():
  117. self._done.value = value
  118. @property
  119. def passed(self):
  120. with self._passed.get_lock():
  121. return self._passed.value
  122. @passed.setter
  123. def passed(self, value):
  124. with self._passed.get_lock():
  125. self._passed.value = value
  126. @property
  127. def skipped_configs(self):
  128. with self._skipped_configs.get_lock():
  129. return self._skipped_configs.value
  130. @skipped_configs.setter
  131. def skipped_configs(self, value):
  132. with self._skipped_configs.get_lock():
  133. self._skipped_configs.value = value
  134. @property
  135. def skipped_runtime(self):
  136. with self._skipped_runtime.get_lock():
  137. return self._skipped_runtime.value
  138. @skipped_runtime.setter
  139. def skipped_runtime(self, value):
  140. with self._skipped_runtime.get_lock():
  141. self._skipped_runtime.value = value
  142. @property
  143. def failed(self):
  144. with self._failed.get_lock():
  145. return self._failed.value
  146. @failed.setter
  147. def failed(self, value):
  148. with self._failed.get_lock():
  149. self._failed.value = value
  150. @property
  151. def total(self):
  152. with self._total.get_lock():
  153. return self._total.value
  154. class CMakeCacheEntry:
  155. '''Represents a CMake cache entry.
  156. This class understands the type system in a CMakeCache.txt, and
  157. converts the following cache types to Python types:
  158. Cache Type Python type
  159. ---------- -------------------------------------------
  160. FILEPATH str
  161. PATH str
  162. STRING str OR list of str (if ';' is in the value)
  163. BOOL bool
  164. INTERNAL str OR list of str (if ';' is in the value)
  165. ---------- -------------------------------------------
  166. '''
  167. # Regular expression for a cache entry.
  168. #
  169. # CMake variable names can include escape characters, allowing a
  170. # wider set of names than is easy to match with a regular
  171. # expression. To be permissive here, use a non-greedy match up to
  172. # the first colon (':'). This breaks if the variable name has a
  173. # colon inside, but it's good enough.
  174. CACHE_ENTRY = re.compile(
  175. r'''(?P<name>.*?) # name
  176. :(?P<type>FILEPATH|PATH|STRING|BOOL|INTERNAL) # type
  177. =(?P<value>.*) # value
  178. ''', re.X)
  179. @classmethod
  180. def _to_bool(cls, val):
  181. # Convert a CMake BOOL string into a Python bool.
  182. #
  183. # "True if the constant is 1, ON, YES, TRUE, Y, or a
  184. # non-zero number. False if the constant is 0, OFF, NO,
  185. # FALSE, N, IGNORE, NOTFOUND, the empty string, or ends in
  186. # the suffix -NOTFOUND. Named boolean constants are
  187. # case-insensitive. If the argument is not one of these
  188. # constants, it is treated as a variable."
  189. #
  190. # https://cmake.org/cmake/help/v3.0/command/if.html
  191. val = val.upper()
  192. if val in ('ON', 'YES', 'TRUE', 'Y'):
  193. return 1
  194. elif val in ('OFF', 'NO', 'FALSE', 'N', 'IGNORE', 'NOTFOUND', ''):
  195. return 0
  196. elif val.endswith('-NOTFOUND'):
  197. return 0
  198. else:
  199. try:
  200. v = int(val)
  201. return v != 0
  202. except ValueError as exc:
  203. raise ValueError('invalid bool {}'.format(val)) from exc
  204. @classmethod
  205. def from_line(cls, line, line_no):
  206. # Comments can only occur at the beginning of a line.
  207. # (The value of an entry could contain a comment character).
  208. if line.startswith('//') or line.startswith('#'):
  209. return None
  210. # Whitespace-only lines do not contain cache entries.
  211. if not line.strip():
  212. return None
  213. m = cls.CACHE_ENTRY.match(line)
  214. if not m:
  215. return None
  216. name, type_, value = (m.group(g) for g in ('name', 'type', 'value'))
  217. if type_ == 'BOOL':
  218. try:
  219. value = cls._to_bool(value)
  220. except ValueError as exc:
  221. args = exc.args + ('on line {}: {}'.format(line_no, line),)
  222. raise ValueError(args) from exc
  223. elif type_ in ['STRING', 'INTERNAL']:
  224. # If the value is a CMake list (i.e. is a string which
  225. # contains a ';'), convert to a Python list.
  226. if ';' in value:
  227. value = value.split(';')
  228. return CMakeCacheEntry(name, value)
  229. def __init__(self, name, value):
  230. self.name = name
  231. self.value = value
  232. def __str__(self):
  233. fmt = 'CMakeCacheEntry(name={}, value={})'
  234. return fmt.format(self.name, self.value)
  235. class CMakeCache:
  236. '''Parses and represents a CMake cache file.'''
  237. @staticmethod
  238. def from_file(cache_file):
  239. return CMakeCache(cache_file)
  240. def __init__(self, cache_file):
  241. self.cache_file = cache_file
  242. self.load(cache_file)
  243. def load(self, cache_file):
  244. entries = []
  245. with open(cache_file, 'r') as cache:
  246. for line_no, line in enumerate(cache):
  247. entry = CMakeCacheEntry.from_line(line, line_no)
  248. if entry:
  249. entries.append(entry)
  250. self._entries = OrderedDict((e.name, e) for e in entries)
  251. def get(self, name, default=None):
  252. entry = self._entries.get(name)
  253. if entry is not None:
  254. return entry.value
  255. else:
  256. return default
  257. def get_list(self, name, default=None):
  258. if default is None:
  259. default = []
  260. entry = self._entries.get(name)
  261. if entry is not None:
  262. value = entry.value
  263. if isinstance(value, list):
  264. return value
  265. elif isinstance(value, str):
  266. return [value] if value else []
  267. else:
  268. msg = 'invalid value {} type {}'
  269. raise RuntimeError(msg.format(value, type(value)))
  270. else:
  271. return default
  272. def __contains__(self, name):
  273. return name in self._entries
  274. def __getitem__(self, name):
  275. return self._entries[name].value
  276. def __setitem__(self, name, entry):
  277. if not isinstance(entry, CMakeCacheEntry):
  278. msg = 'improper type {} for value {}, expecting CMakeCacheEntry'
  279. raise TypeError(msg.format(type(entry), entry))
  280. self._entries[name] = entry
  281. def __delitem__(self, name):
  282. del self._entries[name]
  283. def __iter__(self):
  284. return iter(self._entries.values())
class TwisterException(Exception):
    """Base class for all twister-specific exceptions."""
    pass
class TwisterRuntimeError(TwisterException):
    """Generic runtime failure while executing twister."""
    pass
  289. class ConfigurationError(TwisterException):
  290. def __init__(self, cfile, message):
  291. TwisterException.__init__(self, cfile + ": " + message)
class BuildError(TwisterException):
    """Raised when building a test configuration fails."""
    pass
class ExecutionError(TwisterException):
    """Raised when executing a built test fails."""
    pass
  296. class HarnessImporter:
  297. def __init__(self, name):
  298. sys.path.insert(0, os.path.join(ZEPHYR_BASE, "scripts/pylib/twister"))
  299. module = __import__("harness")
  300. if name:
  301. my_class = getattr(module, name)
  302. else:
  303. my_class = getattr(module, "Test")
  304. self.instance = my_class()
class Handler:
    """Base class for running a built test (binary, QEMU, device, ...).

    Tracks state and duration of the run and provides helpers shared by
    the concrete handler subclasses: process termination, result
    recording and back-filling of unexecuted test cases.
    """

    def __init__(self, instance, type_str="build"):
        """Constructor

        @param instance test instance this handler executes
        @param type_str kind of handler (defaults to "build")
        """
        self.state = "waiting"
        self.run = False
        self.duration = 0
        self.type_str = type_str

        self.binary = None
        self.pid_fn = None  # path of a file holding the child's pid, if any
        self.call_make_run = False

        self.name = instance.name
        self.instance = instance
        self.timeout = instance.testcase.timeout
        self.sourcedir = instance.testcase.source_dir
        self.build_dir = instance.build_dir
        self.log = os.path.join(self.build_dir, "handler.log")
        self.returncode = 0
        self.set_state("running", self.duration)

        self.generator = None
        self.generator_cmd = None

        self.args = []
        self.terminated = False

    def set_state(self, state, duration):
        # Record the current state together with the elapsed time so far.
        self.state = state
        self.duration = duration

    def get_state(self):
        # Returns the (state, duration) pair as a tuple.
        ret = (self.state, self.duration)
        return ret

    def record(self, harness):
        # Append the harness' recorded rows (if any) to recording.csv in
        # the build directory; the header row is re-written on each call.
        if harness.recording:
            filename = os.path.join(self.build_dir, "recording.csv")
            with open(filename, "at") as csvfile:
                # NOTE(review): harness.fieldnames is passed in the csv
                # 'dialect' positional slot; missing dialect attributes fall
                # back to defaults, so this appears to be a no-op — confirm
                # whether plain csv.writer(csvfile, lineterminator=...) was
                # intended.
                cw = csv.writer(csvfile, harness.fieldnames, lineterminator=os.linesep)
                cw.writerow(harness.fieldnames)
                for instance in harness.recording:
                    cw.writerow(instance)

    def terminate(self, proc):
        # encapsulate terminate functionality so we do it consistently where ever
        # we might want to terminate the proc. We need try_kill_process_by_pid
        # because of both how newer ninja (1.6.0 or greater) and .NET / renode
        # work. Newer ninja's don't seem to pass SIGTERM down to the children
        # so we need to use try_kill_process_by_pid.
        #
        # Kill the whole child tree first, then the process itself.
        for child in psutil.Process(proc.pid).children(recursive=True):
            try:
                os.kill(child.pid, signal.SIGTERM)
            except ProcessLookupError:
                # Child already exited between enumeration and kill.
                pass
        proc.terminate()
        # sleep for a while before attempting to kill
        time.sleep(0.5)
        proc.kill()
        self.terminated = True

    def add_missing_testscases(self, harness):
        """
        If the testsuite was broken by some error (e.g. timeout) it is
        necessary to add information about the remaining test cases, which
        could not be performed due to this error.
        """
        # Any case the harness never reported is marked as blocked.
        for c in self.instance.testcase.cases:
            if c not in harness.tests:
                harness.tests[c] = "BLOCK"
  367. class BinaryHandler(Handler):
    def __init__(self, instance, type_str):
        """Constructor

        @param instance Test Instance
        @param type_str handler flavour, forwarded to Handler
        """
        super().__init__(instance, type_str)

        # When True, flash via "west flash" instead of the generator's
        # run target.
        self.call_west_flash = False

        # Tool options
        self.valgrind = False
        self.lsan = False
        self.asan = False
        self.ubsan = False
        self.coverage = False
  380. def try_kill_process_by_pid(self):
  381. if self.pid_fn:
  382. pid = int(open(self.pid_fn).read())
  383. os.unlink(self.pid_fn)
  384. self.pid_fn = None # clear so we don't try to kill the binary twice
  385. try:
  386. os.kill(pid, signal.SIGTERM)
  387. except ProcessLookupError:
  388. pass
    def _output_reader(self, proc):
        # Runs on a short-lived thread: block reading one line (bytes) of
        # the child's stdout and stash it on self.line. _output_handler
        # joins this thread with a timeout to implement its watchdog.
        self.line = proc.stdout.readline()
  391. def _output_handler(self, proc, harness):
  392. if harness.is_pytest:
  393. harness.handle(None)
  394. return
  395. log_out_fp = open(self.log, "wt")
  396. timeout_extended = False
  397. timeout_time = time.time() + self.timeout
  398. while True:
  399. this_timeout = timeout_time - time.time()
  400. if this_timeout < 0:
  401. break
  402. reader_t = threading.Thread(target=self._output_reader, args=(proc,), daemon=True)
  403. reader_t.start()
  404. reader_t.join(this_timeout)
  405. if not reader_t.is_alive():
  406. line = self.line
  407. logger.debug("OUTPUT: {0}".format(line.decode('utf-8').rstrip()))
  408. log_out_fp.write(line.decode('utf-8'))
  409. log_out_fp.flush()
  410. harness.handle(line.decode('utf-8').rstrip())
  411. if harness.state:
  412. if not timeout_extended or harness.capture_coverage:
  413. timeout_extended = True
  414. if harness.capture_coverage:
  415. timeout_time = time.time() + 30
  416. else:
  417. timeout_time = time.time() + 2
  418. else:
  419. reader_t.join(0)
  420. break
  421. try:
  422. # POSIX arch based ztests end on their own,
  423. # so let's give it up to 100ms to do so
  424. proc.wait(0.1)
  425. except subprocess.TimeoutExpired:
  426. self.terminate(proc)
  427. log_out_fp.close()
  428. def handle(self):
  429. harness_name = self.instance.testcase.harness.capitalize()
  430. harness_import = HarnessImporter(harness_name)
  431. harness = harness_import.instance
  432. harness.configure(self.instance)
  433. if self.call_make_run:
  434. command = [self.generator_cmd, "run"]
  435. elif self.call_west_flash:
  436. command = ["west", "flash", "--skip-rebuild", "-d", self.build_dir]
  437. else:
  438. command = [self.binary]
  439. run_valgrind = False
  440. if self.valgrind and shutil.which("valgrind"):
  441. command = ["valgrind", "--error-exitcode=2",
  442. "--leak-check=full",
  443. "--suppressions=" + ZEPHYR_BASE + "/scripts/valgrind.supp",
  444. "--log-file=" + self.build_dir + "/valgrind.log"
  445. ] + command
  446. run_valgrind = True
  447. logger.debug("Spawning process: " +
  448. " ".join(shlex.quote(word) for word in command) + os.linesep +
  449. "in directory: " + self.build_dir)
  450. start_time = time.time()
  451. env = os.environ.copy()
  452. if self.asan:
  453. env["ASAN_OPTIONS"] = "log_path=stdout:" + \
  454. env.get("ASAN_OPTIONS", "")
  455. if not self.lsan:
  456. env["ASAN_OPTIONS"] += "detect_leaks=0"
  457. if self.ubsan:
  458. env["UBSAN_OPTIONS"] = "log_path=stdout:halt_on_error=1:" + \
  459. env.get("UBSAN_OPTIONS", "")
  460. with subprocess.Popen(command, stdout=subprocess.PIPE,
  461. stderr=subprocess.PIPE, cwd=self.build_dir, env=env) as proc:
  462. logger.debug("Spawning BinaryHandler Thread for %s" % self.name)
  463. t = threading.Thread(target=self._output_handler, args=(proc, harness,), daemon=True)
  464. t.start()
  465. t.join()
  466. if t.is_alive():
  467. self.terminate(proc)
  468. t.join()
  469. proc.wait()
  470. self.returncode = proc.returncode
  471. self.try_kill_process_by_pid()
  472. handler_time = time.time() - start_time
  473. if self.coverage:
  474. subprocess.call(["GCOV_PREFIX=" + self.build_dir,
  475. "gcov", self.sourcedir, "-b", "-s", self.build_dir], shell=True)
  476. # FIXME: This is needed when killing the simulator, the console is
  477. # garbled and needs to be reset. Did not find a better way to do that.
  478. if sys.stdout.isatty():
  479. subprocess.call(["stty", "sane"])
  480. if harness.is_pytest:
  481. harness.pytest_run(self.log)
  482. self.instance.results = harness.tests
  483. if not self.terminated and self.returncode != 0:
  484. # When a process is killed, the default handler returns 128 + SIGTERM
  485. # so in that case the return code itself is not meaningful
  486. self.set_state("failed", handler_time)
  487. self.instance.reason = "Failed"
  488. elif run_valgrind and self.returncode == 2:
  489. self.set_state("failed", handler_time)
  490. self.instance.reason = "Valgrind error"
  491. elif harness.state:
  492. self.set_state(harness.state, handler_time)
  493. if harness.state == "failed":
  494. self.instance.reason = "Failed"
  495. else:
  496. self.set_state("timeout", handler_time)
  497. self.instance.reason = "Timeout"
  498. self.add_missing_testscases(harness)
  499. self.record(harness)
class DeviceHandler(Handler):
    """Handler that flashes a test image onto real hardware (a DUT from
    the suite's device pool) and judges the result from its serial
    console output."""

    def __init__(self, instance, type_str):
        """Constructor

        @param instance Test Instance
        """
        super().__init__(instance, type_str)

        # Back-reference to the owning TestSuite (provides duts,
        # west_flash, west_runner); assigned by the caller before handle().
        self.suite = None

    def monitor_serial(self, ser, halt_fileno, harness):
        """Read device console output and feed it to the harness.

        Loops until the harness reports a state, the serial port closes,
        or any byte arrives on halt_fileno (the "stop" pipe written by
        handle() on flash failure). All output is mirrored to self.log.

        @param ser Open serial.Serial connected to the device
        @param halt_fileno Read end of a pipe used to abort monitoring
        @param harness Harness object that parses console output
        """
        if harness.is_pytest:
            harness.handle(None)
            return

        log_out_fp = open(self.log, "wt")

        ser_fileno = ser.fileno()
        readlist = [halt_fileno, ser_fileno]

        if self.coverage:
            # Set capture_coverage to True to indicate that right after
            # test results we should get coverage data, otherwise we exit
            # from the test.
            harness.capture_coverage = True

        ser.flush()

        while ser.isOpen():
            readable, _, _ = select.select(readlist, [], [], self.timeout)

            if halt_fileno in readable:
                logger.debug('halted')
                ser.close()
                break
            if ser_fileno not in readable:
                continue  # Timeout.

            serial_line = None
            try:
                serial_line = ser.readline()
            except TypeError:
                pass
            except serial.SerialException:
                # Device disappeared or port error: stop monitoring.
                ser.close()
                break

            # Just because ser_fileno has data doesn't mean an entire line
            # is available yet.
            if serial_line:
                sl = serial_line.decode('utf-8', 'ignore').lstrip()
                logger.debug("DEVICE: {0}".format(sl.rstrip()))

                log_out_fp.write(sl)
                log_out_fp.flush()
                harness.handle(sl.rstrip())

            if harness.state:
                # Keep reading while coverage data is still expected.
                if not harness.capture_coverage:
                    ser.close()
                    break

        log_out_fp.close()

    def device_is_available(self, instance):
        """Try to claim a free DUT matching this instance.

        A DUT matches if its platform equals the instance's platform, it
        provides the required fixture (if any), and it has a serial or
        serial_pty connection. The claim (available -> 0) is done under
        the DUT's lock so concurrent handlers cannot grab the same device.

        @return the claimed DUT, or None if none is currently free
        """
        device = instance.platform.name
        fixture = instance.testcase.harness_config.get("fixture")
        for d in self.suite.duts:
            if fixture and fixture not in d.fixtures:
                continue
            if d.platform != device or not (d.serial or d.serial_pty):
                continue
            d.lock.acquire()
            avail = False
            if d.available:
                d.available = 0
                d.counter += 1
                avail = True
            d.lock.release()
            if avail:
                return d

        return None

    def make_device_available(self, serial):
        """Release the DUT owning the given serial device back to the pool."""
        for d in self.suite.duts:
            if d.serial == serial or d.serial_pty:
                d.available = 1

    @staticmethod
    def run_custom_script(script, timeout):
        """Run a user-supplied hook script, killing it after `timeout` seconds."""
        with subprocess.Popen(script, stderr=subprocess.PIPE, stdout=subprocess.PIPE) as proc:
            try:
                stdout, _ = proc.communicate(timeout=timeout)
                logger.debug(stdout.decode())

            except subprocess.TimeoutExpired:
                proc.kill()
                proc.communicate()
                logger.error("{} timed out".format(script))

    def handle(self):
        """Flash the image onto a claimed DUT, monitor the serial console
        through the harness, and record the final state.

        Blocks (polling once a second) until a matching DUT is free.
        Runs the DUT's pre/post-flash/post scripts at the appropriate
        points and always returns the device to the pool when done.
        """
        out_state = "failed"
        runner = None

        hardware = self.device_is_available(self.instance)
        while not hardware:
            logger.debug("Waiting for device {} to become available".format(self.instance.platform.name))
            time.sleep(1)
            hardware = self.device_is_available(self.instance)

        runner = hardware.runner or self.suite.west_runner
        serial_pty = hardware.serial_pty

        ser_pty_process = None
        if serial_pty:
            # The DUT's console is reached via a helper program rather
            # than a tty; run it attached to a fresh pseudo-terminal.
            master, slave = pty.openpty()
            try:
                ser_pty_process = subprocess.Popen(re.split(',| ', serial_pty), stdout=master, stdin=master, stderr=master)
            except subprocess.CalledProcessError as error:
                logger.error("Failed to run subprocess {}, error {}".format(serial_pty, error.output))
                return

            serial_device = os.ttyname(slave)
        else:
            serial_device = hardware.serial

        logger.debug("Using serial device {}".format(serial_device))

        if (self.suite.west_flash is not None) or runner:
            command = ["west", "flash", "--skip-rebuild", "-d", self.build_dir]
            command_extra_args = []

            # There are three ways this option is used.
            # 1) bare: --west-flash
            #    This results in options.west_flash == []
            # 2) with a value: --west-flash="--board-id=42"
            #    This results in options.west_flash == "--board-id=42"
            # 3) Multiple values: --west-flash="--board-id=42,--erase"
            #    This results in options.west_flash == "--board-id=42 --erase"
            if self.suite.west_flash and self.suite.west_flash != []:
                command_extra_args.extend(self.suite.west_flash.split(','))

            if runner:
                command.append("--runner")
                command.append(runner)

                # Each runner names the board/probe selector differently.
                board_id = hardware.probe_id or hardware.id
                product = hardware.product
                if board_id is not None:
                    if runner == "pyocd":
                        command_extra_args.append("--board-id")
                        command_extra_args.append(board_id)
                    elif runner == "nrfjprog":
                        command_extra_args.append("--snr")
                        command_extra_args.append(board_id)
                    elif runner == "openocd" and product == "STM32 STLink":
                        command_extra_args.append("--cmd-pre-init")
                        command_extra_args.append("hla_serial %s" % (board_id))
                    elif runner == "openocd" and product == "STLINK-V3":
                        command_extra_args.append("--cmd-pre-init")
                        command_extra_args.append("hla_serial %s" % (board_id))
                    elif runner == "openocd" and product == "EDBG CMSIS-DAP":
                        command_extra_args.append("--cmd-pre-init")
                        command_extra_args.append("cmsis_dap_serial %s" % (board_id))
                    elif runner == "jlink":
                        command.append("--tool-opt=-SelectEmuBySN %s" % (board_id))

            if command_extra_args != []:
                command.append('--')
                command.extend(command_extra_args)
        else:
            command = [self.generator_cmd, "-C", self.build_dir, "flash"]

        pre_script = hardware.pre_script
        post_flash_script = hardware.post_flash_script
        post_script = hardware.post_script

        if pre_script:
            self.run_custom_script(pre_script, 30)

        try:
            ser = serial.Serial(
                serial_device,
                baudrate=115200,
                parity=serial.PARITY_NONE,
                stopbits=serial.STOPBITS_ONE,
                bytesize=serial.EIGHTBITS,
                timeout=self.timeout
            )
        except serial.SerialException as e:
            self.set_state("failed", 0)
            self.instance.reason = "Failed"
            logger.error("Serial device error: %s" % (str(e)))

            if serial_pty and ser_pty_process:
                ser_pty_process.terminate()
                outs, errs = ser_pty_process.communicate()
                logger.debug("Process {} terminated outs: {} errs {}".format(serial_pty, outs, errs))

            # Return the DUT to the pool even on early failure.
            self.make_device_available(serial_device)
            return

        ser.flush()

        harness_name = self.instance.testcase.harness.capitalize()
        harness_import = HarnessImporter(harness_name)
        harness = harness_import.instance
        harness.configure(self.instance)

        # write_pipe is used to tell the monitor thread to stop
        # (monitor_serial selects on read_pipe).
        read_pipe, write_pipe = os.pipe()
        start_time = time.time()

        t = threading.Thread(target=self.monitor_serial, daemon=True,
                             args=(ser, read_pipe, harness))
        t.start()

        d_log = "{}/device.log".format(self.instance.build_dir)
        logger.debug('Flash command: %s', command)
        try:
            stdout = stderr = None
            with subprocess.Popen(command, stderr=subprocess.PIPE, stdout=subprocess.PIPE) as proc:
                try:
                    (stdout, stderr) = proc.communicate(timeout=30)
                    logger.debug(stdout.decode())

                    if proc.returncode != 0:
                        self.instance.reason = "Device issue (Flash?)"
                        with open(d_log, "w") as dlog_fp:
                            dlog_fp.write(stderr.decode())
                        os.write(write_pipe, b'x')  # halt the thread
                        out_state = "flash_error"
                except subprocess.TimeoutExpired:
                    proc.kill()
                    (stdout, stderr) = proc.communicate()
                    self.instance.reason = "Device issue (Timeout)"
                    with open(d_log, "w") as dlog_fp:
                        dlog_fp.write(stderr.decode())
        except subprocess.CalledProcessError:
            os.write(write_pipe, b'x')  # halt the thread

        if post_flash_script:
            self.run_custom_script(post_flash_script, 30)

        t.join(self.timeout)
        if t.is_alive():
            logger.debug("Timed out while monitoring serial output on {}".format(self.instance.platform.name))
            out_state = "timeout"

        if ser.isOpen():
            ser.close()

        if serial_pty:
            ser_pty_process.terminate()
            outs, errs = ser_pty_process.communicate()
            logger.debug("Process {} terminated outs: {} errs {}".format(serial_pty, outs, errs))

        os.close(write_pipe)
        os.close(read_pipe)

        handler_time = time.time() - start_time

        if out_state in ["timeout", "flash_error"]:
            self.add_missing_testscases(harness)

            if out_state == "timeout":
                self.instance.reason = "Timeout"
            elif out_state == "flash_error":
                self.instance.reason = "Flash error"

        if harness.is_pytest:
            harness.pytest_run(self.log)
        self.instance.results = harness.tests

        # sometimes a test instance hasn't been executed successfully with an
        # empty dictionary results, in order to include it into final report,
        # so fill the results as BLOCK
        if self.instance.results == {}:
            for k in self.instance.testcase.cases:
                self.instance.results[k] = 'BLOCK'

        if harness.state:
            self.set_state(harness.state, handler_time)
            if harness.state == "failed":
                self.instance.reason = "Failed"
        else:
            self.set_state(out_state, handler_time)

        if post_script:
            self.run_custom_script(post_script, 30)

        self.make_device_available(serial_device)
        self.record(harness)
class QEMUHandler(Handler):
    """Spawns a thread to monitor QEMU output from pipes

    We pass QEMU_PIPE to 'make run' and monitor the pipes for output.
    We need to do this as once qemu starts, it runs forever until killed.
    Test cases emit special messages to the console as they run, we check
    for these to collect whether the test passed or failed.
    """

    def __init__(self, instance, type_str):
        """Constructor

        @param instance Test instance
        """

        super().__init__(instance, type_str)
        # Base path for the console fifo pair (QEMU appends .in/.out).
        self.fifo_fn = os.path.join(instance.build_dir, "qemu-fifo")

        # File QEMU writes its PID into.
        self.pid_fn = os.path.join(instance.build_dir, "qemu.pid")

        if "ignore_qemu_crash" in instance.testcase.tags:
            self.ignore_qemu_crash = True
            self.ignore_unexpected_eof = True
        else:
            self.ignore_qemu_crash = False
            self.ignore_unexpected_eof = False

    @staticmethod
    def _get_cpu_time(pid):
        """get process CPU time.

        The guest virtual time in QEMU icount mode isn't host time and
        it's maintained by counting guest instructions, so we use QEMU
        process exection time to mostly simulate the time of guest OS.
        """
        proc = psutil.Process(pid)
        cpu_time = proc.cpu_times()
        return cpu_time.user + cpu_time.system

    @staticmethod
    def _thread(handler, timeout, outdir, logfile, fifo_fn, pid_fn, results, harness,
                ignore_unexpected_eof=False):
        """Monitor QEMU console output through the named fifos.

        Creates the fifo pair, reads the console one byte at a time,
        assembles lines, mirrors them to `logfile` and feeds them to the
        harness. Manages the overall timeout (compensated by the QEMU
        process's actual CPU time, and extended once the harness reports
        a state or while coverage data is being dumped). On exit, records
        the resulting state on `handler` and terminates QEMU.

        @param handler QEMUHandler instance owning this monitor
        @param timeout Overall timeout in seconds
        @param outdir Build/output directory (unused here)
        @param logfile Path for the mirrored console log
        @param fifo_fn Base fifo path (".in"/".out" are appended)
        @param pid_fn Path of the file QEMU writes its PID into
        @param results Shared results dict (unused here)
        @param harness Harness object that parses console output
        @param ignore_unexpected_eof Don't fail on console EOF if True
        """
        fifo_in = fifo_fn + ".in"
        fifo_out = fifo_fn + ".out"

        # These in/out nodes are named from QEMU's perspective, not ours
        if os.path.exists(fifo_in):
            os.unlink(fifo_in)
        os.mkfifo(fifo_in)
        if os.path.exists(fifo_out):
            os.unlink(fifo_out)
        os.mkfifo(fifo_out)

        # We don't do anything with out_fp but we need to open it for
        # writing so that QEMU doesn't block, due to the way pipes work
        out_fp = open(fifo_in, "wb")
        # Disable internal buffering, we don't
        # want read() or poll() to ever block if there is data in there
        in_fp = open(fifo_out, "rb", buffering=0)
        log_out_fp = open(logfile, "wt")

        start_time = time.time()
        timeout_time = start_time + timeout
        p = select.poll()
        p.register(in_fp, select.POLLIN)
        out_state = None

        line = ""
        timeout_extended = False

        pid = 0
        if os.path.exists(pid_fn):
            pid = int(open(pid_fn).read())

        while True:
            this_timeout = int((timeout_time - time.time()) * 1000)
            if this_timeout < 0 or not p.poll(this_timeout):
                try:
                    if pid and this_timeout > 0:
                        # there's possibility we polled nothing because
                        # of not enough CPU time scheduled by host for
                        # QEMU process during p.poll(this_timeout)
                        cpu_time = QEMUHandler._get_cpu_time(pid)
                        if cpu_time < timeout and not out_state:
                            # Grant the remaining guest CPU-time budget.
                            timeout_time = time.time() + (timeout - cpu_time)
                            continue
                except ProcessLookupError:
                    out_state = "failed"
                    break

                if not out_state:
                    out_state = "timeout"
                break

            # The PID file may appear only after QEMU has started.
            if pid == 0 and os.path.exists(pid_fn):
                pid = int(open(pid_fn).read())

            if harness.is_pytest:
                harness.handle(None)
                out_state = harness.state
                break

            try:
                c = in_fp.read(1).decode("utf-8")
            except UnicodeDecodeError:
                # Test is writing something weird, fail
                out_state = "unexpected byte"
                break

            if c == "":
                # EOF, this shouldn't happen unless QEMU crashes
                if not ignore_unexpected_eof:
                    out_state = "unexpected eof"
                break
            line = line + c
            if c != "\n":
                continue

            # line contains a full line of data output from QEMU
            log_out_fp.write(line)
            log_out_fp.flush()
            line = line.strip()
            logger.debug(f"QEMU ({pid}): {line}")

            harness.handle(line)
            if harness.state:
                # if we have registered a fail make sure the state is not
                # overridden by a false success message coming from the
                # testsuite
                if out_state not in ['failed', 'unexpected eof', 'unexpected byte']:
                    out_state = harness.state

                # if we get some state, that means test is doing well, we reset
                # the timeout and wait for 2 more seconds to catch anything
                # printed late. We wait much longer if code
                # coverage is enabled since dumping this information can
                # take some time.
                if not timeout_extended or harness.capture_coverage:
                    timeout_extended = True
                    if harness.capture_coverage:
                        timeout_time = time.time() + 30
                    else:
                        timeout_time = time.time() + 2
            line = ""

        if harness.is_pytest:
            harness.pytest_run(logfile)
            out_state = harness.state

        handler.record(harness)

        handler_time = time.time() - start_time
        logger.debug(f"QEMU ({pid}) complete ({out_state}) after {handler_time} seconds")

        if out_state == "timeout":
            handler.instance.reason = "Timeout"
            handler.set_state("failed", handler_time)
        elif out_state == "failed":
            handler.instance.reason = "Failed"
            handler.set_state("failed", handler_time)
        elif out_state in ['unexpected eof', 'unexpected byte']:
            handler.instance.reason = out_state
            handler.set_state("failed", handler_time)
        else:
            handler.set_state(out_state, handler_time)

        log_out_fp.close()
        out_fp.close()
        in_fp.close()
        if pid:
            try:
                # NOTE: inner "if pid:" is redundant (already checked above).
                if pid:
                    os.kill(pid, signal.SIGTERM)
            except ProcessLookupError:
                # Oh well, as long as it's dead! User probably sent Ctrl-C
                pass

        os.unlink(fifo_in)
        os.unlink(fifo_out)

    def handle(self):
        """Start the fifo monitor thread, run QEMU via the generator
        command, and reconcile the harness state with QEMU's exit code.
        """
        self.results = {}
        self.run = True

        # We pass this to QEMU which looks for fifos with .in and .out
        # suffixes.

        self.fifo_fn = os.path.join(self.instance.build_dir, "qemu-fifo")
        self.pid_fn = os.path.join(self.instance.build_dir, "qemu.pid")
        if os.path.exists(self.pid_fn):
            os.unlink(self.pid_fn)

        self.log_fn = self.log

        harness_import = HarnessImporter(self.instance.testcase.harness.capitalize())
        harness = harness_import.instance
        harness.configure(self.instance)

        self.thread = threading.Thread(name=self.name, target=QEMUHandler._thread,
                                       args=(self, self.timeout, self.build_dir,
                                             self.log_fn, self.fifo_fn,
                                             self.pid_fn, self.results, harness,
                                             self.ignore_unexpected_eof))

        self.instance.results = harness.tests
        self.thread.daemon = True
        logger.debug("Spawning QEMUHandler Thread for %s" % self.name)
        self.thread.start()
        if sys.stdout.isatty():
            subprocess.call(["stty", "sane"])

        logger.debug("Running %s (%s)" % (self.name, self.type_str))
        command = [self.generator_cmd]
        command += ["-C", self.build_dir, "run"]

        is_timeout = False
        qemu_pid = None

        with subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, cwd=self.build_dir) as proc:
            logger.debug("Spawning QEMUHandler Thread for %s" % self.name)

            try:
                proc.wait(self.timeout)
            except subprocess.TimeoutExpired:
                # sometimes QEMU can't handle SIGTERM signal correctly
                # in that case kill -9 QEMU process directly and leave
                # twister to judge testing result by console output

                is_timeout = True
                self.terminate(proc)
                if harness.state == "passed":
                    self.returncode = 0
                else:
                    self.returncode = proc.returncode
            else:
                if os.path.exists(self.pid_fn):
                    qemu_pid = int(open(self.pid_fn).read())
                logger.debug(f"No timeout, return code from QEMU ({qemu_pid}): {proc.returncode}")
                self.returncode = proc.returncode
            # Need to wait for harness to finish processing
            # output from QEMU. Otherwise it might miss some
            # error messages.
            self.thread.join(0)
            if self.thread.is_alive():
                logger.debug("Timed out while monitoring QEMU output")

            if os.path.exists(self.pid_fn):
                qemu_pid = int(open(self.pid_fn).read())
                os.unlink(self.pid_fn)

        logger.debug(f"return code from QEMU ({qemu_pid}): {self.returncode}")

        if (self.returncode != 0 and not self.ignore_qemu_crash) or not harness.state:
            self.set_state("failed", 0)
            if is_timeout:
                self.instance.reason = "Timeout"
            else:
                self.instance.reason = "Exited with {}".format(self.returncode)
            self.add_missing_testscases(harness)

    def get_fifo(self):
        """Return the base path of the QEMU console fifo pair."""
        return self.fifo_fn
class SizeCalculator:
    """Compute RAM and ROM footprint of a Zephyr ELF binary.

    Section sizes are read from `objdump -h` output and summed into RAM
    and ROM totals according to which of the three known section-name
    lists each section belongs to.
    """

    # Sections that occupy RAM only (no load image in ROM).
    alloc_sections = [
        "bss",
        "noinit",
        "app_bss",
        "app_noinit",
        "ccm_bss",
        "ccm_noinit"
    ]

    # Sections with initialized data: stored in ROM, copied to RAM at boot.
    rw_sections = [
        "datas",
        "initlevel",
        "exceptions",
        "initshell",
        "_static_thread_data_area",
        "k_timer_area",
        "k_mem_slab_area",
        "k_mem_pool_area",
        "sw_isr_table",
        "k_sem_area",
        "k_mutex_area",
        "app_shmem_regions",
        "_k_fifo_area",
        "_k_lifo_area",
        "k_stack_area",
        "k_msgq_area",
        "k_mbox_area",
        "k_pipe_area",
        "net_if_area",
        "net_if_dev_area",
        "net_l2_area",
        "net_l2_data",
        "k_queue_area",
        "_net_buf_pool_area",
        "app_datas",
        "kobject_data",
        "mmu_tables",
        "app_pad",
        "priv_stacks",
        "ccm_data",
        "usb_descriptor",
        "usb_data", "usb_bos_desc",
        "uart_mux",
        'log_backends_sections',
        'log_dynamic_sections',
        'log_const_sections',
        "app_smem",
        'shell_root_cmds_sections',
        'log_const_sections',
        "font_entry_sections",
        "priv_stacks_noinit",
        "_GCOV_BSS_SECTION_NAME",
        "gcov",
        "nocache",
        "devices",
        "k_heap_area",
    ]

    # These get copied into RAM only on non-XIP
    ro_sections = [
        "rom_start",
        "text",
        "ctors",
        "init_array",
        "reset",
        "z_object_assignment_area",
        "rodata",
        "net_l2",
        "vector",
        "sw_isr_table",
        "settings_handler_static_area",
        "bt_l2cap_fixed_chan_area",
        "bt_l2cap_br_fixed_chan_area",
        "bt_gatt_service_static_area",
        "vectors",
        "net_socket_register_area",
        "net_ppp_proto",
        "shell_area",
        "tracing_backend_area",
        "ppp_protocol_handler_area",
    ]

    def __init__(self, filename, extra_sections):
        """Constructor

        @param filename Path to the output binary
            The <filename> is parsed by objdump to determine section sizes
        @param extra_sections Additional section names to treat as
            recognized (i.e. excluded from unrecognized_sections())
        """
        # Make sure this is an ELF binary
        with open(filename, "rb") as f:
            magic = f.read(4)

        try:
            if magic != b'\x7fELF':
                raise TwisterRuntimeError("%s is not an ELF binary" % filename)
        except Exception as e:
            # Validation failure is fatal: report and exit the tool.
            print(str(e))
            sys.exit(2)

        # Search for CONFIG_XIP in the ELF's list of symbols using NM and AWK.
        # GREP can not be used as it returns an error if the symbol is not
        # found.
        is_xip_command = "nm " + filename + \
                         " | awk '/CONFIG_XIP/ { print $3 }'"
        is_xip_output = subprocess.check_output(
            is_xip_command, shell=True, stderr=subprocess.STDOUT).decode(
            "utf-8").strip()
        try:
            if is_xip_output.endswith("no symbols"):
                raise TwisterRuntimeError("%s has no symbol information" % filename)
        except Exception as e:
            print(str(e))
            sys.exit(2)

        # Non-empty output means the CONFIG_XIP symbol is present.
        self.is_xip = (len(is_xip_output) != 0)

        self.filename = filename
        self.sections = []
        self.rom_size = 0
        self.ram_size = 0
        self.extra_sections = extra_sections

        self._calculate_sizes()

    def get_ram_size(self):
        """Get the amount of RAM the application will use up on the device

        @return amount of RAM, in bytes
        """
        return self.ram_size

    def get_rom_size(self):
        """Get the size of the data that this application uses on device's flash

        @return amount of ROM, in bytes
        """
        return self.rom_size

    def unrecognized_sections(self):
        """Get a list of sections inside the binary that weren't recognized

        @return list of unrecognized section names
        """
        slist = []
        for v in self.sections:
            if not v["recognized"]:
                slist.append(v["name"])
        return slist

    def _calculate_sizes(self):
        """ Calculate RAM and ROM usage by section """
        objdump_command = "objdump -h " + self.filename
        objdump_output = subprocess.check_output(
            objdump_command, shell=True).decode("utf-8").splitlines()

        for line in objdump_output:
            words = line.split()

            if not words:  # Skip lines that are too short
                continue

            index = words[0]
            if not index[0].isdigit():  # Skip lines that do not start
                continue  # with a digit

            name = words[1]  # Skip lines with section names
            if name[0] == '.':  # starting with '.'
                continue

            # TODO this doesn't actually reflect the size in flash or RAM as
            # it doesn't include linker-imposed padding between sections.
            # It is close though.
            size = int(words[2], 16)
            if size == 0:
                continue

            load_addr = int(words[4], 16)
            virt_addr = int(words[3], 16)

            # Add section to memory use totals (for both non-XIP and XIP scenarios)
            # Unrecognized section names are not included in the calculations.
            recognized = True
            if name in SizeCalculator.alloc_sections:
                self.ram_size += size
                stype = "alloc"
            elif name in SizeCalculator.rw_sections:
                self.ram_size += size
                self.rom_size += size
                stype = "rw"
            elif name in SizeCalculator.ro_sections:
                self.rom_size += size
                if not self.is_xip:
                    self.ram_size += size
                stype = "ro"
            else:
                stype = "unknown"
                if name not in self.extra_sections:
                    recognized = False

            self.sections.append({"name": name, "load_addr": load_addr,
                                  "size": size, "virt_addr": virt_addr,
                                  "type": stype, "recognized": recognized})
  1135. class TwisterConfigParser:
  1136. """Class to read test case files with semantic checking
  1137. """
  1138. def __init__(self, filename, schema):
  1139. """Instantiate a new TwisterConfigParser object
  1140. @param filename Source .yaml file to read
  1141. """
  1142. self.data = {}
  1143. self.schema = schema
  1144. self.filename = filename
  1145. self.tests = {}
  1146. self.common = {}
  1147. def load(self):
  1148. self.data = scl.yaml_load_verify(self.filename, self.schema)
  1149. if 'tests' in self.data:
  1150. self.tests = self.data['tests']
  1151. if 'common' in self.data:
  1152. self.common = self.data['common']
  1153. def _cast_value(self, value, typestr):
  1154. if isinstance(value, str):
  1155. v = value.strip()
  1156. if typestr == "str":
  1157. return v
  1158. elif typestr == "float":
  1159. return float(value)
  1160. elif typestr == "int":
  1161. return int(value)
  1162. elif typestr == "bool":
  1163. return value
  1164. elif typestr.startswith("list") and isinstance(value, list):
  1165. return value
  1166. elif typestr.startswith("list") and isinstance(value, str):
  1167. vs = v.split()
  1168. if len(typestr) > 4 and typestr[4] == ":":
  1169. return [self._cast_value(vsi, typestr[5:]) for vsi in vs]
  1170. else:
  1171. return vs
  1172. elif typestr.startswith("set"):
  1173. vs = v.split()
  1174. if len(typestr) > 3 and typestr[3] == ":":
  1175. return {self._cast_value(vsi, typestr[4:]) for vsi in vs}
  1176. else:
  1177. return set(vs)
  1178. elif typestr.startswith("map"):
  1179. return value
  1180. else:
  1181. raise ConfigurationError(
  1182. self.filename, "unknown type '%s'" % value)
  1183. def get_test(self, name, valid_keys):
  1184. """Get a dictionary representing the keys/values within a test
  1185. @param name The test in the .yaml file to retrieve data from
  1186. @param valid_keys A dictionary representing the intended semantics
  1187. for this test. Each key in this dictionary is a key that could
  1188. be specified, if a key is given in the .yaml file which isn't in
  1189. here, it will generate an error. Each value in this dictionary
  1190. is another dictionary containing metadata:
  1191. "default" - Default value if not given
  1192. "type" - Data type to convert the text value to. Simple types
  1193. supported are "str", "float", "int", "bool" which will get
  1194. converted to respective Python data types. "set" and "list"
  1195. may also be specified which will split the value by
  1196. whitespace (but keep the elements as strings). finally,
  1197. "list:<type>" and "set:<type>" may be given which will
  1198. perform a type conversion after splitting the value up.
  1199. "required" - If true, raise an error if not defined. If false
  1200. and "default" isn't specified, a type conversion will be
  1201. done on an empty string
  1202. @return A dictionary containing the test key-value pairs with
  1203. type conversion and default values filled in per valid_keys
  1204. """
  1205. d = {}
  1206. for k, v in self.common.items():
  1207. d[k] = v
  1208. for k, v in self.tests[name].items():
  1209. if k in d:
  1210. if isinstance(d[k], str):
  1211. # By default, we just concatenate string values of keys
  1212. # which appear both in "common" and per-test sections,
  1213. # but some keys are handled in adhoc way based on their
  1214. # semantics.
  1215. if k == "filter":
  1216. d[k] = "(%s) and (%s)" % (d[k], v)
  1217. else:
  1218. d[k] += " " + v
  1219. else:
  1220. d[k] = v
  1221. for k, kinfo in valid_keys.items():
  1222. if k not in d:
  1223. if "required" in kinfo:
  1224. required = kinfo["required"]
  1225. else:
  1226. required = False
  1227. if required:
  1228. raise ConfigurationError(
  1229. self.filename,
  1230. "missing required value for '%s' in test '%s'" %
  1231. (k, name))
  1232. else:
  1233. if "default" in kinfo:
  1234. default = kinfo["default"]
  1235. else:
  1236. default = self._cast_value("", kinfo["type"])
  1237. d[k] = default
  1238. else:
  1239. try:
  1240. d[k] = self._cast_value(d[k], kinfo["type"])
  1241. except ValueError:
  1242. raise ConfigurationError(
  1243. self.filename, "bad %s value '%s' for key '%s' in name '%s'" %
  1244. (kinfo["type"], d[k], k, name))
  1245. return d
  1246. class Platform:
  1247. """Class representing metadata for a particular platform
  1248. Maps directly to BOARD when building"""
  1249. platform_schema = scl.yaml_load(os.path.join(ZEPHYR_BASE,
  1250. "scripts", "schemas", "twister", "platform-schema.yaml"))
  1251. def __init__(self):
  1252. """Constructor.
  1253. """
  1254. self.name = ""
  1255. self.twister = True
  1256. # if no RAM size is specified by the board, take a default of 128K
  1257. self.ram = 128
  1258. self.ignore_tags = []
  1259. self.only_tags = []
  1260. self.default = False
  1261. # if no flash size is specified by the board, take a default of 512K
  1262. self.flash = 512
  1263. self.supported = set()
  1264. self.arch = ""
  1265. self.type = "na"
  1266. self.simulation = "na"
  1267. self.supported_toolchains = []
  1268. self.env = []
  1269. self.env_satisfied = True
  1270. self.filter_data = dict()
  1271. def load(self, platform_file):
  1272. scp = TwisterConfigParser(platform_file, self.platform_schema)
  1273. scp.load()
  1274. data = scp.data
  1275. self.name = data['identifier']
  1276. self.twister = data.get("twister", True)
  1277. # if no RAM size is specified by the board, take a default of 128K
  1278. self.ram = data.get("ram", 128)
  1279. testing = data.get("testing", {})
  1280. self.ignore_tags = testing.get("ignore_tags", [])
  1281. self.only_tags = testing.get("only_tags", [])
  1282. self.default = testing.get("default", False)
  1283. # if no flash size is specified by the board, take a default of 512K
  1284. self.flash = data.get("flash", 512)
  1285. self.supported = set()
  1286. for supp_feature in data.get("supported", []):
  1287. for item in supp_feature.split(":"):
  1288. self.supported.add(item)
  1289. self.arch = data['arch']
  1290. self.type = data.get('type', "na")
  1291. self.simulation = data.get('simulation', "na")
  1292. self.supported_toolchains = data.get("toolchain", [])
  1293. self.env = data.get("env", [])
  1294. self.env_satisfied = True
  1295. for env in self.env:
  1296. if not os.environ.get(env, None):
  1297. self.env_satisfied = False
  1298. def __repr__(self):
  1299. return "<%s on %s>" % (self.name, self.arch)
class DisablePyTestCollectionMixin(object):
    """Mixin that prevents pytest from collecting subclasses as tests.

    Classes in this module have Test-prefixed names (e.g. TestCase,
    TestInstance) that match pytest's default collection patterns;
    setting __test__ to False tells pytest to skip them.
    """
    __test__ = False
class TestCase(DisablePyTestCollectionMixin):
    """Class representing a test application
    """

    def __init__(self, testcase_root, workdir, name):
        """TestCase constructor.

        This gets called by TestSuite as it finds and reads test yaml files.
        Multiple TestCase instances may be generated from a single testcase.yaml,
        each one corresponds to an entry within that file.

        We need to have a unique name for every single test case. Since
        a testcase.yaml can define multiple tests, the canonical name for
        the test case is <workdir>/<name>.

        @param testcase_root os.path.abspath() of one of the --testcase-root
        @param workdir Sub-directory of testcase_root where the
            .yaml test configuration file was found
        @param name Name of this test case, corresponding to the entry name
            in the test case configuration file. For many test cases that just
            define one test, can be anything and is usually "test". This is
            really only used to distinguish between different cases when
            the testcase.yaml defines multiple tests
        """

        self.source_dir = ""
        self.yamlfile = ""
        # Names of sub-cases discovered by parse_subcases()
        self.cases = []
        self.name = self.get_unique(testcase_root, workdir, name)
        self.id = name

        self.type = None
        self.tags = set()
        self.extra_args = None
        self.extra_configs = None
        self.arch_allow = None
        self.arch_exclude = None
        self.skip = False
        self.platform_exclude = None
        self.platform_allow = None
        self.toolchain_exclude = None
        self.toolchain_allow = None
        # Boolean filter expression evaluated against build artifacts
        self.tc_filter = None
        self.timeout = 60
        self.harness = ""
        self.harness_config = {}
        self.build_only = True
        self.build_on_all = False
        self.slow = False
        self.min_ram = -1
        self.depends_on = None
        self.min_flash = -1
        self.extra_sections = None
        self.integration_platforms = []

    @staticmethod
    def get_unique(testcase_root, workdir, name):
        """Build the canonical unique name for a test case.

        @return <relative testcase root>/<workdir>/<name>, normalized.
        @raises TwisterException if the name has no '.' separator
            (tests must be named <category>.<subsystem>...).
        """
        canonical_testcase_root = os.path.realpath(testcase_root)
        if Path(canonical_zephyr_base) in Path(canonical_testcase_root).parents:
            # This is in ZEPHYR_BASE, so include path in name for uniqueness
            # FIXME: We should not depend on path of test for unique names.
            relative_tc_root = os.path.relpath(canonical_testcase_root,
                                               start=canonical_zephyr_base)
        else:
            relative_tc_root = ""

        # workdir can be "."
        unique = os.path.normpath(os.path.join(relative_tc_root, workdir, name))
        check = name.split(".")
        if len(check) < 2:
            raise TwisterException(f"""bad test name '{name}' in {testcase_root}/{workdir}. \
Tests should reference the category and subsystem with a dot as a separator.
"""
                                   )
        return unique

    @staticmethod
    def scan_file(inf_name):
        """Scan one C source file for ztest sub-test names.

        @param inf_name Path of the file to scan.
        @return (matches, warnings): list of sub-case names with the
            leading "test_" stripped (None if no ztest_test_suite was
            found), and a warning string or None.
        @raises ValueError if a suite is declared but never run via
            ztest_run_test_suite().
        """
        suite_regex = re.compile(
            # do not match until end-of-line, otherwise we won't allow
            # stc_regex below to catch the ones that are declared in the same
            # line--as we only search starting the end of this match
            br"^\s*ztest_test_suite\(\s*(?P<suite_name>[a-zA-Z0-9_]+)\s*,",
            re.MULTILINE)
        stc_regex = re.compile(
            br"^\s*"  # empty space at the beginning is ok
            # catch the case where it is declared in the same sentence, e.g:
            #
            # ztest_test_suite(mutex_complex, ztest_user_unit_test(TESTNAME));
            br"(?:ztest_test_suite\([a-zA-Z0-9_]+,\s*)?"
            # Catch ztest[_user]_unit_test-[_setup_teardown](TESTNAME)
            br"ztest_(?:1cpu_)?(?:user_)?unit_test(?:_setup_teardown)?"
            # Consume the argument that becomes the extra testcase
            br"\(\s*"
            br"(?P<stc_name>[a-zA-Z0-9_]+)"
            # _setup_teardown() variant has two extra arguments that we ignore
            br"(?:\s*,\s*[a-zA-Z0-9_]+\s*,\s*[a-zA-Z0-9_]+)?"
            br"\s*\)",
            # We don't check how it finishes; we don't care
            re.MULTILINE)
        suite_run_regex = re.compile(
            br"^\s*ztest_run_test_suite\((?P<suite_name>[a-zA-Z0-9_]+)\)",
            re.MULTILINE)
        # Preprocessor conditionals inside the suite body cannot be
        # evaluated statically, so they are reported as a warning.
        achtung_regex = re.compile(
            br"(#ifdef|#endif)",
            re.MULTILINE)
        warnings = None

        with open(inf_name) as inf:
            # mmap the file so the bytes regexes can search it directly;
            # Windows mmap does not take flags/prot/offset.
            if os.name == 'nt':
                mmap_args = {'fileno': inf.fileno(), 'length': 0, 'access': mmap.ACCESS_READ}
            else:
                mmap_args = {'fileno': inf.fileno(), 'length': 0, 'flags': mmap.MAP_PRIVATE, 'prot': mmap.PROT_READ,
                             'offset': 0}

            with contextlib.closing(mmap.mmap(**mmap_args)) as main_c:
                suite_regex_match = suite_regex.search(main_c)
                if not suite_regex_match:
                    # can't find ztest_test_suite, maybe a client, because
                    # it includes ztest.h
                    return None, None

                suite_run_match = suite_run_regex.search(main_c)
                if not suite_run_match:
                    raise ValueError("can't find ztest_run_test_suite")

                # Only scan the region between the suite declaration and
                # its run call.
                achtung_matches = re.findall(
                    achtung_regex,
                    main_c[suite_regex_match.end():suite_run_match.start()])
                if achtung_matches:
                    warnings = "found invalid %s in ztest_test_suite()" \
                               % ", ".join(sorted({match.decode() for match in achtung_matches}, reverse=True))
                _matches = re.findall(
                    stc_regex,
                    main_c[suite_regex_match.end():suite_run_match.start()])
                for match in _matches:
                    if not match.decode().startswith("test_"):
                        warnings = "Found a test that does not start with test_"
                matches = [match.decode().replace("test_", "", 1) for match in _matches]
                return matches, warnings

    def scan_path(self, path):
        """Collect sub-case names from all C sources under a test directory.

        Scans src/*.c* (errors on warnings there) and top-level *.c files
        (warnings only logged). @return list of sub-case names.
        """
        subcases = []
        for filename in glob.glob(os.path.join(path, "src", "*.c*")):
            try:
                _subcases, warnings = self.scan_file(filename)
                if warnings:
                    logger.error("%s: %s" % (filename, warnings))
                    raise TwisterRuntimeError("%s: %s" % (filename, warnings))
                if _subcases:
                    subcases += _subcases
            except ValueError as e:
                logger.error("%s: can't find: %s" % (filename, e))

        for filename in glob.glob(os.path.join(path, "*.c")):
            try:
                _subcases, warnings = self.scan_file(filename)
                if warnings:
                    logger.error("%s: %s" % (filename, warnings))
                if _subcases:
                    subcases += _subcases
            except ValueError as e:
                logger.error("%s: can't find: %s" % (filename, e))
        return subcases

    def parse_subcases(self, test_path):
        """Fill self.cases with "<id>.<subcase>" names found under test_path.

        Falls back to the bare test id when no sub-cases are discovered.
        """
        results = self.scan_path(test_path)
        for sub in results:
            name = "{}.{}".format(self.id, sub)
            self.cases.append(name)

        if not results:
            self.cases.append(self.id)

    def __str__(self):
        return self.name
  1460. class TestInstance(DisablePyTestCollectionMixin):
  1461. """Class representing the execution of a particular TestCase on a platform
  1462. @param test The TestCase object we want to build/execute
  1463. @param platform Platform object that we want to build and run against
  1464. @param base_outdir Base directory for all test results. The actual
  1465. out directory used is <outdir>/<platform>/<test case name>
  1466. """
  1467. def __init__(self, testcase, platform, outdir):
  1468. self.testcase = testcase
  1469. self.platform = platform
  1470. self.status = None
  1471. self.reason = "Unknown"
  1472. self.metrics = dict()
  1473. self.handler = None
  1474. self.outdir = outdir
  1475. self.name = os.path.join(platform.name, testcase.name)
  1476. self.build_dir = os.path.join(outdir, platform.name, testcase.name)
  1477. self.run = False
  1478. self.results = {}
  1479. def __getstate__(self):
  1480. d = self.__dict__.copy()
  1481. return d
  1482. def __setstate__(self, d):
  1483. self.__dict__.update(d)
  1484. def __lt__(self, other):
  1485. return self.name < other.name
  1486. @staticmethod
  1487. def testcase_runnable(testcase, fixtures):
  1488. can_run = False
  1489. # console harness allows us to run the test and capture data.
  1490. if testcase.harness in [ 'console', 'ztest', 'pytest']:
  1491. can_run = True
  1492. # if we have a fixture that is also being supplied on the
  1493. # command-line, then we need to run the test, not just build it.
  1494. fixture = testcase.harness_config.get('fixture')
  1495. if fixture:
  1496. can_run = (fixture in fixtures)
  1497. elif testcase.harness:
  1498. can_run = False
  1499. else:
  1500. can_run = True
  1501. return can_run
  1502. # Global testsuite parameters
  1503. def check_runnable(self, enable_slow=False, filter='buildable', fixtures=[]):
  1504. # right now we only support building on windows. running is still work
  1505. # in progress.
  1506. if os.name == 'nt':
  1507. return False
  1508. # we asked for build-only on the command line
  1509. if self.testcase.build_only:
  1510. return False
  1511. # Do not run slow tests:
  1512. skip_slow = self.testcase.slow and not enable_slow
  1513. if skip_slow:
  1514. return False
  1515. target_ready = bool(self.testcase.type == "unit" or \
  1516. self.platform.type == "native" or \
  1517. self.platform.simulation in ["mdb-nsim", "nsim", "renode", "qemu", "tsim", "armfvp"] or \
  1518. filter == 'runnable')
  1519. if self.platform.simulation == "nsim":
  1520. if not find_executable("nsimdrv"):
  1521. target_ready = False
  1522. if self.platform.simulation == "mdb-nsim":
  1523. if not find_executable("mdb"):
  1524. target_ready = False
  1525. if self.platform.simulation == "renode":
  1526. if not find_executable("renode"):
  1527. target_ready = False
  1528. if self.platform.simulation == "tsim":
  1529. if not find_executable("tsim-leon3"):
  1530. target_ready = False
  1531. testcase_runnable = self.testcase_runnable(self.testcase, fixtures)
  1532. return testcase_runnable and target_ready
  1533. def create_overlay(self, platform, enable_asan=False, enable_ubsan=False, enable_coverage=False, coverage_platform=[]):
  1534. # Create this in a "twister/" subdirectory otherwise this
  1535. # will pass this overlay to kconfig.py *twice* and kconfig.cmake
  1536. # will silently give that second time precedence over any
  1537. # --extra-args=CONFIG_*
  1538. subdir = os.path.join(self.build_dir, "twister")
  1539. content = ""
  1540. if self.testcase.extra_configs:
  1541. content = "\n".join(self.testcase.extra_configs)
  1542. if enable_coverage:
  1543. if platform.name in coverage_platform:
  1544. content = content + "\nCONFIG_COVERAGE=y"
  1545. content = content + "\nCONFIG_COVERAGE_DUMP=y"
  1546. if enable_asan:
  1547. if platform.type == "native":
  1548. content = content + "\nCONFIG_ASAN=y"
  1549. if enable_ubsan:
  1550. if platform.type == "native":
  1551. content = content + "\nCONFIG_UBSAN=y"
  1552. if content:
  1553. os.makedirs(subdir, exist_ok=True)
  1554. file = os.path.join(subdir, "testcase_extra.conf")
  1555. with open(file, "w") as f:
  1556. f.write(content)
  1557. return content
  1558. def calculate_sizes(self):
  1559. """Get the RAM/ROM sizes of a test case.
  1560. This can only be run after the instance has been executed by
  1561. MakeGenerator, otherwise there won't be any binaries to measure.
  1562. @return A SizeCalculator object
  1563. """
  1564. fns = glob.glob(os.path.join(self.build_dir, "zephyr", "*.elf"))
  1565. fns.extend(glob.glob(os.path.join(self.build_dir, "zephyr", "*.exe")))
  1566. fns = [x for x in fns if not x.endswith('_prebuilt.elf')]
  1567. if len(fns) != 1:
  1568. raise BuildError("Missing/multiple output ELF binary")
  1569. return SizeCalculator(fns[0], self.testcase.extra_sections)
  1570. def fill_results_by_status(self):
  1571. """Fills results according to self.status
  1572. The method is used to propagate the instance level status
  1573. to the test cases inside. Useful when the whole instance is skipped
  1574. and the info is required also at the test cases level for reporting.
  1575. Should be used with caution, e.g. should not be used
  1576. to fill all results with passes
  1577. """
  1578. status_to_verdict = {
  1579. 'skipped': 'SKIP',
  1580. 'error': 'BLOCK',
  1581. 'failure': 'FAILED'
  1582. }
  1583. for k in self.results:
  1584. self.results[k] = status_to_verdict[self.status]
  1585. def __repr__(self):
  1586. return "<TestCase %s on %s>" % (self.testcase.name, self.platform.name)
  1587. class CMake():
  1588. config_re = re.compile('(CONFIG_[A-Za-z0-9_]+)[=]\"?([^\"]*)\"?$')
  1589. dt_re = re.compile('([A-Za-z0-9_]+)[=]\"?([^\"]*)\"?$')
  1590. def __init__(self, testcase, platform, source_dir, build_dir):
  1591. self.cwd = None
  1592. self.capture_output = True
  1593. self.defconfig = {}
  1594. self.cmake_cache = {}
  1595. self.instance = None
  1596. self.testcase = testcase
  1597. self.platform = platform
  1598. self.source_dir = source_dir
  1599. self.build_dir = build_dir
  1600. self.log = "build.log"
  1601. self.generator = None
  1602. self.generator_cmd = None
  1603. def parse_generated(self):
  1604. self.defconfig = {}
  1605. return {}
  1606. def run_build(self, args=[]):
  1607. logger.debug("Building %s for %s" % (self.source_dir, self.platform.name))
  1608. cmake_args = []
  1609. cmake_args.extend(args)
  1610. cmake = shutil.which('cmake')
  1611. cmd = [cmake] + cmake_args
  1612. kwargs = dict()
  1613. if self.capture_output:
  1614. kwargs['stdout'] = subprocess.PIPE
  1615. # CMake sends the output of message() to stderr unless it's STATUS
  1616. kwargs['stderr'] = subprocess.STDOUT
  1617. if self.cwd:
  1618. kwargs['cwd'] = self.cwd
  1619. p = subprocess.Popen(cmd, **kwargs)
  1620. out, _ = p.communicate()
  1621. results = {}
  1622. if p.returncode == 0:
  1623. msg = "Finished building %s for %s" % (self.source_dir, self.platform.name)
  1624. self.instance.status = "passed"
  1625. results = {'msg': msg, "returncode": p.returncode, "instance": self.instance}
  1626. if out:
  1627. log_msg = out.decode(sys.getdefaultencoding())
  1628. with open(os.path.join(self.build_dir, self.log), "a") as log:
  1629. log.write(log_msg)
  1630. else:
  1631. return None
  1632. else:
  1633. # A real error occurred, raise an exception
  1634. log_msg = ""
  1635. if out:
  1636. log_msg = out.decode(sys.getdefaultencoding())
  1637. with open(os.path.join(self.build_dir, self.log), "a") as log:
  1638. log.write(log_msg)
  1639. if log_msg:
  1640. res = re.findall("region `(FLASH|ROM|RAM|ICCM|DCCM|SRAM)' overflowed by", log_msg)
  1641. if res and not self.overflow_as_errors:
  1642. logger.debug("Test skipped due to {} Overflow".format(res[0]))
  1643. self.instance.status = "skipped"
  1644. self.instance.reason = "{} overflow".format(res[0])
  1645. else:
  1646. self.instance.status = "error"
  1647. self.instance.reason = "Build failure"
  1648. results = {
  1649. "returncode": p.returncode,
  1650. "instance": self.instance,
  1651. }
  1652. return results
  1653. def run_cmake(self, args=[]):
  1654. if self.warnings_as_errors:
  1655. ldflags = "-Wl,--fatal-warnings"
  1656. cflags = "-Werror"
  1657. aflags = "-Wa,--fatal-warnings"
  1658. gen_defines_args = "--edtlib-Werror"
  1659. else:
  1660. ldflags = cflags = aflags = ""
  1661. gen_defines_args = ""
  1662. logger.debug("Running cmake on %s for %s" % (self.source_dir, self.platform.name))
  1663. cmake_args = [
  1664. f'-B{self.build_dir}',
  1665. f'-S{self.source_dir}',
  1666. f'-DEXTRA_CFLAGS="{cflags}"',
  1667. f'-DEXTRA_AFLAGS="{aflags}',
  1668. f'-DEXTRA_LDFLAGS="{ldflags}"',
  1669. f'-DEXTRA_GEN_DEFINES_ARGS={gen_defines_args}',
  1670. f'-G{self.generator}'
  1671. ]
  1672. if self.cmake_only:
  1673. cmake_args.append("-DCMAKE_EXPORT_COMPILE_COMMANDS=1")
  1674. args = ["-D{}".format(a.replace('"', '')) for a in args]
  1675. cmake_args.extend(args)
  1676. cmake_opts = ['-DBOARD={}'.format(self.platform.name)]
  1677. cmake_args.extend(cmake_opts)
  1678. logger.debug("Calling cmake with arguments: {}".format(cmake_args))
  1679. cmake = shutil.which('cmake')
  1680. cmd = [cmake] + cmake_args
  1681. kwargs = dict()
  1682. if self.capture_output:
  1683. kwargs['stdout'] = subprocess.PIPE
  1684. # CMake sends the output of message() to stderr unless it's STATUS
  1685. kwargs['stderr'] = subprocess.STDOUT
  1686. if self.cwd:
  1687. kwargs['cwd'] = self.cwd
  1688. p = subprocess.Popen(cmd, **kwargs)
  1689. out, _ = p.communicate()
  1690. if p.returncode == 0:
  1691. filter_results = self.parse_generated()
  1692. msg = "Finished building %s for %s" % (self.source_dir, self.platform.name)
  1693. logger.debug(msg)
  1694. results = {'msg': msg, 'filter': filter_results}
  1695. else:
  1696. self.instance.status = "error"
  1697. self.instance.reason = "Cmake build failure"
  1698. self.instance.fill_results_by_status()
  1699. logger.error("Cmake build failure: %s for %s" % (self.source_dir, self.platform.name))
  1700. results = {"returncode": p.returncode}
  1701. if out:
  1702. with open(os.path.join(self.build_dir, self.log), "a") as log:
  1703. log_msg = out.decode(sys.getdefaultencoding())
  1704. log.write(log_msg)
  1705. return results
  1706. @staticmethod
  1707. def run_cmake_script(args=[]):
  1708. logger.debug("Running cmake script %s" % (args[0]))
  1709. cmake_args = ["-D{}".format(a.replace('"', '')) for a in args[1:]]
  1710. cmake_args.extend(['-P', args[0]])
  1711. logger.debug("Calling cmake with arguments: {}".format(cmake_args))
  1712. cmake = shutil.which('cmake')
  1713. if not cmake:
  1714. msg = "Unable to find `cmake` in path"
  1715. logger.error(msg)
  1716. raise Exception(msg)
  1717. cmd = [cmake] + cmake_args
  1718. kwargs = dict()
  1719. kwargs['stdout'] = subprocess.PIPE
  1720. # CMake sends the output of message() to stderr unless it's STATUS
  1721. kwargs['stderr'] = subprocess.STDOUT
  1722. p = subprocess.Popen(cmd, **kwargs)
  1723. out, _ = p.communicate()
  1724. # It might happen that the environment adds ANSI escape codes like \x1b[0m,
  1725. # for instance if twister is executed from inside a makefile. In such a
  1726. # scenario it is then necessary to remove them, as otherwise the JSON decoding
  1727. # will fail.
  1728. ansi_escape = re.compile(r'\x1B(?:[@-Z\\-_]|\[[0-?]*[ -/]*[@-~])')
  1729. out = ansi_escape.sub('', out.decode())
  1730. if p.returncode == 0:
  1731. msg = "Finished running %s" % (args[0])
  1732. logger.debug(msg)
  1733. results = {"returncode": p.returncode, "msg": msg, "stdout": out}
  1734. else:
  1735. logger.error("Cmake script failure: %s" % (args[0]))
  1736. results = {"returncode": p.returncode, "returnmsg": out}
  1737. return results
class FilterBuilder(CMake):
    """CMake subclass that extracts filter data from a configured build
    and evaluates the test case's tc_filter expression against it."""

    def __init__(self, testcase, platform, source_dir, build_dir):
        super().__init__(testcase, platform, source_dir, build_dir)

        self.log = "config-twister.log"

    def parse_generated(self):
        """Collect .config and CMake cache values and evaluate tc_filter.

        @return {instance name: True} when the instance is filtered out,
            {instance name: False} when it passes the filter, or the raw
            filter_data dict when the test case has no filter.
        """
        if self.platform.name == "unit_testing":
            return {}

        cmake_cache_path = os.path.join(self.build_dir, "CMakeCache.txt")
        defconfig_path = os.path.join(self.build_dir, "zephyr", ".config")

        with open(defconfig_path, "r") as fp:
            defconfig = {}
            for line in fp.readlines():
                m = self.config_re.match(line)
                if not m:
                    # Anything that is neither a comment nor a CONFIG_
                    # assignment is unexpected; report it but carry on.
                    if line.strip() and not line.startswith("#"):
                        sys.stderr.write("Unrecognized line %s\n" % line)
                    continue
                defconfig[m.group(1)] = m.group(2).strip()

        self.defconfig = defconfig

        cmake_conf = {}
        try:
            cache = CMakeCache.from_file(cmake_cache_path)
        except FileNotFoundError:
            cache = {}

        for k in iter(cache):
            cmake_conf[k.name] = k.value

        self.cmake_cache = cmake_conf

        # Filter expressions can reference the architecture/platform,
        # environment variables, Kconfig symbols and CMake cache entries.
        filter_data = {
            "ARCH": self.platform.arch,
            "PLATFORM": self.platform.name
        }
        filter_data.update(os.environ)
        filter_data.update(self.defconfig)
        filter_data.update(self.cmake_cache)

        # Devicetree data, when generated, lets filters query the DT as well.
        edt_pickle = os.path.join(self.build_dir, "zephyr", "edt.pickle")
        if self.testcase and self.testcase.tc_filter:
            try:
                if os.path.exists(edt_pickle):
                    with open(edt_pickle, 'rb') as f:
                        edt = pickle.load(f)
                else:
                    edt = None
                res = expr_parser.parse(self.testcase.tc_filter, filter_data, edt)

            except (ValueError, SyntaxError) as se:
                sys.stderr.write(
                    "Failed processing %s\n" % self.testcase.yamlfile)
                raise se

            # A falsy result means the filter rejects this instance (True
            # in the returned dict means "filter out").
            if not res:
                return {os.path.join(self.platform.name, self.testcase.name): True}
            else:
                return {os.path.join(self.platform.name, self.testcase.name): False}
        else:
            self.platform.filter_data = filter_data
            return filter_data
  1792. class ProjectBuilder(FilterBuilder):
  1793. def __init__(self, suite, instance, **kwargs):
  1794. super().__init__(instance.testcase, instance.platform, instance.testcase.source_dir, instance.build_dir)
  1795. self.log = "build.log"
  1796. self.instance = instance
  1797. self.suite = suite
  1798. self.filtered_tests = 0
  1799. self.lsan = kwargs.get('lsan', False)
  1800. self.asan = kwargs.get('asan', False)
  1801. self.ubsan = kwargs.get('ubsan', False)
  1802. self.valgrind = kwargs.get('valgrind', False)
  1803. self.extra_args = kwargs.get('extra_args', [])
  1804. self.device_testing = kwargs.get('device_testing', False)
  1805. self.cmake_only = kwargs.get('cmake_only', False)
  1806. self.cleanup = kwargs.get('cleanup', False)
  1807. self.coverage = kwargs.get('coverage', False)
  1808. self.inline_logs = kwargs.get('inline_logs', False)
  1809. self.generator = kwargs.get('generator', None)
  1810. self.generator_cmd = kwargs.get('generator_cmd', None)
  1811. self.verbose = kwargs.get('verbose', None)
  1812. self.warnings_as_errors = kwargs.get('warnings_as_errors', True)
  1813. self.overflow_as_errors = kwargs.get('overflow_as_errors', False)
  1814. @staticmethod
  1815. def log_info(filename, inline_logs):
  1816. filename = os.path.abspath(os.path.realpath(filename))
  1817. if inline_logs:
  1818. logger.info("{:-^100}".format(filename))
  1819. try:
  1820. with open(filename) as fp:
  1821. data = fp.read()
  1822. except Exception as e:
  1823. data = "Unable to read log data (%s)\n" % (str(e))
  1824. logger.error(data)
  1825. logger.info("{:-^100}".format(filename))
  1826. else:
  1827. logger.error("see: " + Fore.YELLOW + filename + Fore.RESET)
  1828. def log_info_file(self, inline_logs):
  1829. build_dir = self.instance.build_dir
  1830. h_log = "{}/handler.log".format(build_dir)
  1831. b_log = "{}/build.log".format(build_dir)
  1832. v_log = "{}/valgrind.log".format(build_dir)
  1833. d_log = "{}/device.log".format(build_dir)
  1834. if os.path.exists(v_log) and "Valgrind" in self.instance.reason:
  1835. self.log_info("{}".format(v_log), inline_logs)
  1836. elif os.path.exists(h_log) and os.path.getsize(h_log) > 0:
  1837. self.log_info("{}".format(h_log), inline_logs)
  1838. elif os.path.exists(d_log) and os.path.getsize(d_log) > 0:
  1839. self.log_info("{}".format(d_log), inline_logs)
  1840. else:
  1841. self.log_info("{}".format(b_log), inline_logs)
    def setup_handler(self):
        """Attach the appropriate execution handler to self.instance.

        Picks a handler based on simulator/platform/test type; the branch
        order matters (e.g. device_testing is only consulted after the
        qemu/unit/native/renode/tsim checks). Instances with no matching
        handler are left with handler == None (build only).
        """
        instance = self.instance
        args = []

        # FIXME: Needs simplification
        if instance.platform.simulation == "qemu":
            instance.handler = QEMUHandler(instance, "qemu")
            args.append("QEMU_PIPE=%s" % instance.handler.get_fifo())
            instance.handler.call_make_run = True
        elif instance.testcase.type == "unit":
            instance.handler = BinaryHandler(instance, "unit")
            instance.handler.binary = os.path.join(instance.build_dir, "testbinary")
            if self.coverage:
                args.append("COVERAGE=1")
        elif instance.platform.type == "native":
            handler = BinaryHandler(instance, "native")

            handler.asan = self.asan
            handler.valgrind = self.valgrind
            handler.lsan = self.lsan
            handler.ubsan = self.ubsan
            handler.coverage = self.coverage

            handler.binary = os.path.join(instance.build_dir, "zephyr", "zephyr.exe")
            instance.handler = handler
        elif instance.platform.simulation == "renode":
            # Simulator-based handlers are only created when the simulator
            # binary is actually available on PATH.
            if find_executable("renode"):
                instance.handler = BinaryHandler(instance, "renode")
                instance.handler.pid_fn = os.path.join(instance.build_dir, "renode.pid")
                instance.handler.call_make_run = True
        elif instance.platform.simulation == "tsim":
            instance.handler = BinaryHandler(instance, "tsim")
            instance.handler.call_make_run = True
        elif self.device_testing:
            instance.handler = DeviceHandler(instance, "device")
            instance.handler.coverage = self.coverage
        elif instance.platform.simulation == "nsim":
            if find_executable("nsimdrv"):
                instance.handler = BinaryHandler(instance, "nsim")
                instance.handler.call_make_run = True
        elif instance.platform.simulation == "mdb-nsim":
            if find_executable("mdb"):
                instance.handler = BinaryHandler(instance, "nsim")
                instance.handler.call_make_run = True
        elif instance.platform.simulation == "armfvp":
            instance.handler = BinaryHandler(instance, "armfvp")
            instance.handler.call_make_run = True

        if instance.handler:
            instance.handler.args = args
            instance.handler.generator_cmd = self.generator_cmd
            instance.handler.generator = self.generator
    def process(self, pipeline, done, message, lock, results):
        """Execute one pipeline step for this instance.

        Acts as a state machine driven by message['op']
        (cmake -> build -> run -> report -> cleanup); each step enqueues
        the next operation on `pipeline`.

        @param pipeline Queue of pending operations
        @param done Queue of completed instances
        @param message Current operation descriptor ({'op': ...})
        @param lock Lock serializing reporting output
        @param results Shared counters object (done/failed/skipped_...)
        """
        op = message.get('op')

        if not self.instance.handler:
            self.setup_handler()

        # The build process, call cmake and build with configured generator
        if op == "cmake":
            res = self.cmake()
            if self.instance.status in ["failed", "error"]:
                pipeline.put({"op": "report", "test": self.instance})
            elif self.cmake_only:
                if self.instance.status is None:
                    self.instance.status = "passed"
                pipeline.put({"op": "report", "test": self.instance})
            else:
                # A True entry in the cmake filter results means this
                # instance was filtered out at configure time.
                if self.instance.name in res['filter'] and res['filter'][self.instance.name]:
                    logger.debug("filtering %s" % self.instance.name)
                    self.instance.status = "skipped"
                    self.instance.reason = "filter"
                    results.skipped_runtime += 1
                    for case in self.instance.testcase.cases:
                        self.instance.results.update({case: 'SKIP'})
                    pipeline.put({"op": "report", "test": self.instance})
                else:
                    pipeline.put({"op": "build", "test": self.instance})

        elif op == "build":
            logger.debug("build test: %s" % self.instance.name)
            res = self.build()
            if not res:
                self.instance.status = "error"
                self.instance.reason = "Build Failure"
                pipeline.put({"op": "report", "test": self.instance})
            else:
                # Count skipped cases during build, for example
                # due to ram/rom overflow.
                inst = res.get("instance", None)
                if inst and inst.status == "skipped":
                    results.skipped_runtime += 1

                if res.get('returncode', 1) > 0:
                    pipeline.put({"op": "report", "test": self.instance})
                else:
                    if self.instance.run and self.instance.handler:
                        pipeline.put({"op": "run", "test": self.instance})
                    else:
                        pipeline.put({"op": "report", "test": self.instance})

        # Run the generated binary using one of the supported handlers
        elif op == "run":
            logger.debug("run test: %s" % self.instance.name)
            self.run()
            self.instance.status, _ = self.instance.handler.get_state()
            logger.debug(f"run status: {self.instance.name} {self.instance.status}")

            # to make it work with pickle
            self.instance.handler.thread = None
            self.instance.handler.suite = None
            pipeline.put({
                "op": "report",
                "test": self.instance,
                "status": self.instance.status,
                "reason": self.instance.reason
                }
            )

        # Report results and output progress to screen
        elif op == "report":
            with lock:
                done.put(self.instance)
                self.report_out(results)

            # Only clean artifacts for passing tests (and never when
            # coverage data must be preserved).
            if self.cleanup and not self.coverage and self.instance.status == "passed":
                pipeline.put({
                    "op": "cleanup",
                    "test": self.instance
                })

        elif op == "cleanup":
            if self.device_testing:
                self.cleanup_device_testing_artifacts()
            else:
                self.cleanup_artifacts()
  1965. def cleanup_artifacts(self, additional_keep=[]):
  1966. logger.debug("Cleaning up {}".format(self.instance.build_dir))
  1967. allow = [
  1968. 'zephyr/.config',
  1969. 'handler.log',
  1970. 'build.log',
  1971. 'device.log',
  1972. 'recording.csv',
  1973. ]
  1974. allow += additional_keep
  1975. allow = [os.path.join(self.instance.build_dir, file) for file in allow]
  1976. for dirpath, dirnames, filenames in os.walk(self.instance.build_dir, topdown=False):
  1977. for name in filenames:
  1978. path = os.path.join(dirpath, name)
  1979. if path not in allow:
  1980. os.remove(path)
  1981. # Remove empty directories and symbolic links to directories
  1982. for dir in dirnames:
  1983. path = os.path.join(dirpath, dir)
  1984. if os.path.islink(path):
  1985. os.remove(path)
  1986. elif not os.listdir(path):
  1987. os.rmdir(path)
  1988. def cleanup_device_testing_artifacts(self):
  1989. logger.debug("Cleaning up for Device Testing {}".format(self.instance.build_dir))
  1990. sanitizelist = [
  1991. 'CMakeCache.txt',
  1992. 'zephyr/runners.yaml',
  1993. ]
  1994. keep = [
  1995. 'zephyr/zephyr.hex',
  1996. 'zephyr/zephyr.bin',
  1997. 'zephyr/zephyr.elf',
  1998. ]
  1999. keep += sanitizelist
  2000. self.cleanup_artifacts(keep)
  2001. # sanitize paths so files are relocatable
  2002. for file in sanitizelist:
  2003. file = os.path.join(self.instance.build_dir, file)
  2004. with open(file, "rt") as fin:
  2005. data = fin.read()
  2006. data = data.replace(canonical_zephyr_base+"/", "")
  2007. with open(file, "wt") as fin:
  2008. fin.write(data)
    def report_out(self, results):
        """Report this instance's outcome to the console and update the
        shared counters.

        In verbose mode prints one line per test; otherwise maintains a
        single in-place progress line (and dumps logs for failures).

        @param results Shared counters object (total/done/failed/error/
               skipped_configs/skipped_runtime).
        """
        total_to_do = results.total - results.skipped_configs
        total_tests_width = len(str(total_to_do))
        results.done += 1
        instance = self.instance

        if instance.status in ["error", "failed", "timeout", "flash_error"]:
            if instance.status == "error":
                results.error += 1
            results.failed += 1
            if self.verbose:
                status = Fore.RED + "FAILED " + Fore.RESET + instance.reason
            else:
                print("")
                logger.error(
                    "{:<25} {:<50} {}FAILED{}: {}".format(
                        instance.platform.name,
                        instance.testcase.name,
                        Fore.RED,
                        Fore.RESET,
                        instance.reason))
            if not self.verbose:
                self.log_info_file(self.inline_logs)
        elif instance.status == "skipped":
            status = Fore.YELLOW + "SKIPPED" + Fore.RESET
        elif instance.status == "passed":
            status = Fore.GREEN + "PASSED" + Fore.RESET
        else:
            logger.debug(f"Unknown status = {instance.status}")
            status = Fore.YELLOW + "UNKNOWN" + Fore.RESET

        if self.verbose:
            if self.cmake_only:
                more_info = "cmake"
            elif instance.status == "skipped":
                more_info = instance.reason
            else:
                if instance.handler and instance.run:
                    more_info = instance.handler.type_str
                    htime = instance.handler.duration
                    if htime:
                        more_info += " {:.3f}s".format(htime)
                else:
                    more_info = "build"

            logger.info("{:>{}}/{} {:<25} {:<50} {} ({})".format(
                results.done, total_tests_width, total_to_do, instance.platform.name,
                instance.testcase.name, status, more_info))

            if instance.status in ["error", "failed", "timeout"]:
                self.log_info_file(self.inline_logs)
        else:
            # Non-verbose mode: rewrite a single progress line in place.
            completed_perc = 0
            if total_to_do > 0:
                completed_perc = int((float(results.done) / total_to_do) * 100)

            skipped = results.skipped_configs + results.skipped_runtime
            sys.stdout.write("\rINFO - Total complete: %s%4d/%4d%s %2d%% skipped: %s%4d%s, failed: %s%4d%s" % (
                Fore.GREEN,
                results.done,
                total_to_do,
                Fore.RESET,
                completed_perc,
                Fore.YELLOW if skipped > 0 else Fore.RESET,
                skipped,
                Fore.RESET,
                Fore.RED if results.failed > 0 else Fore.RESET,
                results.failed,
                Fore.RESET
                )
            )
        sys.stdout.flush()
  2076. def cmake(self):
  2077. instance = self.instance
  2078. args = self.testcase.extra_args[:]
  2079. args += self.extra_args
  2080. if instance.handler:
  2081. args += instance.handler.args
  2082. # merge overlay files into one variable
  2083. def extract_overlays(args):
  2084. re_overlay = re.compile('OVERLAY_CONFIG=(.*)')
  2085. other_args = []
  2086. overlays = []
  2087. for arg in args:
  2088. match = re_overlay.search(arg)
  2089. if match:
  2090. overlays.append(match.group(1).strip('\'"'))
  2091. else:
  2092. other_args.append(arg)
  2093. args[:] = other_args
  2094. return overlays
  2095. overlays = extract_overlays(args)
  2096. if os.path.exists(os.path.join(instance.build_dir,
  2097. "twister", "testcase_extra.conf")):
  2098. overlays.append(os.path.join(instance.build_dir,
  2099. "twister", "testcase_extra.conf"))
  2100. if overlays:
  2101. args.append("OVERLAY_CONFIG=\"%s\"" % (" ".join(overlays)))
  2102. res = self.run_cmake(args)
  2103. return res
  2104. def build(self):
  2105. res = self.run_build(['--build', self.build_dir])
  2106. return res
  2107. def run(self):
  2108. instance = self.instance
  2109. if instance.handler:
  2110. if instance.handler.type_str == "device":
  2111. instance.handler.suite = self.suite
  2112. instance.handler.handle()
  2113. sys.stdout.flush()
class TestSuite(DisablePyTestCollectionMixin):
    """Owns a whole twister run: platform definitions, discovered
    testcases, filtered instances, execution and reporting."""

    # Matches CONFIG_<SYM>=<value> lines from generated .config files.
    config_re = re.compile('(CONFIG_[A-Za-z0-9_]+)[=]\"?([^\"]*)\"?$')
    # Matches <SYM>=<value> lines from generated devicetree define files.
    dt_re = re.compile('([A-Za-z0-9_]+)[=]\"?([^\"]*)\"?$')

    # Schemas used to validate testcase and quarantine yaml files.
    tc_schema = scl.yaml_load(
        os.path.join(ZEPHYR_BASE,
                     "scripts", "schemas", "twister", "testcase-schema.yaml"))
    quarantine_schema = scl.yaml_load(
        os.path.join(ZEPHYR_BASE,
                     "scripts", "schemas", "twister", "quarantine-schema.yaml"))

    # Keys permitted in a testcase yaml entry, with expected type and
    # default; consumed by TwisterConfigParser.get_test().
    testcase_valid_keys = {"tags": {"type": "set", "required": False},
                           "type": {"type": "str", "default": "integration"},
                           "extra_args": {"type": "list"},
                           "extra_configs": {"type": "list"},
                           "build_only": {"type": "bool", "default": False},
                           "build_on_all": {"type": "bool", "default": False},
                           "skip": {"type": "bool", "default": False},
                           "slow": {"type": "bool", "default": False},
                           "timeout": {"type": "int", "default": 60},
                           "min_ram": {"type": "int", "default": 8},
                           "depends_on": {"type": "set"},
                           "min_flash": {"type": "int", "default": 32},
                           "arch_allow": {"type": "set"},
                           "arch_exclude": {"type": "set"},
                           "extra_sections": {"type": "list", "default": []},
                           "integration_platforms": {"type": "list", "default": []},
                           "platform_exclude": {"type": "set"},
                           "platform_allow": {"type": "set"},
                           "toolchain_exclude": {"type": "set"},
                           "toolchain_allow": {"type": "set"},
                           "filter": {"type": "str"},
                           "harness": {"type": "str"},
                           "harness_config": {"type": "map", "default": {}}
                           }

    # CSV snapshot from the last release; baseline for footprint deltas.
    RELEASE_DATA = os.path.join(ZEPHYR_BASE, "scripts", "release",
                                "twister_last_release.csv")

    # Recognized per-directory configuration file names.
    SAMPLE_FILENAME = 'sample.yaml'
    TESTCASE_FILENAME = 'testcase.yaml'
  2151. def __init__(self, board_root_list=[], testcase_roots=[], outdir=None):
  2152. self.roots = testcase_roots
  2153. if not isinstance(board_root_list, list):
  2154. self.board_roots = [board_root_list]
  2155. else:
  2156. self.board_roots = board_root_list
  2157. # Testsuite Options
  2158. self.coverage_platform = []
  2159. self.build_only = False
  2160. self.cmake_only = False
  2161. self.cleanup = False
  2162. self.enable_slow = False
  2163. self.device_testing = False
  2164. self.fixtures = []
  2165. self.enable_coverage = False
  2166. self.enable_ubsan = False
  2167. self.enable_lsan = False
  2168. self.enable_asan = False
  2169. self.enable_valgrind = False
  2170. self.extra_args = []
  2171. self.inline_logs = False
  2172. self.enable_sizes_report = False
  2173. self.west_flash = None
  2174. self.west_runner = None
  2175. self.generator = None
  2176. self.generator_cmd = None
  2177. self.warnings_as_errors = True
  2178. self.overflow_as_errors = False
  2179. self.quarantine_verify = False
  2180. # Keep track of which test cases we've filtered out and why
  2181. self.testcases = {}
  2182. self.quarantine = {}
  2183. self.platforms = []
  2184. self.selected_platforms = []
  2185. self.filtered_platforms = []
  2186. self.default_platforms = []
  2187. self.outdir = os.path.abspath(outdir)
  2188. self.discards = {}
  2189. self.load_errors = 0
  2190. self.instances = dict()
  2191. self.total_platforms = 0
  2192. self.start_time = 0
  2193. self.duration = 0
  2194. self.warnings = 0
  2195. # hardcoded for now
  2196. self.duts = []
  2197. # run integration tests only
  2198. self.integration = False
  2199. self.pipeline = None
  2200. self.version = "NA"
  2201. def check_zephyr_version(self):
  2202. try:
  2203. subproc = subprocess.run(["git", "describe", "--abbrev=12"],
  2204. stdout=subprocess.PIPE,
  2205. universal_newlines=True,
  2206. cwd=ZEPHYR_BASE)
  2207. if subproc.returncode == 0:
  2208. self.version = subproc.stdout.strip()
  2209. logger.info(f"Zephyr version: {self.version}")
  2210. except OSError:
  2211. logger.info("Cannot read zephyr version.")
  2212. def get_platform_instances(self, platform):
  2213. filtered_dict = {k:v for k,v in self.instances.items() if k.startswith(platform + "/")}
  2214. return filtered_dict
  2215. def config(self):
  2216. logger.info("coverage platform: {}".format(self.coverage_platform))
  2217. # Debug Functions
  2218. @staticmethod
  2219. def info(what):
  2220. sys.stdout.write(what + "\n")
  2221. sys.stdout.flush()
  2222. def update_counting(self, results=None, initial=False):
  2223. results.skipped_configs = 0
  2224. results.skipped_cases = 0
  2225. for instance in self.instances.values():
  2226. if initial:
  2227. results.cases += len(instance.testcase.cases)
  2228. if instance.status == 'skipped':
  2229. results.skipped_configs += 1
  2230. results.skipped_cases += len(instance.testcase.cases)
  2231. elif instance.status == "passed":
  2232. results.passed += 1
  2233. for res in instance.results.values():
  2234. if res == 'SKIP':
  2235. results.skipped_cases += 1
  2236. def compare_metrics(self, filename):
  2237. # name, datatype, lower results better
  2238. interesting_metrics = [("ram_size", int, True),
  2239. ("rom_size", int, True)]
  2240. if not os.path.exists(filename):
  2241. logger.error("Cannot compare metrics, %s not found" % filename)
  2242. return []
  2243. results = []
  2244. saved_metrics = {}
  2245. with open(filename) as fp:
  2246. cr = csv.DictReader(fp)
  2247. for row in cr:
  2248. d = {}
  2249. for m, _, _ in interesting_metrics:
  2250. d[m] = row[m]
  2251. saved_metrics[(row["test"], row["platform"])] = d
  2252. for instance in self.instances.values():
  2253. mkey = (instance.testcase.name, instance.platform.name)
  2254. if mkey not in saved_metrics:
  2255. continue
  2256. sm = saved_metrics[mkey]
  2257. for metric, mtype, lower_better in interesting_metrics:
  2258. if metric not in instance.metrics:
  2259. continue
  2260. if sm[metric] == "":
  2261. continue
  2262. delta = instance.metrics.get(metric, 0) - mtype(sm[metric])
  2263. if delta == 0:
  2264. continue
  2265. results.append((instance, metric, instance.metrics.get(metric, 0), delta,
  2266. lower_better))
  2267. return results
  2268. def footprint_reports(self, report, show_footprint, all_deltas,
  2269. footprint_threshold, last_metrics):
  2270. if not report:
  2271. return
  2272. logger.debug("running footprint_reports")
  2273. deltas = self.compare_metrics(report)
  2274. warnings = 0
  2275. if deltas and show_footprint:
  2276. for i, metric, value, delta, lower_better in deltas:
  2277. if not all_deltas and ((delta < 0 and lower_better) or
  2278. (delta > 0 and not lower_better)):
  2279. continue
  2280. percentage = 0
  2281. if value > delta:
  2282. percentage = (float(delta) / float(value - delta))
  2283. if not all_deltas and (percentage < (footprint_threshold / 100.0)):
  2284. continue
  2285. logger.info("{:<25} {:<60} {}{}{}: {} {:<+4}, is now {:6} {:+.2%}".format(
  2286. i.platform.name, i.testcase.name, Fore.YELLOW,
  2287. "INFO" if all_deltas else "WARNING", Fore.RESET,
  2288. metric, delta, value, percentage))
  2289. warnings += 1
  2290. if warnings:
  2291. logger.warning("Deltas based on metrics from last %s" %
  2292. ("release" if not last_metrics else "run"))
def summary(self, results, unrecognized_sections):
    """Log the end-of-run summary: pass rate, failures, skips, warnings.

    unrecognized_sections: when falsy, any instance carrying an
        "unrecognized" binary-sections metric is counted as a failure.
    """
    failed = 0
    run = 0
    for instance in self.instances.values():
        if instance.status == "failed":
            failed += 1
        elif instance.metrics.get("unrecognized") and not unrecognized_sections:
            logger.error("%sFAILED%s: %s has unrecognized binary sections: %s" %
                         (Fore.RED, Fore.RESET, instance.name,
                          str(instance.metrics.get("unrecognized", []))))
            failed += 1

        # A non-zero handler_time means the test actually executed.
        if instance.metrics.get('handler_time', None):
            run += 1

    # Guard against division by zero when everything was skipped.
    if results.total and results.total != results.skipped_configs:
        pass_rate = (float(results.passed) / float(results.total - results.skipped_configs))
    else:
        pass_rate = 0

    logger.info(
        "{}{} of {}{} test configurations passed ({:.2%}), {}{}{} failed, {} skipped with {}{}{} warnings in {:.2f} seconds".format(
            Fore.RED if failed else Fore.GREEN,
            results.passed,
            results.total - results.skipped_configs,
            Fore.RESET,
            pass_rate,
            Fore.RED if results.failed else Fore.RESET,
            results.failed,
            Fore.RESET,
            results.skipped_configs,
            Fore.YELLOW if self.warnings else Fore.RESET,
            self.warnings,
            Fore.RESET,
            self.duration))

    self.total_platforms = len(self.platforms)
    # if we are only building, do not report about tests being executed.
    if self.platforms and not self.build_only:
        logger.info("In total {} test cases were executed, {} skipped on {} out of total {} platforms ({:02.2f}%)".format(
            results.cases - results.skipped_cases,
            results.skipped_cases,
            len(self.filtered_platforms),
            self.total_platforms,
            (100 * len(self.filtered_platforms) / len(self.platforms))
        ))

    logger.info(f"{Fore.GREEN}{run}{Fore.RESET} test configurations executed on platforms, \
{Fore.RED}{results.total - run - results.skipped_configs}{Fore.RESET} test configurations were only built.")
  2337. def save_reports(self, name, suffix, report_dir, no_update, release, only_failed, platform_reports, json_report):
  2338. if not self.instances:
  2339. return
  2340. logger.info("Saving reports...")
  2341. if name:
  2342. report_name = name
  2343. else:
  2344. report_name = "twister"
  2345. if report_dir:
  2346. os.makedirs(report_dir, exist_ok=True)
  2347. filename = os.path.join(report_dir, report_name)
  2348. outdir = report_dir
  2349. else:
  2350. filename = os.path.join(self.outdir, report_name)
  2351. outdir = self.outdir
  2352. if suffix:
  2353. filename = "{}_{}".format(filename, suffix)
  2354. if not no_update:
  2355. self.xunit_report(filename + ".xml", full_report=False,
  2356. append=only_failed, version=self.version)
  2357. self.xunit_report(filename + "_report.xml", full_report=True,
  2358. append=only_failed, version=self.version)
  2359. self.csv_report(filename + ".csv")
  2360. if json_report:
  2361. self.json_report(filename + ".json", append=only_failed, version=self.version)
  2362. if platform_reports:
  2363. self.target_report(outdir, suffix, append=only_failed)
  2364. if self.discards:
  2365. self.discard_report(filename + "_discard.csv")
  2366. if release:
  2367. self.csv_report(self.RELEASE_DATA)
  2368. def add_configurations(self):
  2369. for board_root in self.board_roots:
  2370. board_root = os.path.abspath(board_root)
  2371. logger.debug("Reading platform configuration files under %s..." %
  2372. board_root)
  2373. for file in glob.glob(os.path.join(board_root, "*", "*", "*.yaml")):
  2374. try:
  2375. platform = Platform()
  2376. platform.load(file)
  2377. if platform.name in [p.name for p in self.platforms]:
  2378. logger.error(f"Duplicate platform {platform.name} in {file}")
  2379. raise Exception(f"Duplicate platform identifier {platform.name} found")
  2380. if platform.twister:
  2381. self.platforms.append(platform)
  2382. if platform.default:
  2383. self.default_platforms.append(platform.name)
  2384. except RuntimeError as e:
  2385. logger.error("E: %s: can't load: %s" % (file, e))
  2386. self.load_errors += 1
  2387. def get_all_tests(self):
  2388. tests = []
  2389. for _, tc in self.testcases.items():
  2390. for case in tc.cases:
  2391. tests.append(case)
  2392. return tests
@staticmethod
def get_toolchain():
    """Resolve ZEPHYR_TOOLCHAIN_VARIANT via cmake/verify-toolchain.cmake.

    Exits the process with status 2 when the script fails; returns the
    toolchain variant string otherwise.
    """
    toolchain_script = Path(ZEPHYR_BASE) / Path('cmake/verify-toolchain.cmake')
    result = CMake.run_cmake_script([toolchain_script, "FORMAT=json"])

    try:
        # Non-zero returncode (or a malformed result dict — any
        # exception lands here too) is treated as a fatal setup error.
        if result['returncode']:
            raise TwisterRuntimeError(f"E: {result['returnmsg']}")
    except Exception as e:
        print(str(e))
        sys.exit(2)

    toolchain = json.loads(result['stdout'])['ZEPHYR_TOOLCHAIN_VARIANT']
    logger.info(f"Using '{toolchain}' toolchain.")

    return toolchain
  2406. def add_testcases(self, testcase_filter=[]):
  2407. for root in self.roots:
  2408. root = os.path.abspath(root)
  2409. logger.debug("Reading test case configuration files under %s..." % root)
  2410. for dirpath, _, filenames in os.walk(root, topdown=True):
  2411. if self.SAMPLE_FILENAME in filenames:
  2412. filename = self.SAMPLE_FILENAME
  2413. elif self.TESTCASE_FILENAME in filenames:
  2414. filename = self.TESTCASE_FILENAME
  2415. else:
  2416. continue
  2417. logger.debug("Found possible test case in " + dirpath)
  2418. tc_path = os.path.join(dirpath, filename)
  2419. try:
  2420. parsed_data = TwisterConfigParser(tc_path, self.tc_schema)
  2421. parsed_data.load()
  2422. tc_path = os.path.dirname(tc_path)
  2423. workdir = os.path.relpath(tc_path, root)
  2424. for name in parsed_data.tests.keys():
  2425. tc = TestCase(root, workdir, name)
  2426. tc_dict = parsed_data.get_test(name, self.testcase_valid_keys)
  2427. tc.source_dir = tc_path
  2428. tc.yamlfile = tc_path
  2429. tc.type = tc_dict["type"]
  2430. tc.tags = tc_dict["tags"]
  2431. tc.extra_args = tc_dict["extra_args"]
  2432. tc.extra_configs = tc_dict["extra_configs"]
  2433. tc.arch_allow = tc_dict["arch_allow"]
  2434. tc.arch_exclude = tc_dict["arch_exclude"]
  2435. tc.skip = tc_dict["skip"]
  2436. tc.platform_exclude = tc_dict["platform_exclude"]
  2437. tc.platform_allow = tc_dict["platform_allow"]
  2438. tc.toolchain_exclude = tc_dict["toolchain_exclude"]
  2439. tc.toolchain_allow = tc_dict["toolchain_allow"]
  2440. tc.tc_filter = tc_dict["filter"]
  2441. tc.timeout = tc_dict["timeout"]
  2442. tc.harness = tc_dict["harness"]
  2443. tc.harness_config = tc_dict["harness_config"]
  2444. if tc.harness == 'console' and not tc.harness_config:
  2445. raise Exception('Harness config error: console harness defined without a configuration.')
  2446. tc.build_only = tc_dict["build_only"]
  2447. tc.build_on_all = tc_dict["build_on_all"]
  2448. tc.slow = tc_dict["slow"]
  2449. tc.min_ram = tc_dict["min_ram"]
  2450. tc.depends_on = tc_dict["depends_on"]
  2451. tc.min_flash = tc_dict["min_flash"]
  2452. tc.extra_sections = tc_dict["extra_sections"]
  2453. tc.integration_platforms = tc_dict["integration_platforms"]
  2454. tc.parse_subcases(tc_path)
  2455. if testcase_filter:
  2456. if tc.name and tc.name in testcase_filter:
  2457. self.testcases[tc.name] = tc
  2458. else:
  2459. self.testcases[tc.name] = tc
  2460. except Exception as e:
  2461. logger.error("%s: can't load (skipping): %s" % (tc_path, e))
  2462. self.load_errors += 1
  2463. return len(self.testcases)
  2464. def get_platform(self, name):
  2465. selected_platform = None
  2466. for platform in self.platforms:
  2467. if platform.name == name:
  2468. selected_platform = platform
  2469. break
  2470. return selected_platform
  2471. def load_quarantine(self, file):
  2472. """
  2473. Loads quarantine list from the given yaml file. Creates a dictionary
  2474. of all tests configurations (platform + scenario: comment) that shall be
  2475. skipped due to quarantine
  2476. """
  2477. # Load yaml into quarantine_yaml
  2478. quarantine_yaml = scl.yaml_load_verify(file, self.quarantine_schema)
  2479. # Create quarantine_list with a product of the listed
  2480. # platforms and scenarios for each entry in quarantine yaml
  2481. quarantine_list = []
  2482. for quar_dict in quarantine_yaml:
  2483. if quar_dict['platforms'][0] == "all":
  2484. plat = [p.name for p in self.platforms]
  2485. else:
  2486. plat = quar_dict['platforms']
  2487. comment = quar_dict.get('comment', "NA")
  2488. quarantine_list.append([{".".join([p, s]): comment}
  2489. for p in plat for s in quar_dict['scenarios']])
  2490. # Flatten the quarantine_list
  2491. quarantine_list = [it for sublist in quarantine_list for it in sublist]
  2492. # Change quarantine_list into a dictionary
  2493. for d in quarantine_list:
  2494. self.quarantine.update(d)
  2495. def load_from_file(self, file, filter_status=[], filter_platform=[]):
  2496. try:
  2497. with open(file, "r") as fp:
  2498. cr = csv.DictReader(fp)
  2499. instance_list = []
  2500. for row in cr:
  2501. if row["status"] in filter_status:
  2502. continue
  2503. test = row["test"]
  2504. platform = self.get_platform(row["platform"])
  2505. if filter_platform and platform.name not in filter_platform:
  2506. continue
  2507. instance = TestInstance(self.testcases[test], platform, self.outdir)
  2508. if self.device_testing:
  2509. tfilter = 'runnable'
  2510. else:
  2511. tfilter = 'buildable'
  2512. instance.run = instance.check_runnable(
  2513. self.enable_slow,
  2514. tfilter,
  2515. self.fixtures
  2516. )
  2517. instance.create_overlay(platform, self.enable_asan, self.enable_ubsan, self.enable_coverage, self.coverage_platform)
  2518. instance_list.append(instance)
  2519. self.add_instances(instance_list)
  2520. except KeyError as e:
  2521. logger.error("Key error while parsing tests file.({})".format(str(e)))
  2522. sys.exit(2)
  2523. except FileNotFoundError as e:
  2524. logger.error("Couldn't find input file with list of tests. ({})".format(e))
  2525. sys.exit(2)
def apply_filters(self, **kwargs):
    """Build the instance set, discarding configurations that don't apply.

    Walks every (testcase, platform) pair, records a discard reason for
    each pair that is filtered out, and registers the survivors via
    add_instances(). Returns the discards dict (instance -> reason).
    Note: the first matching discard reason wins — discards.get() keeps
    an already-recorded reason.
    """

    toolchain = self.get_toolchain()

    discards = {}
    platform_filter = kwargs.get('platform')
    exclude_platform = kwargs.get('exclude_platform', [])
    testcase_filter = kwargs.get('run_individual_tests', [])
    arch_filter = kwargs.get('arch')
    tag_filter = kwargs.get('tag')
    exclude_tag = kwargs.get('exclude_tag')
    all_filter = kwargs.get('all')
    runnable = kwargs.get('runnable')
    force_toolchain = kwargs.get('force_toolchain')
    force_platform = kwargs.get('force_platform')
    emu_filter = kwargs.get('emulation_only')

    logger.debug("platform filter: " + str(platform_filter))
    logger.debug(" arch_filter: " + str(arch_filter))
    logger.debug(" tag_filter: " + str(tag_filter))
    logger.debug(" exclude_tag: " + str(exclude_tag))

    default_platforms = False
    emulation_platforms = False

    if all_filter:
        logger.info("Selecting all possible platforms per test case")
        # When --all used, any --platform arguments ignored
        platform_filter = []
    elif not platform_filter and not emu_filter:
        logger.info("Selecting default platforms per test case")
        default_platforms = True
    elif emu_filter:
        logger.info("Selecting emulation platforms per test case")
        emulation_platforms = True

    # Base platform scope, in decreasing priority of the filters given.
    if platform_filter:
        platforms = list(filter(lambda p: p.name in platform_filter, self.platforms))
    elif emu_filter:
        platforms = list(filter(lambda p: p.simulation != 'na', self.platforms))
    elif arch_filter:
        platforms = list(filter(lambda p: p.arch in arch_filter, self.platforms))
    elif default_platforms:
        platforms = list(filter(lambda p: p.default, self.platforms))
    else:
        platforms = self.platforms

    logger.info("Building initial testcase list...")

    for tc_name, tc in self.testcases.items():

        # Per-testcase platform scope: build_on_all / integration lists
        # widen or narrow the base scope.
        if tc.build_on_all and not platform_filter:
            platform_scope = self.platforms
        elif tc.integration_platforms and self.integration:
            platform_scope = list(filter(lambda item: item.name in tc.integration_platforms, \
                                         self.platforms))
        else:
            platform_scope = platforms

        integration = self.integration and tc.integration_platforms

        # If there isn't any overlap between the platform_allow list and the platform_scope
        # we set the scope to the platform_allow list
        if tc.platform_allow and not platform_filter and not integration:
            a = set(platform_scope)
            b = set(filter(lambda item: item.name in tc.platform_allow, self.platforms))
            c = a.intersection(b)
            if not c:
                platform_scope = list(filter(lambda item: item.name in tc.platform_allow, \
                                             self.platforms))

        # list of instances per testcase, aka configurations.
        instance_list = []
        for plat in platform_scope:
            instance = TestInstance(tc, plat, self.outdir)
            if runnable:
                tfilter = 'runnable'
            else:
                tfilter = 'buildable'

            instance.run = instance.check_runnable(
                self.enable_slow,
                tfilter,
                self.fixtures
            )
            # Pre-seed per-case results so reporting sees every case.
            for t in tc.cases:
                instance.results[t] = None

            # A connected DUT providing the required fixture makes the
            # instance runnable even if check_runnable() said otherwise.
            if runnable and self.duts:
                for h in self.duts:
                    if h.platform == plat.name:
                        if tc.harness_config.get('fixture') in h.fixtures:
                            instance.run = True

            if not force_platform and plat.name in exclude_platform:
                discards[instance] = discards.get(instance, "Platform is excluded on command line.")

            if (plat.arch == "unit") != (tc.type == "unit"):
                # Discard silently
                continue

            if runnable and not instance.run:
                discards[instance] = discards.get(instance, "Not runnable on device")

            if self.integration and tc.integration_platforms and plat.name not in tc.integration_platforms:
                discards[instance] = discards.get(instance, "Not part of integration platforms")

            if tc.skip:
                discards[instance] = discards.get(instance, "Skip filter")

            if tag_filter and not tc.tags.intersection(tag_filter):
                discards[instance] = discards.get(instance, "Command line testcase tag filter")

            if exclude_tag and tc.tags.intersection(exclude_tag):
                discards[instance] = discards.get(instance, "Command line testcase exclude filter")

            if testcase_filter and tc_name not in testcase_filter:
                discards[instance] = discards.get(instance, "Testcase name filter")

            if arch_filter and plat.arch not in arch_filter:
                discards[instance] = discards.get(instance, "Command line testcase arch filter")

            # --force-platform bypasses the testcase's own arch/platform
            # allow/exclude lists (but not the toolchain checks below).
            if not force_platform:

                if tc.arch_allow and plat.arch not in tc.arch_allow:
                    discards[instance] = discards.get(instance, "Not in test case arch allow list")

                if tc.arch_exclude and plat.arch in tc.arch_exclude:
                    discards[instance] = discards.get(instance, "In test case arch exclude")

                if tc.platform_exclude and plat.name in tc.platform_exclude:
                    discards[instance] = discards.get(instance, "In test case platform exclude")

            if tc.toolchain_exclude and toolchain in tc.toolchain_exclude:
                discards[instance] = discards.get(instance, "In test case toolchain exclude")

            if platform_filter and plat.name not in platform_filter:
                discards[instance] = discards.get(instance, "Command line platform filter")

            if tc.platform_allow and plat.name not in tc.platform_allow:
                discards[instance] = discards.get(instance, "Not in testcase platform allow list")

            if tc.toolchain_allow and toolchain not in tc.toolchain_allow:
                discards[instance] = discards.get(instance, "Not in testcase toolchain allow list")

            if not plat.env_satisfied:
                discards[instance] = discards.get(instance, "Environment ({}) not satisfied".format(", ".join(plat.env)))

            if not force_toolchain \
                    and toolchain and (toolchain not in plat.supported_toolchains) \
                    and "host" not in plat.supported_toolchains \
                    and tc.type != 'unit':
                discards[instance] = discards.get(instance, "Not supported by the toolchain")

            if plat.ram < tc.min_ram:
                discards[instance] = discards.get(instance, "Not enough RAM")

            if tc.depends_on:
                dep_intersection = tc.depends_on.intersection(set(plat.supported))
                if dep_intersection != set(tc.depends_on):
                    discards[instance] = discards.get(instance, "No hardware support")

            if plat.flash < tc.min_flash:
                discards[instance] = discards.get(instance, "Not enough FLASH")

            if set(plat.ignore_tags) & tc.tags:
                discards[instance] = discards.get(instance, "Excluded tags per platform (exclude_tags)")

            if plat.only_tags and not set(plat.only_tags) & tc.tags:
                discards[instance] = discards.get(instance, "Excluded tags per platform (only_tags)")

            test_configuration = ".".join([instance.platform.name,
                                           instance.testcase.id])
            # skip quarantined tests
            if test_configuration in self.quarantine and not self.quarantine_verify:
                discards[instance] = discards.get(instance,
                                                  f"Quarantine: {self.quarantine[test_configuration]}")
            # run only quarantined test to verify their statuses (skip everything else)
            if self.quarantine_verify and test_configuration not in self.quarantine:
                discards[instance] = discards.get(instance, "Not under quarantine")

            # if nothing stopped us until now, it means this configuration
            # needs to be added.
            instance_list.append(instance)

        # no configurations, so jump to next testcase
        if not instance_list:
            continue

        # if twister was launched with no platform options at all, we
        # take all default platforms
        if default_platforms and not tc.build_on_all and not integration:
            if tc.platform_allow:
                a = set(self.default_platforms)
                b = set(tc.platform_allow)
                c = a.intersection(b)
                if c:
                    aa = list(filter(lambda tc: tc.platform.name in c, instance_list))
                    self.add_instances(aa)
                else:
                    self.add_instances(instance_list)
            else:
                instances = list(filter(lambda tc: tc.platform.default, instance_list))
                self.add_instances(instances)

        elif integration:
            instances = list(filter(lambda item: item.platform.name in tc.integration_platforms, instance_list))
            self.add_instances(instances)

        elif emulation_platforms:
            self.add_instances(instance_list)
            # double negative: selects instances whose simulation == 'na'
            # (i.e. NOT emulated) so they get a discard reason.
            for instance in list(filter(lambda inst: not inst.platform.simulation != 'na', instance_list)):
                discards[instance] = discards.get(instance, "Not an emulated platform")
        else:
            self.add_instances(instance_list)

    for _, case in self.instances.items():
        case.create_overlay(case.platform, self.enable_asan, self.enable_ubsan, self.enable_coverage, self.coverage_platform)

    self.discards = discards
    self.selected_platforms = set(p.platform.name for p in self.instances.values())

    for instance in self.discards:
        instance.reason = self.discards[instance]
        # If integration mode is on all skips on integration_platforms are treated as errors.
        if self.integration and instance.platform.name in instance.testcase.integration_platforms \
                and "Quarantine" not in instance.reason:
            instance.status = "error"
            instance.reason += " but is one of the integration platforms"
            instance.fill_results_by_status()
            # Errored discards are kept in the instance set for reporting.
            self.instances[instance.name] = instance
        else:
            instance.status = "skipped"
            instance.fill_results_by_status()

    self.filtered_platforms = set(p.platform.name for p in self.instances.values()
                                  if p.status != "skipped")

    return discards
  2716. def add_instances(self, instance_list):
  2717. for instance in instance_list:
  2718. self.instances[instance.name] = instance
  2719. @staticmethod
  2720. def calc_one_elf_size(instance):
  2721. if instance.status not in ["error", "failed", "skipped"]:
  2722. if instance.platform.type != "native":
  2723. size_calc = instance.calculate_sizes()
  2724. instance.metrics["ram_size"] = size_calc.get_ram_size()
  2725. instance.metrics["rom_size"] = size_calc.get_rom_size()
  2726. instance.metrics["unrecognized"] = size_calc.unrecognized_sections()
  2727. else:
  2728. instance.metrics["ram_size"] = 0
  2729. instance.metrics["rom_size"] = 0
  2730. instance.metrics["unrecognized"] = []
  2731. instance.metrics["handler_time"] = instance.handler.duration if instance.handler else 0
  2732. def add_tasks_to_queue(self, pipeline, build_only=False, test_only=False):
  2733. for instance in self.instances.values():
  2734. if build_only:
  2735. instance.run = False
  2736. if instance.status not in ['passed', 'skipped', 'error']:
  2737. logger.debug(f"adding {instance.name}")
  2738. instance.status = None
  2739. if test_only and instance.run:
  2740. pipeline.put({"op": "run", "test": instance})
  2741. else:
  2742. pipeline.put({"op": "cmake", "test": instance})
  2743. # If the instance got 'error' status before, proceed to the report stage
  2744. if instance.status == "error":
  2745. pipeline.put({"op": "report", "test": instance})
def pipeline_mgr(self, pipeline, done_queue, lock, results):
    """Worker-process entry point: drain the pipeline queue until empty.

    Each task is handed to a fresh ProjectBuilder configured from the
    suite-wide options; the builder may re-enqueue follow-up tasks
    (cmake -> build -> run -> report) onto the same pipeline.
    """
    while True:
        try:
            task = pipeline.get_nowait()
        except queue.Empty:
            # Queue drained: this worker is finished.
            break
        else:
            test = task['test']
            pb = ProjectBuilder(self,
                                test,
                                lsan=self.enable_lsan,
                                asan=self.enable_asan,
                                ubsan=self.enable_ubsan,
                                coverage=self.enable_coverage,
                                extra_args=self.extra_args,
                                device_testing=self.device_testing,
                                cmake_only=self.cmake_only,
                                cleanup=self.cleanup,
                                valgrind=self.enable_valgrind,
                                inline_logs=self.inline_logs,
                                generator=self.generator,
                                generator_cmd=self.generator_cmd,
                                verbose=self.verbose,
                                warnings_as_errors=self.warnings_as_errors,
                                overflow_as_errors=self.overflow_as_errors
                                )
            pb.process(pipeline, done_queue, task, lock, results)

    return True
  2774. def execute(self, pipeline, done, results):
  2775. lock = Lock()
  2776. logger.info("Adding tasks to the queue...")
  2777. self.add_tasks_to_queue(pipeline, self.build_only, self.test_only)
  2778. logger.info("Added initial list of jobs to queue")
  2779. processes = []
  2780. for job in range(self.jobs):
  2781. logger.debug(f"Launch process {job}")
  2782. p = Process(target=self.pipeline_mgr, args=(pipeline, done, lock, results, ))
  2783. processes.append(p)
  2784. p.start()
  2785. try:
  2786. for p in processes:
  2787. p.join()
  2788. except KeyboardInterrupt:
  2789. logger.info("Execution interrupted")
  2790. for p in processes:
  2791. p.terminate()
  2792. # FIXME: This needs to move out.
  2793. if self.enable_size_report and not self.cmake_only:
  2794. # Parallelize size calculation
  2795. executor = concurrent.futures.ThreadPoolExecutor(self.jobs)
  2796. futures = [executor.submit(self.calc_one_elf_size, instance)
  2797. for instance in self.instances.values()]
  2798. concurrent.futures.wait(futures)
  2799. else:
  2800. for instance in self.instances.values():
  2801. instance.metrics["ram_size"] = 0
  2802. instance.metrics["rom_size"] = 0
  2803. instance.metrics["handler_time"] = instance.handler.duration if instance.handler else 0
  2804. instance.metrics["unrecognized"] = []
  2805. return results
  2806. def discard_report(self, filename):
  2807. try:
  2808. if not self.discards:
  2809. raise TwisterRuntimeError("apply_filters() hasn't been run!")
  2810. except Exception as e:
  2811. logger.error(str(e))
  2812. sys.exit(2)
  2813. with open(filename, "wt") as csvfile:
  2814. fieldnames = ["test", "arch", "platform", "reason"]
  2815. cw = csv.DictWriter(csvfile, fieldnames, lineterminator=os.linesep)
  2816. cw.writeheader()
  2817. for instance, reason in sorted(self.discards.items()):
  2818. rowdict = {"test": instance.testcase.name,
  2819. "arch": instance.platform.arch,
  2820. "platform": instance.platform.name,
  2821. "reason": reason}
  2822. cw.writerow(rowdict)
  2823. def target_report(self, outdir, suffix, append=False):
  2824. platforms = {inst.platform.name for _, inst in self.instances.items()}
  2825. for platform in platforms:
  2826. if suffix:
  2827. filename = os.path.join(outdir,"{}_{}.xml".format(platform, suffix))
  2828. else:
  2829. filename = os.path.join(outdir,"{}.xml".format(platform))
  2830. self.xunit_report(filename, platform, full_report=True,
  2831. append=append, version=self.version)
  2832. @staticmethod
  2833. def process_log(log_file):
  2834. filtered_string = ""
  2835. if os.path.exists(log_file):
  2836. with open(log_file, "rb") as f:
  2837. log = f.read().decode("utf-8")
  2838. filtered_string = ''.join(filter(lambda x: x in string.printable, log))
  2839. return filtered_string
def xunit_report(self, filename, platform=None, full_report=False, append=False, version="NA"):
    """Write a JUnit/xUnit XML report for the selected platforms.

    When `platform` is given, only that platform is reported; otherwise
    every platform in self.selected_platforms is covered.  With `append`,
    an existing report file is parsed and the matching per-platform
    <testsuite> elements are updated in place.  `full_report` emits one
    <testcase> per individual test result (instance.results key) rather
    than one per test instance.

    Returns the (fails, passes, errors, skips) counters — note these are
    the counters of the last platform processed, not grand totals.
    """
    total = 0
    fails = passes = errors = skips = 0
    if platform:
        selected = [platform]
        logger.info(f"Writing target report for {platform}...")
    else:
        logger.info(f"Writing xunit report (unknown)...")
        selected = self.selected_platforms

    # Re-use the existing XML tree when appending, else start fresh.
    if os.path.exists(filename) and append:
        tree = ET.parse(filename)
        eleTestsuites = tree.getroot()
    else:
        eleTestsuites = ET.Element('testsuites')

    for p in selected:
        inst = self.get_platform_instances(p)
        fails = 0
        passes = 0
        errors = 0
        skips = 0
        duration = 0

        # First pass: accumulate per-platform counters and total duration.
        for _, instance in inst.items():
            handler_time = instance.metrics.get('handler_time', 0)
            duration += handler_time
            if full_report and instance.run:
                # Count each individual result of the instance.
                for k in instance.results.keys():
                    if instance.results[k] == 'PASS':
                        passes += 1
                    elif instance.results[k] == 'BLOCK':
                        errors += 1
                    elif instance.results[k] == 'SKIP' or instance.status in ['skipped']:
                        skips += 1
                    else:
                        fails += 1
            else:
                # Count the instance as a whole based on its status.
                if instance.status in ["error", "failed", "timeout", "flash_error"]:
                    if instance.reason in ['build_error', 'handler_crash']:
                        errors += 1
                    else:
                        fails += 1
                elif instance.status == 'skipped':
                    skips += 1
                elif instance.status == 'passed':
                    passes += 1
                else:
                    if instance.status:
                        logger.error(f"{instance.name}: Unknown status {instance.status}")
                    else:
                        logger.error(f"{instance.name}: No status")

        total = (errors + passes + fails + skips)
        # do not produce a report if no tests were actually run (only built)
        if total == 0:
            continue

        run = p
        eleTestsuite = None

        # When we re-run the tests, we re-use the results and update only with
        # the newly run tests.
        if os.path.exists(filename) and append:
            ts = eleTestsuites.findall(f'testsuite/[@name="{p}"]')
            if ts:
                eleTestsuite = ts[0]
                eleTestsuite.attrib['failures'] = "%d" % fails
                eleTestsuite.attrib['errors'] = "%d" % errors
                eleTestsuite.attrib['skipped'] = "%d" % skips
            else:
                logger.info(f"Did not find any existing results for {p}")
                eleTestsuite = ET.SubElement(eleTestsuites, 'testsuite',
                                             name=run, time="%f" % duration,
                                             tests="%d" % (total),
                                             failures="%d" % fails,
                                             errors="%d" % (errors), skipped="%s" % (skips))
                eleTSPropetries = ET.SubElement(eleTestsuite, 'properties')
                # Multiple 'property' can be added to 'properties'
                # differing by name and value
                ET.SubElement(eleTSPropetries, 'property', name="version", value=version)
        else:
            eleTestsuite = ET.SubElement(eleTestsuites, 'testsuite',
                                         name=run, time="%f" % duration,
                                         tests="%d" % (total),
                                         failures="%d" % fails,
                                         errors="%d" % (errors), skipped="%s" % (skips))
            eleTSPropetries = ET.SubElement(eleTestsuite, 'properties')
            # Multiple 'property' can be added to 'properties'
            # differing by name and value
            ET.SubElement(eleTSPropetries, 'property', name="version", value=version)

        # Second pass: emit <testcase> elements.
        for _, instance in inst.items():
            if full_report:
                tname = os.path.basename(instance.testcase.name)
            else:
                tname = instance.testcase.id
            handler_time = instance.metrics.get('handler_time', 0)

            if full_report:
                for k in instance.results.keys():
                    # remove testcases that are being re-run from exiting reports
                    for tc in eleTestsuite.findall(f'testcase/[@name="{k}"]'):
                        eleTestsuite.remove(tc)

                    classname = ".".join(tname.split(".")[:2])
                    eleTestcase = ET.SubElement(
                        eleTestsuite, 'testcase',
                        classname=classname,
                        name="%s" % (k), time="%f" % handler_time)
                    if instance.results[k] in ['FAIL', 'BLOCK'] or \
                        (not instance.run and instance.status in ["error", "failed", "timeout"]):
                        # FAIL maps to <failure>; BLOCK (and not-run errors)
                        # map to <error> carrying the instance's reason.
                        if instance.results[k] == 'FAIL':
                            el = ET.SubElement(
                                eleTestcase,
                                'failure',
                                type="failure",
                                message="failed")
                        else:
                            el = ET.SubElement(
                                eleTestcase,
                                'error',
                                type="failure",
                                message=instance.reason)
                        log_root = os.path.join(self.outdir, instance.platform.name, instance.testcase.name)
                        log_file = os.path.join(log_root, "handler.log")
                        el.text = self.process_log(log_file)

                    elif instance.results[k] == 'PASS' \
                        or (not instance.run and instance.status in ["passed"]):
                        pass
                    elif instance.results[k] == 'SKIP' or (instance.status in ["skipped"]):
                        el = ET.SubElement(eleTestcase, 'skipped', type="skipped", message=instance.reason)
                    else:
                        el = ET.SubElement(
                            eleTestcase,
                            'error',
                            type="error",
                            message=f"{instance.reason}")
            else:
                # One <testcase> per instance; prefix the class name with the
                # platform when reporting more than one platform.
                if platform:
                    classname = ".".join(instance.testcase.name.split(".")[:2])
                else:
                    classname = p + ":" + ".".join(instance.testcase.name.split(".")[:2])

                # remove testcases that are being re-run from exiting reports
                for tc in eleTestsuite.findall(f'testcase/[@classname="{classname}"][@name="{instance.testcase.name}"]'):
                    eleTestsuite.remove(tc)

                eleTestcase = ET.SubElement(eleTestsuite, 'testcase',
                    classname=classname,
                    name="%s" % (instance.testcase.name),
                    time="%f" % handler_time)

                if instance.status in ["error", "failed", "timeout", "flash_error"]:
                    failure = ET.SubElement(
                        eleTestcase,
                        'failure',
                        type="failure",
                        message=instance.reason)

                    log_root = ("%s/%s/%s" % (self.outdir, instance.platform.name, instance.testcase.name))
                    bl = os.path.join(log_root, "build.log")
                    hl = os.path.join(log_root, "handler.log")
                    # Prefer the handler log unless the failure happened at build time.
                    log_file = bl
                    if instance.reason != 'Build error':
                        if os.path.exists(hl):
                            log_file = hl
                        else:
                            log_file = bl

                    failure.text = self.process_log(log_file)

                elif instance.status == "skipped":
                    ET.SubElement(eleTestcase, 'skipped', type="skipped", message="Skipped")

    result = ET.tostring(eleTestsuites)
    with open(filename, 'wb') as report:
        report.write(result)

    return fails, passes, errors, skips
  3003. def csv_report(self, filename):
  3004. with open(filename, "wt") as csvfile:
  3005. fieldnames = ["test", "arch", "platform", "status",
  3006. "extra_args", "handler", "handler_time", "ram_size",
  3007. "rom_size"]
  3008. cw = csv.DictWriter(csvfile, fieldnames, lineterminator=os.linesep)
  3009. cw.writeheader()
  3010. for instance in self.instances.values():
  3011. rowdict = {"test": instance.testcase.name,
  3012. "arch": instance.platform.arch,
  3013. "platform": instance.platform.name,
  3014. "extra_args": " ".join(instance.testcase.extra_args),
  3015. "handler": instance.platform.simulation}
  3016. rowdict["status"] = instance.status
  3017. if instance.status not in ["error", "failed", "timeout"]:
  3018. if instance.handler:
  3019. rowdict["handler_time"] = instance.metrics.get("handler_time", 0)
  3020. ram_size = instance.metrics.get("ram_size", 0)
  3021. rom_size = instance.metrics.get("rom_size", 0)
  3022. rowdict["ram_size"] = ram_size
  3023. rowdict["rom_size"] = rom_size
  3024. cw.writerow(rowdict)
def json_report(self, filename, append=False, version="NA"):
    """Write (or append to) a JSON report of all selected platforms.

    With `append`, testcase entries from the existing file are kept and
    any entry matching a (testcase, platform) pair being re-reported is
    replaced by the fresh result.
    """
    logger.info(f"Writing JSON report (unknown)")
    report = {}
    selected = self.selected_platforms
    report["environment"] = {"os": os.name,
                             "zephyr_version": version,
                             "toolchain": self.get_toolchain()
                             }
    json_data = {}
    if os.path.exists(filename) and append:
        with open(filename, 'r') as json_file:
            json_data = json.load(json_file)

    # Only the first suite of an existing report is considered.
    suites = json_data.get("testsuites", [])
    if suites:
        suite = suites[0]
        testcases = suite.get("testcases", [])
    else:
        suite = {}
        testcases = []

    for p in selected:
        inst = self.get_platform_instances(p)
        for _, instance in inst.items():
            testcase = {}
            handler_log = os.path.join(instance.build_dir, "handler.log")
            build_log = os.path.join(instance.build_dir, "build.log")
            device_log = os.path.join(instance.build_dir, "device.log")

            handler_time = instance.metrics.get('handler_time', 0)
            ram_size = instance.metrics.get("ram_size", 0)
            rom_size = instance.metrics.get("rom_size", 0)
            for k in instance.results.keys():
                # Drop any stale entry for this (testcase, platform) pair
                # before appending the fresh one.
                testcases = list(filter(lambda d: not (d.get('testcase') == k and d.get('platform') == p), testcases))
                testcase = {"testcase": k,
                            "arch": instance.platform.arch,
                            "platform": p,
                            }
                # Zero-valued sizes are omitted entirely (falsy check).
                if ram_size:
                    testcase["ram_size"] = ram_size
                if rom_size:
                    testcase["rom_size"] = rom_size

                if instance.results[k] in ["PASS"] or instance.status == 'passed':
                    testcase["status"] = "passed"
                    if instance.handler:
                        testcase["execution_time"] = handler_time

                elif instance.results[k] in ['FAIL', 'BLOCK'] or instance.status in ["error", "failed", "timeout", "flash_error"]:
                    testcase["status"] = "failed"
                    testcase["reason"] = instance.reason
                    testcase["execution_time"] = handler_time
                    # Attach the most specific log available:
                    # handler > device > build.
                    if os.path.exists(handler_log):
                        testcase["test_output"] = self.process_log(handler_log)
                    elif os.path.exists(device_log):
                        testcase["device_log"] = self.process_log(device_log)
                    else:
                        testcase["build_log"] = self.process_log(build_log)
                elif instance.status == 'skipped':
                    testcase["status"] = "skipped"
                    testcase["reason"] = instance.reason
                testcases.append(testcase)

    # NOTE(review): this rebuilds the suite list with only "testcases",
    # discarding any other keys a previously loaded suite may have had.
    suites = [{"testcases": testcases}]
    report["testsuites"] = suites

    with open(filename, "wt") as json_file:
        json.dump(report, json_file, indent=4, separators=(',', ':'))
  3086. def get_testcase(self, identifier):
  3087. results = []
  3088. for _, tc in self.testcases.items():
  3089. for case in tc.cases:
  3090. if case == identifier:
  3091. results.append(tc)
  3092. return results
class CoverageTool:
    """ Base class for every supported coverage tool
    """

    def __init__(self):
        # Path to the gcov binary and the source tree root; both are
        # filled in by the caller before generate() runs.
        self.gcov_tool = None
        self.base_dir = None

    @staticmethod
    def factory(tool):
        """Return a coverage-tool instance for `tool`, or None if unsupported."""
        if tool == 'lcov':
            t = Lcov()
        elif tool == 'gcovr':
            t = Gcovr()
        else:
            logger.error("Unsupported coverage tool specified: {}".format(tool))
            return None
        logger.debug(f"Select {tool} as the coverage tool...")
        return t

    @staticmethod
    def retrieve_gcov_data(input_file):
        """Extract hex-encoded gcov dumps from a handler log.

        Scans for a GCOV_COVERAGE_DUMP_START/END delimited section and
        collects lines of the form "*<filename><hexdump>".  Returns a dict
        with 'complete' (bool, True also when no dump was present at all)
        and 'data' mapping file names to their hex dumps.
        """
        logger.debug("Working on %s" % input_file)
        extracted_coverage_info = {}
        capture_data = False
        capture_complete = False
        with open(input_file, 'r') as fp:
            for line in fp.readlines():
                if re.search("GCOV_COVERAGE_DUMP_START", line):
                    capture_data = True
                    continue
                if re.search("GCOV_COVERAGE_DUMP_END", line):
                    capture_complete = True
                    break
                # Loop until the coverage data is found.
                if not capture_data:
                    continue
                if line.startswith("*"):
                    sp = line.split("<")
                    if len(sp) > 1:
                        # Remove the leading delimiter "*"
                        file_name = sp[0][1:]
                        # Remove the trailing new line char
                        hex_dump = sp[1][:-1]
                    else:
                        continue
                else:
                    continue
                extracted_coverage_info.update({file_name: hex_dump})
        # A log with no dump at all is still considered "complete".
        if not capture_data:
            capture_complete = True
        return {'complete': capture_complete, 'data': extracted_coverage_info}

    @staticmethod
    def create_gcda_files(extracted_coverage_info):
        """Write each hex dump back out as a binary .gcda file."""
        logger.debug("Generating gcda files")
        for filename, hexdump_val in extracted_coverage_info.items():
            # if kobject_hash is given for coverage gcovr fails
            # hence skipping it problem only in gcovr v4.1
            if "kobject_hash" in filename:
                filename = (filename[:-4]) + "gcno"
                try:
                    os.remove(filename)
                except Exception:
                    pass
                continue

            with open(filename, 'wb') as fp:
                fp.write(bytes.fromhex(hexdump_val))

    def generate(self, outdir):
        """Recreate .gcda files from every handler.log under outdir, then
        delegate to the subclass _generate() to build the HTML report."""
        for filename in glob.glob("%s/**/handler.log" % outdir, recursive=True):
            gcov_data = self.__class__.retrieve_gcov_data(filename)
            capture_complete = gcov_data['complete']
            extracted_coverage_info = gcov_data['data']
            if capture_complete:
                self.__class__.create_gcda_files(extracted_coverage_info)
                logger.debug("Gcov data captured: {}".format(filename))
            else:
                logger.error("Gcov data capture incomplete: {}".format(filename))

        with open(os.path.join(outdir, "coverage.log"), "a") as coveragelog:
            ret = self._generate(outdir, coveragelog)
            if ret == 0:
                logger.info("HTML report generated: {}".format(
                    os.path.join(outdir, "coverage", "index.html")))
class Lcov(CoverageTool):
    """Coverage backend driving the lcov/genhtml command-line tools."""

    def __init__(self):
        super().__init__()
        self.ignores = []

    def add_ignore_file(self, pattern):
        # lcov takes glob-style patterns.
        self.ignores.append('*' + pattern + '*')

    def add_ignore_directory(self, pattern):
        self.ignores.append('*/' + pattern + '/*')

    def _generate(self, outdir, coveragelog):
        """Capture coverage with lcov and render HTML via genhtml.

        Returns genhtml's exit code (0 on success).
        """
        coveragefile = os.path.join(outdir, "coverage.info")
        ztestfile = os.path.join(outdir, "ztest.info")
        cmd = ["lcov", "--gcov-tool", self.gcov_tool,
               "--capture", "--directory", outdir,
               "--rc", "lcov_branch_coverage=1",
               "--output-file", coveragefile]
        cmd_str = " ".join(cmd)
        logger.debug(f"Running {cmd_str}...")
        subprocess.call(cmd, stdout=coveragelog)
        # We want to remove tests/* and tests/ztest/test/* but save tests/ztest
        subprocess.call(["lcov", "--gcov-tool", self.gcov_tool, "--extract",
                         coveragefile,
                         os.path.join(self.base_dir, "tests", "ztest", "*"),
                         "--output-file", ztestfile,
                         "--rc", "lcov_branch_coverage=1"], stdout=coveragelog)

        if os.path.exists(ztestfile) and os.path.getsize(ztestfile) > 0:
            subprocess.call(["lcov", "--gcov-tool", self.gcov_tool, "--remove",
                             ztestfile,
                             os.path.join(self.base_dir, "tests/ztest/test/*"),
                             "--output-file", ztestfile,
                             "--rc", "lcov_branch_coverage=1"],
                            stdout=coveragelog)
            files = [coveragefile, ztestfile]
        else:
            files = [coveragefile]

        # Strip every user-requested ignore pattern from the capture.
        for i in self.ignores:
            subprocess.call(
                ["lcov", "--gcov-tool", self.gcov_tool, "--remove",
                 coveragefile, i, "--output-file",
                 coveragefile, "--rc", "lcov_branch_coverage=1"],
                stdout=coveragelog)

        # The --ignore-errors source option is added to avoid it exiting due to
        # samples/application_development/external_lib/
        return subprocess.call(["genhtml", "--legend", "--branch-coverage",
                                "--ignore-errors", "source",
                                "-output-directory",
                                os.path.join(outdir, "coverage")] + files,
                               stdout=coveragelog)
  3219. class Gcovr(CoverageTool):
  3220. def __init__(self):
  3221. super().__init__()
  3222. self.ignores = []
  3223. def add_ignore_file(self, pattern):
  3224. self.ignores.append('.*' + pattern + '.*')
  3225. def add_ignore_directory(self, pattern):
  3226. self.ignores.append(".*/" + pattern + '/.*')
  3227. @staticmethod
  3228. def _interleave_list(prefix, list):
  3229. tuple_list = [(prefix, item) for item in list]
  3230. return [item for sublist in tuple_list for item in sublist]
  3231. def _generate(self, outdir, coveragelog):
  3232. coveragefile = os.path.join(outdir, "coverage.json")
  3233. ztestfile = os.path.join(outdir, "ztest.json")
  3234. excludes = Gcovr._interleave_list("-e", self.ignores)
  3235. # We want to remove tests/* and tests/ztest/test/* but save tests/ztest
  3236. cmd = ["gcovr", "-r", self.base_dir, "--gcov-executable",
  3237. self.gcov_tool, "-e", "tests/*"] + excludes + ["--json", "-o",
  3238. coveragefile, outdir]
  3239. cmd_str = " ".join(cmd)
  3240. logger.debug(f"Running {cmd_str}...")
  3241. subprocess.call(cmd, stdout=coveragelog)
  3242. subprocess.call(["gcovr", "-r", self.base_dir, "--gcov-executable",
  3243. self.gcov_tool, "-f", "tests/ztest", "-e",
  3244. "tests/ztest/test/*", "--json", "-o", ztestfile,
  3245. outdir], stdout=coveragelog)
  3246. if os.path.exists(ztestfile) and os.path.getsize(ztestfile) > 0:
  3247. files = [coveragefile, ztestfile]
  3248. else:
  3249. files = [coveragefile]
  3250. subdir = os.path.join(outdir, "coverage")
  3251. os.makedirs(subdir, exist_ok=True)
  3252. tracefiles = self._interleave_list("--add-tracefile", files)
  3253. return subprocess.call(["gcovr", "-r", self.base_dir, "--html",
  3254. "--html-details"] + tracefiles +
  3255. ["-o", os.path.join(subdir, "index.html")],
  3256. stdout=coveragelog)
  3257. class DUT(object):
  3258. def __init__(self,
  3259. id=None,
  3260. serial=None,
  3261. platform=None,
  3262. product=None,
  3263. serial_pty=None,
  3264. connected=False,
  3265. pre_script=None,
  3266. post_script=None,
  3267. post_flash_script=None,
  3268. runner=None):
  3269. self.serial = serial
  3270. self.platform = platform
  3271. self.serial_pty = serial_pty
  3272. self._counter = Value("i", 0)
  3273. self._available = Value("i", 1)
  3274. self.connected = connected
  3275. self.pre_script = pre_script
  3276. self.id = id
  3277. self.product = product
  3278. self.runner = runner
  3279. self.fixtures = []
  3280. self.post_flash_script = post_flash_script
  3281. self.post_script = post_script
  3282. self.pre_script = pre_script
  3283. self.probe_id = None
  3284. self.notes = None
  3285. self.lock = Lock()
  3286. self.match = False
  3287. @property
  3288. def available(self):
  3289. with self._available.get_lock():
  3290. return self._available.value
  3291. @available.setter
  3292. def available(self, value):
  3293. with self._available.get_lock():
  3294. self._available.value = value
  3295. @property
  3296. def counter(self):
  3297. with self._counter.get_lock():
  3298. return self._counter.value
  3299. @counter.setter
  3300. def counter(self, value):
  3301. with self._counter.get_lock():
  3302. self._counter.value = value
  3303. def to_dict(self):
  3304. d = {}
  3305. exclude = ['_available', '_counter', 'match']
  3306. v = vars(self)
  3307. for k in v.keys():
  3308. if k not in exclude and v[k]:
  3309. d[k] = v[k]
  3310. return d
  3311. def __repr__(self):
  3312. return f"<{self.platform} ({self.product}) on {self.serial}>"
class HardwareMap:
    """Registry of physical boards available for device testing.

    Devices are either loaded from a schema-validated YAML map file or
    auto-detected from connected USB serial adapters.
    """

    schema_path = os.path.join(ZEPHYR_BASE, "scripts", "schemas", "twister", "hwmap-schema.yaml")

    # USB manufacturers we recognize as debug/serial probes.
    manufacturer = [
        'ARM',
        'SEGGER',
        'MBED',
        'STMicroelectronics',
        'Atmel Corp.',
        'Texas Instruments',
        'Silicon Labs',
        'NXP Semiconductors',
        'Microchip Technology Inc.',
        'FTDI',
        'Digilent'
    ]

    # Map flash runner -> USB product strings (exact or regex) it handles.
    runner_mapping = {
        'pyocd': [
            'DAPLink CMSIS-DAP',
            'MBED CMSIS-DAP'
        ],
        'jlink': [
            'J-Link',
            'J-Link OB'
        ],
        'openocd': [
            'STM32 STLink', '^XDS110.*', 'STLINK-V3'
        ],
        'dediprog': [
            'TTL232R-3V3',
            'MCP2200 USB Serial Port Emulator'
        ]
    }

    def __init__(self):
        # Devices found by scan() vs. devices loaded from a map file.
        self.detected = []
        self.duts = []

    def add_device(self, serial, platform, pre_script, is_pty):
        """Manually register one connected device (serial node or pty)."""
        device = DUT(platform=platform, connected=True, pre_script=pre_script)

        if is_pty:
            device.serial_pty = serial
        else:
            device.serial = serial

        self.duts.append(device)

    def load(self, map_file):
        """Load DUTs from a YAML map file validated against the schema."""
        hwm_schema = scl.yaml_load(self.schema_path)
        duts = scl.yaml_load_verify(map_file, hwm_schema)
        for dut in duts:
            pre_script = dut.get('pre_script')
            post_script = dut.get('post_script')
            post_flash_script = dut.get('post_flash_script')
            platform = dut.get('platform')
            id = dut.get('id')
            runner = dut.get('runner')
            serial = dut.get('serial')
            product = dut.get('product')
            fixtures = dut.get('fixtures', [])
            # A device with no serial entry is treated as not connected.
            new_dut = DUT(platform=platform,
                          product=product,
                          runner=runner,
                          id=id,
                          serial=serial,
                          connected=serial is not None,
                          pre_script=pre_script,
                          post_script=post_script,
                          post_flash_script=post_flash_script)
            new_dut.fixtures = fixtures
            new_dut.counter = 0
            self.duts.append(new_dut)

    def scan(self, persistent=False):
        """Auto-detect connected boards via pyserial's port enumeration.

        With persistent=True on Linux, prefer stable
        /dev/serial/by-id symlinks over volatile /dev/ttyACMx nodes.
        """
        from serial.tools import list_ports

        if persistent and platform.system() == 'Linux':
            # On Linux, /dev/serial/by-id provides symlinks to
            # '/dev/ttyACMx' nodes using names which are unique as
            # long as manufacturers fill out USB metadata nicely.
            #
            # This creates a map from '/dev/ttyACMx' device nodes
            # to '/dev/serial/by-id/usb-...' symlinks. The symlinks
            # go into the hardware map because they stay the same
            # even when the user unplugs / replugs the device.
            #
            # Some inexpensive USB/serial adapters don't result
            # in unique names here, though, so use of this feature
            # requires explicitly setting persistent=True.
            by_id = Path('/dev/serial/by-id')

            def readlink(link):
                return str((by_id / link).resolve())

            persistent_map = {readlink(link): str(link)
                              for link in by_id.iterdir()}
        else:
            persistent_map = {}

        serial_devices = list_ports.comports()
        logger.info("Scanning connected hardware...")
        for d in serial_devices:
            if d.manufacturer in self.manufacturer:

                # TI XDS110 can have multiple serial devices for a single board
                # assume endpoint 0 is the serial, skip all others
                if d.manufacturer == 'Texas Instruments' and not d.location.endswith('0'):
                    continue
                s_dev = DUT(platform="unknown",
                            id=d.serial_number,
                            serial=persistent_map.get(d.device, d.device),
                            product=d.product,
                            runner='unknown',
                            connected=True)

                # Pick a flash runner from the product string, first by
                # exact match and then by regex.
                # NOTE(review): d.product can be None for some adapters,
                # which would make re.match() raise — confirm upstream.
                for runner, _ in self.runner_mapping.items():
                    products = self.runner_mapping.get(runner)
                    if d.product in products:
                        s_dev.runner = runner
                        continue
                    # Try regex matching
                    for p in products:
                        if re.match(p, d.product):
                            s_dev.runner = runner

                s_dev.connected = True
                self.detected.append(s_dev)
            else:
                logger.warning("Unsupported device (%s): %s" % (d.manufacturer, d))

    def save(self, hwm_file):
        """Merge detected devices into hwm_file (or create it) and reload."""
        # use existing map
        self.detected.sort(key=lambda x: x.serial or '')
        if os.path.exists(hwm_file):
            with open(hwm_file, 'r') as yaml_file:
                hwm = yaml.load(yaml_file, Loader=SafeLoader)
                if hwm:
                    hwm.sort(key=lambda x: x['serial'] or '')

                    # disconnect everything
                    for h in hwm:
                        h['connected'] = False
                        h['serial'] = None

                    # Reconnect entries that match a freshly detected device
                    # (by id + product), claiming each detection only once.
                    for _detected in self.detected:
                        for h in hwm:
                            if _detected.id == h['id'] and _detected.product == h['product'] and not _detected.match:
                                h['connected'] = True
                                h['serial'] = _detected.serial
                                _detected.match = True

                # Anything detected but not matched is appended as new.
                new_duts = list(filter(lambda d: not d.match, self.detected))
                new = []
                for d in new_duts:
                    new.append(d.to_dict())

                if hwm:
                    hwm = hwm + new
                else:
                    hwm = new

            with open(hwm_file, 'w') as yaml_file:
                yaml.dump(hwm, yaml_file, Dumper=Dumper, default_flow_style=False)

            self.load(hwm_file)
            logger.info("Registered devices:")
            self.dump()
        else:
            # create new file
            dl = []
            for _connected in self.detected:
                platform = _connected.platform
                id = _connected.id
                runner = _connected.runner
                serial = _connected.serial
                product = _connected.product
                d = {
                    'platform': platform,
                    'id': id,
                    'runner': runner,
                    'serial': serial,
                    'product': product,
                    'connected': _connected.connected
                }
                dl.append(d)
            with open(hwm_file, 'w') as yaml_file:
                yaml.dump(dl, yaml_file, Dumper=Dumper, default_flow_style=False)
            logger.info("Detected devices:")
            self.dump(detected=True)

    def dump(self, filtered=[], header=[], connected_only=False, detected=False):
        """Print a table of devices (detected or registered) to stdout."""
        print("")
        table = []
        if detected:
            to_show = self.detected
        else:
            to_show = self.duts

        if not header:
            header = ["Platform", "ID", "Serial device"]
        for p in to_show:
            platform = p.platform
            connected = p.connected
            if filtered and platform not in filtered:
                continue

            if not connected_only or connected:
                table.append([platform, p.id, p.serial])

        print(tabulate(table, headers=header, tablefmt="github"))