/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * Memory-to-memory device framework for Video for Linux 2.
 *
 * Helper functions for devices that use memory buffers for both source
 * and destination.
 *
 * Copyright (c) 2009 Samsung Electronics Co., Ltd.
 * Pawel Osciak,
 * Marek Szyprowski,
 */

#ifndef _MEDIA_V4L2_MEM2MEM_H
#define _MEDIA_V4L2_MEM2MEM_H

#include <media/videobuf2-v4l2.h>

/**
 * struct v4l2_m2m_ops - mem-to-mem device driver callbacks
 * @device_run:	required. Begin the actual job (transaction) inside this
 *		callback.
 *		The job does NOT have to end before this callback returns
 *		(and this is the usual case). When the job finishes,
 *		v4l2_m2m_job_finish() has to be called.
 * @job_ready:	optional. Should return 0 if the driver does not have a job
 *		fully prepared to run yet (i.e. it will not be able to finish a
 *		transaction without sleeping). If not provided, it will be
 *		assumed that one source and one destination buffer are all
 *		that is required for the driver to perform one full transaction.
 *		This method may not sleep.
 * @job_abort:	optional. Informs the driver that it has to abort the currently
 *		running transaction as soon as possible (i.e. as soon as it can
 *		stop the device safely; e.g. in the next interrupt handler),
 *		even if the transaction would not have been finished by then.
 *		After the driver performs the necessary steps, it has to call
 *		v4l2_m2m_job_finish() (as if the transaction ended normally).
 *		This function does not have to (and will usually not) wait
 *		until the device enters a state when it can be stopped.
 */
struct v4l2_m2m_ops {
	void (*device_run)(void *priv);
	int (*job_ready)(void *priv);
	void (*job_abort)(void *priv);
};
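
/*
 * Example (an illustrative sketch only, not part of this API): a hypothetical
 * "foo" driver could wire up these callbacks roughly as below. All foo_*
 * names and the foo_ctx/foo_dev layouts are placeholders; the interrupt
 * handler is where such a driver would typically complete the buffers and
 * yield the device back to the framework.
 *
 *	static void foo_device_run(void *priv)
 *	{
 *		struct foo_ctx *ctx = priv;
 *		struct vb2_v4l2_buffer *src, *dst;
 *
 *		src = v4l2_m2m_next_src_buf(ctx->fh.m2m_ctx);
 *		dst = v4l2_m2m_next_dst_buf(ctx->fh.m2m_ctx);
 *		foo_hw_program(ctx, src, dst);	// start the transaction
 *	}
 *
 *	static irqreturn_t foo_irq(int irq, void *data)
 *	{
 *		struct foo_dev *dev = data;
 *		struct foo_ctx *ctx = v4l2_m2m_get_curr_priv(dev->m2m_dev);
 *		struct vb2_v4l2_buffer *src, *dst;
 *
 *		src = v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx);
 *		dst = v4l2_m2m_dst_buf_remove(ctx->fh.m2m_ctx);
 *		v4l2_m2m_buf_done(src, VB2_BUF_STATE_DONE);
 *		v4l2_m2m_buf_done(dst, VB2_BUF_STATE_DONE);
 *		v4l2_m2m_job_finish(dev->m2m_dev, ctx->fh.m2m_ctx);
 *		return IRQ_HANDLED;
 *	}
 *
 *	static void foo_job_abort(void *priv)
 *	{
 *		struct foo_ctx *ctx = priv;
 *
 *		ctx->aborting = true;	// observed by the interrupt handler
 *	}
 *
 *	static const struct v4l2_m2m_ops foo_m2m_ops = {
 *		.device_run	= foo_device_run,
 *		.job_abort	= foo_job_abort,
 *	};
 */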

struct video_device;
struct v4l2_m2m_dev;

/**
 * struct v4l2_m2m_queue_ctx - represents a queue for buffers ready to be
 *	processed
 *
 * @q:		struct &vb2_queue for this queue
 * @rdy_queue:	List of V4L2 mem-to-mem queues
 * @rdy_spinlock: spin lock to protect the struct usage
 * @num_rdy:	number of buffers ready to be processed
 * @buffered:	is the queue buffered?
 *
 * Queue for buffers ready to be processed as soon as this
 * instance receives access to the device.
 */
struct v4l2_m2m_queue_ctx {
	struct vb2_queue	q;

	struct list_head	rdy_queue;
	spinlock_t		rdy_spinlock;
	u8			num_rdy;
	bool			buffered;
};

/**
 * struct v4l2_m2m_ctx - Memory to memory context structure
 *
 * @q_lock: struct &mutex lock
 * @m2m_dev: opaque pointer to the internal data to handle M2M context
 * @cap_q_ctx: Capture (output to memory) queue context
 * @out_q_ctx: Output (input from memory) queue context
 * @queue: List of memory to memory contexts
 * @job_flags: Job queue flags, used internally by v4l2-mem2mem.c:
 *		%TRANS_QUEUED, %TRANS_RUNNING and %TRANS_ABORT.
 * @finished: Wait queue used to signal when a job finishes.
 * @priv: Instance private data
 *
 * The memory to memory context is specific to a file handle, NOT to e.g.
 * a device.
 */
struct v4l2_m2m_ctx {
	/* optional cap/out vb2 queues lock */
	struct mutex			*q_lock;

	/* internal use only */
	struct v4l2_m2m_dev		*m2m_dev;

	struct v4l2_m2m_queue_ctx	cap_q_ctx;

	struct v4l2_m2m_queue_ctx	out_q_ctx;

	/* For device job queue */
	struct list_head		queue;
	unsigned long			job_flags;
	wait_queue_head_t		finished;

	void				*priv;
};

/**
 * struct v4l2_m2m_buffer - Memory to memory buffer
 *
 * @vb: struct &vb2_v4l2_buffer for this buffer
 * @list: list of m2m buffers
 */
struct v4l2_m2m_buffer {
	struct vb2_v4l2_buffer	vb;
	struct list_head	list;
};

/**
 * v4l2_m2m_get_curr_priv() - return driver private data for the currently
 * running instance or NULL if no instance is running
 *
 * @m2m_dev: opaque pointer to the internal data to handle M2M context
 */
void *v4l2_m2m_get_curr_priv(struct v4l2_m2m_dev *m2m_dev);

/**
 * v4l2_m2m_get_vq() - return vb2_queue for the given type
 *
 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
 * @type: type of the V4L2 buffer, as defined by enum &v4l2_buf_type
 */
struct vb2_queue *v4l2_m2m_get_vq(struct v4l2_m2m_ctx *m2m_ctx,
				  enum v4l2_buf_type type);

/**
 * v4l2_m2m_try_schedule() - check whether an instance is ready to be added to
 * the pending job queue and add it if so.
 *
 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
 *
 * There are three basic requirements an instance has to meet to be able to run:
 * 1) at least one source buffer has to be queued,
 * 2) at least one destination buffer has to be queued,
 * 3) streaming has to be on.
 *
 * If a queue is buffered (for example a decoder hardware ringbuffer that has
 * to be drained before doing streamoff), allow scheduling without v4l2 buffers
 * on that queue.
 *
 * There may also be additional, custom requirements. In such a case the driver
 * should supply a custom callback (job_ready in v4l2_m2m_ops) that should
 * return 1 if the instance is ready.
 * An example of the above could be an instance that requires more than one
 * src/dst buffer per transaction.
 */
void v4l2_m2m_try_schedule(struct v4l2_m2m_ctx *m2m_ctx);
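
/*
 * Example (an illustrative sketch only): a hypothetical instance that needs
 * at least two source buffers per transaction could provide a custom
 * job_ready callback like the one below; foo_ctx and its fields are
 * placeholders.
 *
 *	static int foo_job_ready(void *priv)
 *	{
 *		struct foo_ctx *ctx = priv;
 *
 *		// Not ready unless at least two source buffers are queued.
 *		return v4l2_m2m_num_src_bufs_ready(ctx->fh.m2m_ctx) >= 2;
 *	}
 */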

/**
 * v4l2_m2m_job_finish() - inform the framework that a job has been finished
 * and have it clean up
 *
 * @m2m_dev: opaque pointer to the internal data to handle M2M context
 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
 *
 * Called by a driver to yield back the device after it has finished with it.
 * Should be called as soon as possible after reaching a state which allows
 * other instances to take control of the device.
 *
 * This function has to be called only after &v4l2_m2m_ops->device_run
 * callback has been called on the driver. To prevent recursion, it should
 * not be called directly from the &v4l2_m2m_ops->device_run callback though.
 */
void v4l2_m2m_job_finish(struct v4l2_m2m_dev *m2m_dev,
			 struct v4l2_m2m_ctx *m2m_ctx);

static inline void
v4l2_m2m_buf_done(struct vb2_v4l2_buffer *buf, enum vb2_buffer_state state)
{
	vb2_buffer_done(&buf->vb2_buf, state);
}

/**
 * v4l2_m2m_reqbufs() - multi-queue-aware REQBUFS multiplexer
 *
 * @file: pointer to struct &file
 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
 * @reqbufs: pointer to struct &v4l2_requestbuffers
 */
int v4l2_m2m_reqbufs(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
		     struct v4l2_requestbuffers *reqbufs);

/**
 * v4l2_m2m_querybuf() - multi-queue-aware QUERYBUF multiplexer
 *
 * @file: pointer to struct &file
 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
 * @buf: pointer to struct &v4l2_buffer
 *
 * See v4l2_m2m_mmap() documentation for details.
 */
int v4l2_m2m_querybuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
		      struct v4l2_buffer *buf);

/**
 * v4l2_m2m_qbuf() - enqueue a source or destination buffer, depending on
 * the type
 *
 * @file: pointer to struct &file
 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
 * @buf: pointer to struct &v4l2_buffer
 */
int v4l2_m2m_qbuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
		  struct v4l2_buffer *buf);

/**
 * v4l2_m2m_dqbuf() - dequeue a source or destination buffer, depending on
 * the type
 *
 * @file: pointer to struct &file
 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
 * @buf: pointer to struct &v4l2_buffer
 */
int v4l2_m2m_dqbuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
		   struct v4l2_buffer *buf);

/**
 * v4l2_m2m_prepare_buf() - prepare a source or destination buffer, depending
 * on the type
 *
 * @file: pointer to struct &file
 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
 * @buf: pointer to struct &v4l2_buffer
 */
int v4l2_m2m_prepare_buf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
			 struct v4l2_buffer *buf);

/**
 * v4l2_m2m_create_bufs() - create a source or destination buffer, depending
 * on the type
 *
 * @file: pointer to struct &file
 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
 * @create: pointer to struct &v4l2_create_buffers
 */
int v4l2_m2m_create_bufs(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
			 struct v4l2_create_buffers *create);

/**
 * v4l2_m2m_expbuf() - export a source or destination buffer, depending on
 * the type
 *
 * @file: pointer to struct &file
 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
 * @eb: pointer to struct &v4l2_exportbuffer
 */
int v4l2_m2m_expbuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
		    struct v4l2_exportbuffer *eb);
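
/*
 * Example (an illustrative sketch only): a driver that keeps its m2m context
 * in a private file-handle structure (here the placeholder foo_ctx, embedding
 * a struct v4l2_fh) can route its buffer ioctls through these multiplexers:
 *
 *	static int foo_qbuf(struct file *file, void *priv,
 *			    struct v4l2_buffer *buf)
 *	{
 *		struct foo_ctx *ctx = container_of(priv, struct foo_ctx, fh);
 *
 *		return v4l2_m2m_qbuf(file, ctx->fh.m2m_ctx, buf);
 *	}
 *
 *	static int foo_dqbuf(struct file *file, void *priv,
 *			     struct v4l2_buffer *buf)
 *	{
 *		struct foo_ctx *ctx = container_of(priv, struct foo_ctx, fh);
 *
 *		return v4l2_m2m_dqbuf(file, ctx->fh.m2m_ctx, buf);
 *	}
 *
 * Drivers that store the context in &v4l2_fh->m2m_ctx can instead plug in the
 * ready-made v4l2_m2m_ioctl_*() helpers declared at the end of this header.
 */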

/**
 * v4l2_m2m_streamon() - turn on streaming for a video queue
 *
 * @file: pointer to struct &file
 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
 * @type: type of the V4L2 buffer, as defined by enum &v4l2_buf_type
 */
int v4l2_m2m_streamon(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
		      enum v4l2_buf_type type);

/**
 * v4l2_m2m_streamoff() - turn off streaming for a video queue
 *
 * @file: pointer to struct &file
 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
 * @type: type of the V4L2 buffer, as defined by enum &v4l2_buf_type
 */
int v4l2_m2m_streamoff(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
		       enum v4l2_buf_type type);

/**
 * v4l2_m2m_poll() - poll replacement, for destination buffers only
 *
 * @file: pointer to struct &file
 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
 * @wait: pointer to struct &poll_table_struct
 *
 * Call from the driver's poll() function. Will poll both queues. If a buffer
 * is available to dequeue (with dqbuf) from the source queue, this will
 * indicate that a non-blocking write can be performed, while a buffer ready
 * on the destination queue indicates a non-blocking read.
 */
__poll_t v4l2_m2m_poll(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
		       struct poll_table_struct *wait);

/**
 * v4l2_m2m_mmap() - source and destination queues-aware mmap multiplexer
 *
 * @file: pointer to struct &file
 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
 * @vma: pointer to struct &vm_area_struct
 *
 * Call from driver's mmap() function. Will handle mmap() for both queues
 * seamlessly for the videobuf2 framework, which will receive normal per-queue
 * offsets and proper queue pointers. The differentiation is made outside
 * videobuf2 by adding a predefined offset to buffers from one of the queues
 * and subtracting it before passing it back to videobuf2. Only drivers (and
 * thus applications) receive modified offsets.
 */
int v4l2_m2m_mmap(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
		  struct vm_area_struct *vma);

/**
 * v4l2_m2m_init() - initialize per-driver m2m data
 *
 * @m2m_ops: pointer to struct v4l2_m2m_ops
 *
 * Usually called from driver's ``probe()`` function.
 *
 * Return: returns an opaque pointer to the internal data to handle M2M context
 */
struct v4l2_m2m_dev *v4l2_m2m_init(const struct v4l2_m2m_ops *m2m_ops);

#if defined(CONFIG_MEDIA_CONTROLLER)
void v4l2_m2m_unregister_media_controller(struct v4l2_m2m_dev *m2m_dev);
int v4l2_m2m_register_media_controller(struct v4l2_m2m_dev *m2m_dev,
			struct video_device *vdev, int function);
#else
static inline void
v4l2_m2m_unregister_media_controller(struct v4l2_m2m_dev *m2m_dev)
{
}

static inline int
v4l2_m2m_register_media_controller(struct v4l2_m2m_dev *m2m_dev,
		struct video_device *vdev, int function)
{
	return 0;
}
#endif

/**
 * v4l2_m2m_release() - cleans up and frees a m2m_dev structure
 *
 * @m2m_dev: opaque pointer to the internal data to handle M2M context
 *
 * Usually called from driver's ``remove()`` function.
 */
void v4l2_m2m_release(struct v4l2_m2m_dev *m2m_dev);

/**
 * v4l2_m2m_ctx_init() - allocate and initialize a m2m context
 *
 * @m2m_dev: opaque pointer to the internal data to handle M2M context
 * @drv_priv: driver's instance private data
 * @queue_init: a callback for queue type-specific initialization function
 *	to be used for initializing the vb2 queues
 *
 * Usually called from driver's ``open()`` function.
 */
struct v4l2_m2m_ctx *v4l2_m2m_ctx_init(struct v4l2_m2m_dev *m2m_dev,
		void *drv_priv,
		int (*queue_init)(void *priv, struct vb2_queue *src_vq,
				  struct vb2_queue *dst_vq));
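
/*
 * Example (an illustrative sketch only): typical probe()/open() usage for a
 * hypothetical "foo" driver; foo_dev, foo_ctx, foo_queue_init and foo_qops
 * are placeholders, and locking/error unwinding is abbreviated.
 *
 *	// probe():
 *	dev->m2m_dev = v4l2_m2m_init(&foo_m2m_ops);
 *	if (IS_ERR(dev->m2m_dev))
 *		return PTR_ERR(dev->m2m_dev);
 *
 *	// open():
 *	ctx->fh.m2m_ctx = v4l2_m2m_ctx_init(dev->m2m_dev, ctx, foo_queue_init);
 *	if (IS_ERR(ctx->fh.m2m_ctx))
 *		return PTR_ERR(ctx->fh.m2m_ctx);
 *
 *	static int foo_queue_init(void *priv, struct vb2_queue *src_vq,
 *				  struct vb2_queue *dst_vq)
 *	{
 *		struct foo_ctx *ctx = priv;
 *
 *		src_vq->type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
 *		src_vq->io_modes = VB2_MMAP | VB2_DMABUF;
 *		src_vq->drv_priv = ctx;
 *		src_vq->ops = &foo_qops;
 *		src_vq->mem_ops = &vb2_dma_contig_memops;
 *		src_vq->buf_struct_size = sizeof(struct v4l2_m2m_buffer);
 *		src_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
 *		// ... same for dst_vq with V4L2_BUF_TYPE_VIDEO_CAPTURE ...
 *		return vb2_queue_init(src_vq) ?: vb2_queue_init(dst_vq);
 *	}
 */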

static inline void v4l2_m2m_set_src_buffered(struct v4l2_m2m_ctx *m2m_ctx,
					     bool buffered)
{
	m2m_ctx->out_q_ctx.buffered = buffered;
}

static inline void v4l2_m2m_set_dst_buffered(struct v4l2_m2m_ctx *m2m_ctx,
					     bool buffered)
{
	m2m_ctx->cap_q_ctx.buffered = buffered;
}

/**
 * v4l2_m2m_ctx_release() - release m2m context
 *
 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
 *
 * Usually called from driver's release() function.
 */
void v4l2_m2m_ctx_release(struct v4l2_m2m_ctx *m2m_ctx);

/**
 * v4l2_m2m_buf_queue() - add a buffer to the proper ready buffers list.
 *
 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
 * @vbuf: pointer to struct &vb2_v4l2_buffer
 *
 * Call from the driver's &vb2_ops buf_queue callback.
 */
void v4l2_m2m_buf_queue(struct v4l2_m2m_ctx *m2m_ctx,
			struct vb2_v4l2_buffer *vbuf);

/**
 * v4l2_m2m_num_src_bufs_ready() - return the number of source buffers ready
 * for use
 *
 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
 */
static inline
unsigned int v4l2_m2m_num_src_bufs_ready(struct v4l2_m2m_ctx *m2m_ctx)
{
	unsigned int num_buf_rdy;
	unsigned long flags;

	spin_lock_irqsave(&m2m_ctx->out_q_ctx.rdy_spinlock, flags);
	num_buf_rdy = m2m_ctx->out_q_ctx.num_rdy;
	spin_unlock_irqrestore(&m2m_ctx->out_q_ctx.rdy_spinlock, flags);

	return num_buf_rdy;
}

/**
 * v4l2_m2m_num_dst_bufs_ready() - return the number of destination buffers
 * ready for use
 *
 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
 */
static inline
unsigned int v4l2_m2m_num_dst_bufs_ready(struct v4l2_m2m_ctx *m2m_ctx)
{
	unsigned int num_buf_rdy;
	unsigned long flags;

	spin_lock_irqsave(&m2m_ctx->cap_q_ctx.rdy_spinlock, flags);
	num_buf_rdy = m2m_ctx->cap_q_ctx.num_rdy;
	spin_unlock_irqrestore(&m2m_ctx->cap_q_ctx.rdy_spinlock, flags);

	return num_buf_rdy;
}

/**
 * v4l2_m2m_next_buf() - return next buffer from the list of ready buffers
 *
 * @q_ctx: pointer to struct &v4l2_m2m_queue_ctx
 */
struct vb2_v4l2_buffer *v4l2_m2m_next_buf(struct v4l2_m2m_queue_ctx *q_ctx);

/**
 * v4l2_m2m_next_src_buf() - return next source buffer from the list of ready
 * buffers
 *
 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
 */
static inline struct vb2_v4l2_buffer *
v4l2_m2m_next_src_buf(struct v4l2_m2m_ctx *m2m_ctx)
{
	return v4l2_m2m_next_buf(&m2m_ctx->out_q_ctx);
}

/**
 * v4l2_m2m_next_dst_buf() - return next destination buffer from the list of
 * ready buffers
 *
 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
 */
static inline struct vb2_v4l2_buffer *
v4l2_m2m_next_dst_buf(struct v4l2_m2m_ctx *m2m_ctx)
{
	return v4l2_m2m_next_buf(&m2m_ctx->cap_q_ctx);
}

/**
 * v4l2_m2m_last_buf() - return last buffer from the list of ready buffers
 *
 * @q_ctx: pointer to struct &v4l2_m2m_queue_ctx
 */
struct vb2_v4l2_buffer *v4l2_m2m_last_buf(struct v4l2_m2m_queue_ctx *q_ctx);

/**
 * v4l2_m2m_last_src_buf() - return last source buffer from the list of ready
 * buffers
 *
 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
 */
static inline struct vb2_v4l2_buffer *
v4l2_m2m_last_src_buf(struct v4l2_m2m_ctx *m2m_ctx)
{
	return v4l2_m2m_last_buf(&m2m_ctx->out_q_ctx);
}
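
/*
 * Example (an illustrative sketch only): a driver's vb2 .buf_queue operation
 * typically just hands the buffer to the framework; foo_ctx is a placeholder
 * for the driver's per-file-handle context stored as the queue's drv_priv.
 *
 *	static void foo_buf_queue(struct vb2_buffer *vb)
 *	{
 *		struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
 *		struct foo_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
 *
 *		v4l2_m2m_buf_queue(ctx->fh.m2m_ctx, vbuf);
 *	}
 */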

/**
 * v4l2_m2m_last_dst_buf() - return last destination buffer from the list of
 * ready buffers
 *
 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
 */
static inline struct vb2_v4l2_buffer *
v4l2_m2m_last_dst_buf(struct v4l2_m2m_ctx *m2m_ctx)
{
	return v4l2_m2m_last_buf(&m2m_ctx->cap_q_ctx);
}

/**
 * v4l2_m2m_for_each_dst_buf() - iterate over a list of destination ready
 * buffers
 *
 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
 * @b: current buffer of type struct v4l2_m2m_buffer
 */
#define v4l2_m2m_for_each_dst_buf(m2m_ctx, b)	\
	list_for_each_entry(b, &m2m_ctx->cap_q_ctx.rdy_queue, list)

/**
 * v4l2_m2m_for_each_src_buf() - iterate over a list of source ready buffers
 *
 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
 * @b: current buffer of type struct v4l2_m2m_buffer
 */
#define v4l2_m2m_for_each_src_buf(m2m_ctx, b)	\
	list_for_each_entry(b, &m2m_ctx->out_q_ctx.rdy_queue, list)

/**
 * v4l2_m2m_for_each_dst_buf_safe() - iterate over a list of destination ready
 * buffers safely
 *
 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
 * @b: current buffer of type struct v4l2_m2m_buffer
 * @n: used as temporary storage
 */
#define v4l2_m2m_for_each_dst_buf_safe(m2m_ctx, b, n)	\
	list_for_each_entry_safe(b, n, &m2m_ctx->cap_q_ctx.rdy_queue, list)

/**
 * v4l2_m2m_for_each_src_buf_safe() - iterate over a list of source ready
 * buffers safely
 *
 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
 * @b: current buffer of type struct v4l2_m2m_buffer
 * @n: used as temporary storage
 */
#define v4l2_m2m_for_each_src_buf_safe(m2m_ctx, b, n)	\
	list_for_each_entry_safe(b, n, &m2m_ctx->out_q_ctx.rdy_queue, list)

/**
 * v4l2_m2m_get_src_vq() - return vb2_queue for source buffers
 *
 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
 */
static inline
struct vb2_queue *v4l2_m2m_get_src_vq(struct v4l2_m2m_ctx *m2m_ctx)
{
	return &m2m_ctx->out_q_ctx.q;
}

/**
 * v4l2_m2m_get_dst_vq() - return vb2_queue for destination buffers
 *
 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
 */
static inline
struct vb2_queue *v4l2_m2m_get_dst_vq(struct v4l2_m2m_ctx *m2m_ctx)
{
	return &m2m_ctx->cap_q_ctx.q;
}

/**
 * v4l2_m2m_buf_remove() - take off a buffer from the list of ready buffers and
 * return it
 *
 * @q_ctx: pointer to struct &v4l2_m2m_queue_ctx
 */
struct vb2_v4l2_buffer *v4l2_m2m_buf_remove(struct v4l2_m2m_queue_ctx *q_ctx);

/**
 * v4l2_m2m_src_buf_remove() - take off a source buffer from the list of ready
 * buffers and return it
 *
 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
 */
static inline struct vb2_v4l2_buffer *
v4l2_m2m_src_buf_remove(struct v4l2_m2m_ctx *m2m_ctx)
{
	return v4l2_m2m_buf_remove(&m2m_ctx->out_q_ctx);
}

/**
 * v4l2_m2m_dst_buf_remove() - take off a destination buffer from the list of
 * ready buffers and return it
 *
 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
 */
static inline struct vb2_v4l2_buffer *
v4l2_m2m_dst_buf_remove(struct v4l2_m2m_ctx *m2m_ctx)
{
	return v4l2_m2m_buf_remove(&m2m_ctx->cap_q_ctx);
}

/**
 * v4l2_m2m_buf_remove_by_buf() - take off exact buffer from the list of ready
 * buffers
 *
 * @q_ctx: pointer to struct &v4l2_m2m_queue_ctx
 * @vbuf: the buffer to be removed
 */
void v4l2_m2m_buf_remove_by_buf(struct v4l2_m2m_queue_ctx *q_ctx,
				struct vb2_v4l2_buffer *vbuf);
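
/*
 * Example (an illustrative sketch only): returning all ready buffers to
 * videobuf2 from a vb2 .stop_streaming operation, a common pattern in m2m
 * drivers; foo_ctx is a placeholder and only the source queue is shown.
 *
 *	static void foo_stop_streaming(struct vb2_queue *q)
 *	{
 *		struct foo_ctx *ctx = vb2_get_drv_priv(q);
 *		struct vb2_v4l2_buffer *vbuf;
 *
 *		while ((vbuf = v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx)))
 *			v4l2_m2m_buf_done(vbuf, VB2_BUF_STATE_ERROR);
 *	}
 */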

/**
 * v4l2_m2m_src_buf_remove_by_buf() - take off exact source buffer from the
 * list of ready buffers
 *
 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
 * @vbuf: the buffer to be removed
 */
static inline void v4l2_m2m_src_buf_remove_by_buf(struct v4l2_m2m_ctx *m2m_ctx,
						  struct vb2_v4l2_buffer *vbuf)
{
	v4l2_m2m_buf_remove_by_buf(&m2m_ctx->out_q_ctx, vbuf);
}

/**
 * v4l2_m2m_dst_buf_remove_by_buf() - take off exact destination buffer from
 * the list of ready buffers
 *
 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
 * @vbuf: the buffer to be removed
 */
static inline void v4l2_m2m_dst_buf_remove_by_buf(struct v4l2_m2m_ctx *m2m_ctx,
						  struct vb2_v4l2_buffer *vbuf)
{
	v4l2_m2m_buf_remove_by_buf(&m2m_ctx->cap_q_ctx, vbuf);
}

struct vb2_v4l2_buffer *
v4l2_m2m_buf_remove_by_idx(struct v4l2_m2m_queue_ctx *q_ctx, unsigned int idx);

static inline struct vb2_v4l2_buffer *
v4l2_m2m_src_buf_remove_by_idx(struct v4l2_m2m_ctx *m2m_ctx, unsigned int idx)
{
	return v4l2_m2m_buf_remove_by_idx(&m2m_ctx->out_q_ctx, idx);
}

static inline struct vb2_v4l2_buffer *
v4l2_m2m_dst_buf_remove_by_idx(struct v4l2_m2m_ctx *m2m_ctx, unsigned int idx)
{
	return v4l2_m2m_buf_remove_by_idx(&m2m_ctx->cap_q_ctx, idx);
}

/**
 * v4l2_m2m_buf_copy_metadata() - copy buffer metadata from
 * the output buffer to the capture buffer
 *
 * @out_vb: the output buffer that is the source of the metadata.
 * @cap_vb: the capture buffer that will receive the metadata.
 * @copy_frame_flags: copy the KEY/B/PFRAME flags as well.
 *
 * This helper function copies the timestamp, timecode (if the TIMECODE
 * buffer flag was set), field and the TIMECODE, KEYFRAME, BFRAME, PFRAME
 * and TSTAMP_SRC_MASK flags from @out_vb to @cap_vb.
 *
 * If @copy_frame_flags is false, then the KEYFRAME, BFRAME and PFRAME
 * flags are not copied. This is typically needed for encoders that
 * set these bits explicitly.
 */
void v4l2_m2m_buf_copy_metadata(const struct vb2_v4l2_buffer *out_vb,
				struct vb2_v4l2_buffer *cap_vb,
				bool copy_frame_flags);

/* v4l2 request helper */

void v4l2_m2m_request_queue(struct media_request *req);

/* v4l2 ioctl helpers */

int v4l2_m2m_ioctl_reqbufs(struct file *file, void *priv,
			   struct v4l2_requestbuffers *rb);
int v4l2_m2m_ioctl_create_bufs(struct file *file, void *fh,
			       struct v4l2_create_buffers *create);
int v4l2_m2m_ioctl_querybuf(struct file *file, void *fh,
			    struct v4l2_buffer *buf);
int v4l2_m2m_ioctl_expbuf(struct file *file, void *fh,
			  struct v4l2_exportbuffer *eb);
int v4l2_m2m_ioctl_qbuf(struct file *file, void *fh,
			struct v4l2_buffer *buf);
int v4l2_m2m_ioctl_dqbuf(struct file *file, void *fh,
			 struct v4l2_buffer *buf);
int v4l2_m2m_ioctl_prepare_buf(struct file *file, void *fh,
			       struct v4l2_buffer *buf);
int v4l2_m2m_ioctl_streamon(struct file *file, void *fh,
			    enum v4l2_buf_type type);
int v4l2_m2m_ioctl_streamoff(struct file *file, void *fh,
			     enum v4l2_buf_type type);
int v4l2_m2m_ioctl_try_encoder_cmd(struct file *file, void *fh,
				   struct v4l2_encoder_cmd *ec);
int v4l2_m2m_ioctl_try_decoder_cmd(struct file *file, void *fh,
				   struct v4l2_decoder_cmd *dc);
int v4l2_m2m_fop_mmap(struct file *file, struct vm_area_struct *vma);
__poll_t v4l2_m2m_fop_poll(struct file *file, poll_table *wait);

#endif /* _MEDIA_V4L2_MEM2MEM_H */
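
/*
 * Example (an illustrative sketch only): the fop/ioctl helpers above are
 * meant to be plugged directly into a driver's operation tables when the m2m
 * context is stored in &v4l2_fh->m2m_ctx; foo_open, foo_release and the
 * remaining driver-specific ioctls are placeholders.
 *
 *	static const struct v4l2_file_operations foo_fops = {
 *		.owner		= THIS_MODULE,
 *		.open		= foo_open,
 *		.release	= foo_release,
 *		.poll		= v4l2_m2m_fop_poll,
 *		.unlocked_ioctl	= video_ioctl2,
 *		.mmap		= v4l2_m2m_fop_mmap,
 *	};
 *
 *	static const struct v4l2_ioctl_ops foo_ioctl_ops = {
 *		.vidioc_reqbufs		= v4l2_m2m_ioctl_reqbufs,
 *		.vidioc_querybuf	= v4l2_m2m_ioctl_querybuf,
 *		.vidioc_qbuf		= v4l2_m2m_ioctl_qbuf,
 *		.vidioc_dqbuf		= v4l2_m2m_ioctl_dqbuf,
 *		.vidioc_streamon	= v4l2_m2m_ioctl_streamon,
 *		.vidioc_streamoff	= v4l2_m2m_ioctl_streamoff,
 *		// format/control ioctls remain driver-specific
 *	};
 */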